trttung1610 commited on
Commit
6bc2b54
1 Parent(s): 5282e48

Upload 5 files

Browse files
Files changed (6) hide show
  1. .gitattributes +2 -0
  2. app_9x9.py +122 -0
  3. object_9x9.py +278 -0
  4. policy_9x9_p1 +3 -0
  5. policy_9x9_p2 +3 -0
  6. requirements.txt +2 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ policy_9x9_p1 filter=lfs diff=lfs merge=lfs -text
37
+ policy_9x9_p2 filter=lfs diff=lfs merge=lfs -text
app_9x9.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import numpy as np
3
+ from object_9x9 import Player, HumanPlayer
4
+
5
# Create RL bot player and human player
p1 = Player("9x9_p1")  # RL agent; Player.__init__ loads its policy file ('policy_9x9_p1') from disk
human_player = HumanPlayer("p2")  # NOTE(review): never referenced elsewhere in this file
8
+
9
def handle_click(i, j):
    """Place the current player's mark at cell (i, j) and advance the game.

    An invalid click (occupied cell) only raises the warning flag. A valid
    move places the mark, checks for a winner, updates the scoreboard,
    rewards the RL bot, and toggles the turn.
    """
    if (i, j) not in check_available_moves(extra=True):
        # Clicked an occupied cell — flag it, change nothing else.
        st.session_state.warning = True
    elif not st.session_state.winner:
        st.session_state.warning = False
        st.session_state.board[i, j] = st.session_state.player
        winner = check_win(st.session_state.board)
        if winner:
            st.session_state.winner = winner
            # Fix: the X/O scoreboard rendered by main() was never updated.
            st.session_state.win[winner] += 1
            if st.session_state.opponent == 'Computer':
                # Give reward to the RL bot and update its policy.
                # NOTE(review): main() only lets the human click while player
                # is 'X', so the bot plays 'O' — yet reward 1 is given when
                # 'X' wins; verify the intended orientation.
                # NOTE(review): p1.addState() is never called in this app, so
                # p1.states is empty and feedReward() is currently a no-op.
                if winner == 'X':
                    p1.feedReward(1)
                elif winner == 'O':
                    p1.feedReward(0)
                else:
                    # Unreachable: check_win() only returns 'X', 'O' or None.
                    p1.feedReward(0.1)
                # Save the RL bot's policy.
                p1.savePolicy()
        # Toggle the player's turn.
        st.session_state.player = "O" if st.session_state.player == "X" else "X"
32
+
33
def init(post_init=False):
    """Populate st.session_state for a new game.

    post_init=False (first run) also creates the scoreboard and opponent
    choice; post_init=True (the "New game" button) keeps both and only
    clears the per-game state.
    """
    if not post_init:
        st.session_state.win = {'X': 0, 'O': 0}
        st.session_state.opponent = 'Computer'  # play against the RL bot
    fresh_game = {
        'board': np.full((9, 9), '.', dtype=str),  # '.' marks an empty cell
        'player': 'X',        # human ('X') always opens
        'warning': False,
        'winner': None,
        'over': False,
    }
    for key, value in fresh_game.items():
        st.session_state[key] = value
42
+
43
def check_available_moves(extra=False) -> list:
    """Return the empty cells of the board.

    With extra=False the result is a list of flat indices (0..80); with
    extra=True it is a list of (row, col) tuples.
    """
    flat_cells = [cell for board_row in st.session_state.board.tolist() for cell in board_row]
    empty_indices = [idx for idx, cell in enumerate(flat_cells) if cell == '.']
    if extra:
        # divmod(idx, 9) == (idx // 9, idx % 9) for a 9-wide board.
        return [divmod(idx, 9) for idx in empty_indices]
    return empty_indices
49
+
50
def check_win(board):
    """Return 'X' or 'O' if that symbol has three consecutive marks in any
    row, column, or diagonal of the 9x9 board; otherwise return None.

    Scans cells in row-major order and, from each marked cell, looks two
    steps ahead in each of the four line directions (right, down,
    down-right, down-left) — every winning triple is found from its
    first cell, so forward-only checks are sufficient.
    """
    directions = ((0, 1), (1, 0), (1, 1), (1, -1))
    for r in range(9):
        for c in range(9):
            mark = board[r, c]
            if mark == '.':
                continue
            for dr, dc in directions:
                r2, c2 = r + 2 * dr, c + 2 * dc
                if 0 <= r2 < 9 and 0 <= c2 < 9 \
                        and board[r + dr, c + dc] == mark and board[r2, c2] == mark:
                    return mark
    return None
69
+
70
def computer_player():
    """Have the RL bot choose and play one move for the current turn.

    Fix: the original computed the legal-move list twice (once as `moves`
    and again as `positions`); a single call is sufficient.
    """
    moves = check_available_moves(extra=True)
    if moves:
        # chooseAction expects (positions, board, symbol); -1 is the 'O' side.
        p1_action = p1.chooseAction(moves, st.session_state.board, -1)
        # chooseAction only returns entries drawn from `moves`; the guard is
        # kept as a cheap safety net.
        if p1_action in moves:
            i, j = p1_action
            handle_click(i, j)
81
+
82
def main():
    """Render the Streamlit UI and drive one rerun of the game loop."""
    st.write(
        """
        # ❎🅾️ Tic Tac Toe (9x9)
        """
    )

    # First rerun of a session: create all session-state keys.
    if "board" not in st.session_state:
        init()

    # Header widgets: reset button, scoreboard, turn indicator.
    reset, score, player = st.columns([0.5, 0.6, 1])
    reset.button('New game', on_click=init, args=(True,))

    # Draw the 9x9 grid, one row of buttons per board row.
    for i, row in enumerate(st.session_state.board):
        # [5] spacers on both sides centre the nine 1-wide cell columns.
        cols = st.columns([5] + [1] * 9 + [5])
        for j, field in enumerate(row):
            if st.session_state.player == 'X' and not st.session_state.winner:
                # Only allow human player to make a move if it's their turn and the game is not over
                cols[j + 1].button(
                    field,
                    key=f"{i}-{j}",
                    on_click=lambda i=i, j=j: handle_click(i, j)  # i=i, j=j defaults avoid late-binding closures
                )
            else:
                # If it's not the human player's turn or the game is over, let the bot play
                # NOTE(review): this branch runs once per remaining grid cell;
                # the first call flips the turn back to 'X' so later cells
                # render buttons, but cells visited before the flip are not
                # drawn this rerun — confirm this is intended.
                computer_player()

    if st.session_state.winner:
        st.success(f"Congrats! {st.session_state.winner} won the game! 🎈")
    elif not check_available_moves():
        st.info(f"It's a tie 📍")

    # NOTE(review): st.session_state.win is initialised but never incremented
    # anywhere visible in this file, so the scoreboard always shows 0 🆚 0.
    score.button(f'❌{st.session_state.win["X"]} 🆚 {st.session_state.win["O"]}⭕')
    player.button(
        f'{"❌" if st.session_state.player == "X" else "⭕"}\'s turn'
        if not st.session_state.winner
        else f'🏁 Game finished'
    )
120
+
121
# Streamlit re-executes the whole script on every interaction; the guard
# keeps main() from running if the module is merely imported.
if __name__ == '__main__':
    main()
object_9x9.py ADDED
@@ -0,0 +1,278 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pickle
3
+
4
BOARD_ROWS = 9
BOARD_COLS = 9


class State:
    """Game environment for 9x9 tic-tac-toe where three consecutive marks win.

    p1 plays symbol 1 ('x') and moves first; p2 plays symbol -1 ('o').
    """

    def __init__(self, p1, p2):
        self.board = np.zeros((BOARD_ROWS, BOARD_COLS))
        self.p1 = p1
        self.p2 = p2
        self.isEnd = False
        self.boardHash = None
        # init p1 plays first
        self.playerSymbol = 1

    # get unique hash of current board state
    def getHash(self):
        """Return (and cache) a string key uniquely identifying the board."""
        self.boardHash = str(self.board.reshape(BOARD_COLS * BOARD_ROWS))
        return self.boardHash

    def winner(self):
        """Return 1 or -1 for the winning symbol, 0 for a tie, None if ongoing.

        Fix: the original summed entire 9-cell rows/columns/diagonals and
        compared against +/-3, which is wrong on a 9x9 board (e.g. four 1s
        and one -1 in a row also sum to 3) and does not match the
        three-consecutive rule used by app_9x9.check_win. This version scans
        for three consecutive equal symbols in all four line directions.
        """
        for i in range(BOARD_ROWS):
            for j in range(BOARD_COLS):
                s = self.board[i, j]
                if s == 0:
                    continue
                # horizontal
                if j + 2 < BOARD_COLS and self.board[i, j + 1] == self.board[i, j + 2] == s:
                    self.isEnd = True
                    return int(s)
                # vertical
                if i + 2 < BOARD_ROWS and self.board[i + 1, j] == self.board[i + 2, j] == s:
                    self.isEnd = True
                    return int(s)
                # diagonal (top-left to bottom-right)
                if i + 2 < BOARD_ROWS and j + 2 < BOARD_COLS \
                        and self.board[i + 1, j + 1] == self.board[i + 2, j + 2] == s:
                    self.isEnd = True
                    return int(s)
                # anti-diagonal (top-right to bottom-left)
                if i + 2 < BOARD_ROWS and j - 2 >= 0 \
                        and self.board[i + 1, j - 1] == self.board[i + 2, j - 2] == s:
                    self.isEnd = True
                    return int(s)

        # tie: no available positions left
        if len(self.availablePositions()) == 0:
            self.isEnd = True
            return 0
        # not end
        self.isEnd = False
        return None

    def availablePositions(self):
        """Return the list of empty cells as (row, col) tuples."""
        positions = []
        for i in range(BOARD_ROWS):
            for j in range(BOARD_COLS):
                if self.board[i, j] == 0:
                    positions.append((i, j))  # need to be tuple
        return positions

    def updateState(self, position):
        """Place the current player's symbol at `position` and switch turns."""
        self.board[position] = self.playerSymbol
        # switch to another player
        self.playerSymbol = -1 if self.playerSymbol == 1 else 1

    # only when game ends
    def giveReward(self):
        """Backpropagate terminal rewards to both players.

        NOTE(review): draws reward p1 with 0.1 but p2 with 0.5 — presumably
        to discourage the first mover from settling for ties; confirm.
        """
        result = self.winner()
        if result == 1:
            self.p1.feedReward(1)
            self.p2.feedReward(0)
        elif result == -1:
            self.p1.feedReward(0)
            self.p2.feedReward(1)
        else:
            self.p1.feedReward(0.1)
            self.p2.feedReward(0.5)

    # board reset
    def reset(self):
        """Clear the board and per-game flags for a new episode."""
        self.board = np.zeros((BOARD_ROWS, BOARD_COLS))
        self.boardHash = None
        self.isEnd = False
        self.playerSymbol = 1

    def playwithbot(self, rounds=100):
        """Run `rounds` self-play games between p1 and p2, updating both policies."""
        for i in range(rounds):
            if i % 1000 == 0:
                print("Rounds {}".format(i))
            while not self.isEnd:
                # Player 1
                positions = self.availablePositions()
                p1_action = self.p1.chooseAction(positions, self.board, self.playerSymbol)
                # take action and update board state
                self.updateState(p1_action)
                board_hash = self.getHash()
                self.p1.addState(board_hash)
                # check board status if it is end
                win = self.winner()
                if win is not None:
                    # ended with p1 either win or draw
                    self.giveReward()
                    self.p1.reset()
                    self.p2.reset()
                    self.reset()
                    break
                else:
                    # Player 2
                    positions = self.availablePositions()
                    p2_action = self.p2.chooseAction(positions, self.board, self.playerSymbol)
                    self.updateState(p2_action)
                    board_hash = self.getHash()
                    self.p2.addState(board_hash)

                    win = self.winner()
                    if win is not None:
                        # ended with p2 either win or draw
                        self.giveReward()
                        self.p1.reset()
                        self.p2.reset()
                        self.reset()
                        break

    # play with human
    def playwithhuman(self):
        """Alternate p1 (bot) and p2 (human) moves on the console until the game ends."""
        while not self.isEnd:
            # Player 1
            positions = self.availablePositions()
            p1_action = self.p1.chooseAction(positions, self.board, self.playerSymbol)
            # take action and update board state
            self.updateState(p1_action)
            self.showBoard()
            # check board status if it is end
            win = self.winner()
            if win is not None:
                if win == 1:
                    print(self.p1.name, "wins!")
                else:
                    print("tie!")
                self.reset()
                break
            else:
                # Player 2 (human): HumanPlayer.chooseAction takes only positions
                positions = self.availablePositions()
                p2_action = self.p2.chooseAction(positions)

                self.updateState(p2_action)
                self.showBoard()
                win = self.winner()
                if win is not None:
                    if win == -1:
                        print(self.p2.name, "wins!")
                    else:
                        print("tie!")
                    self.reset()
                    break

    def showBoard(self):
        """Print the board to stdout; p1 = 'x', p2 = 'o'.

        Fix: this method was commented out although playwithhuman() calls it,
        which raised AttributeError. Restored (widened to the 9-column board).
        """
        separator = '-' * (BOARD_COLS * 4 + 1)
        for i in range(BOARD_ROWS):
            print(separator)
            out = '| '
            for j in range(BOARD_COLS):
                if self.board[i, j] == 1:
                    token = 'x'
                elif self.board[i, j] == -1:
                    token = 'o'
                else:
                    token = ' '
                out += token + ' | '
            print(out)
        print(separator)
+
187
+
188
class Player:
    """Value-iteration tic-tac-toe agent with epsilon-greedy move selection.

    Attributes:
        states: board hashes visited this game, used for reward backprop.
        states_value: mapping board hash -> estimated state value.
        lr, decay_gamma: learning rate and per-step reward decay.
        exp_rate: probability of playing a random (exploratory) move.
    """

    def __init__(self, name, exp_rate=0.3):
        self.name = name
        self.states = []  # record all positions taken this game
        self.lr = 0.2
        self.exp_rate = exp_rate
        self.decay_gamma = 0.9
        self.states_value = {}  # state hash -> value
        self.loadPolicy('policy_' + str(self.name))  # load pre-trained policy if present

    def getHash(self, board):
        """Return a hashable string key for `board`.

        reshape(-1) flattens exactly like reshape(rows*cols) but does not
        depend on the module-level board-size constants.
        """
        return str(board.reshape(-1))

    def chooseAction(self, positions, current_board, symbol):
        """Pick a move from `positions`.

        With probability exp_rate a uniformly random move is returned;
        otherwise the move whose resulting board has the highest learned
        value (unknown boards count as 0; ties go to the later position).
        """
        if np.random.uniform(0, 1) <= self.exp_rate:
            # take random action
            idx = np.random.choice(len(positions))
            action = positions[idx]
        else:
            value_max = float('-inf')  # safer sentinel than the original -999
            for p in positions:
                next_board = current_board.copy()
                next_board[p] = symbol
                next_boardHash = self.getHash(next_board)
                value = self.states_value.get(next_boardHash, 0)
                if value >= value_max:
                    value_max = value
                    action = p
        return action

    # append a hash state
    def addState(self, state):
        """Record a visited board hash for end-of-game reward propagation."""
        self.states.append(state)

    def feedReward(self, reward):
        """At game end, backpropagate `reward` through the visited states.

        Walks the episode backwards applying a TD-style update:
        V(s) += lr * (gamma * reward - V(s)), then feeds V(s) to the
        previous state as its reward.
        """
        for state_hash in reversed(self.states):
            if self.states_value.get(state_hash) is None:
                self.states_value[state_hash] = 0
            self.states_value[state_hash] += self.lr * (self.decay_gamma * reward - self.states_value[state_hash])
            reward = self.states_value[state_hash]

    def reset(self):
        """Forget this game's visited states (learned values are kept)."""
        self.states = []

    def savePolicy(self):
        """Persist the learned value table.

        Fix: previously wrote 'policy_9x9_<name>' while loadPolicy read
        'policy_<name>', so a saved policy was never reloaded. Both now use
        'policy_<name>', and the file handle is managed by a context manager.
        """
        with open('policy_' + str(self.name), 'wb') as fw:
            pickle.dump(self.states_value, fw)

    def loadPolicy(self, file):
        """Load a pickled value table from `file`; start empty if it is missing.

        Fix: previously raised FileNotFoundError for a fresh (untrained)
        agent and leaked the file handle on unpickling errors.
        NOTE: pickle.load can execute arbitrary code — only load trusted
        policy files.
        """
        try:
            with open(file, 'rb') as fr:
                self.states_value = pickle.load(fr)
        except FileNotFoundError:
            self.states_value = {}
245
+
246
+
247
class HumanPlayer:
    """A human participant.

    Moves are supplied from outside (console/UI), so every learning hook
    that State expects of a player is an inert no-op returning None.
    """

    def __init__(self, name):
        self.name = name

    def chooseAction(self, positions):
        """The human supplies the move externally; nothing to compute."""
        return None

    def addState(self, state):
        """No game history is tracked for a human."""
        return None

    def feedReward(self, reward):
        """Humans do not learn from rewards here."""
        return None

    def reset(self):
        """Nothing to reset."""
        return None
264
+
265
+
266
# Self-play training entry point: two agents learn against each other,
# then their learned value tables are written to disk.
if __name__ == "__main__":
    agent_one = Player("p1")
    agent_two = Player("p2")

    game = State(agent_one, agent_two)
    print("training...")
    game.playwithbot(2000)

    agent_one.savePolicy()
    agent_two.savePolicy()
+
policy_9x9_p1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df6ea3d6afd12a02c2249ed25c2e259b379d3c7e27127fc45b311f9716625c7c
3
+ size 34402854
policy_9x9_p2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9bc0ddefb84074489fa5f9c6fda365f3b85e48f090624f15d427d820bb7d3bf0
3
+ size 37261567
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ numpy
2
+ streamlit