# TicTacToe.py
# From the Gitee repository CCHChenChangHong/QiangHuaXueXi ("强化学习", Reinforcement Learning),
# uploaded by RLMaster陈常鸿 on 2018-01-21.
# The repository declares no open-source license (no LICENSE file), so check the project
# description and its upstream dependencies before reuse.
# https://gitee.com/CCHChenChangHong/QiangHuaXueXi.git
from __future__ import print_function
import numpy as np
import pickle  # used to persist the learned value estimates

BOARD_ROWS = 3
BOARD_COLS = 3
BOARD_SIZE = BOARD_ROWS * BOARD_COLS
class State:
    def __init__(self):
        # 1 marks the first player
        # -1 marks the second player
        # 0 marks an empty cell
        self.data = np.zeros((BOARD_ROWS, BOARD_COLS))
        self.winner = None
        self.hashVal = None
        self.end = None

    # compute a unique hash value for this state
    def getHash(self):
        if self.hashVal is None:
            self.hashVal = 0
            for i in self.data.reshape(BOARD_ROWS * BOARD_COLS):
                if i == -1:
                    i = 2
                self.hashVal = self.hashVal * 3 + i
        return int(self.hashVal)
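    # Note on getHash: the flattened board is read as a nine-digit base-3 number
    # (0 = empty, 1 = first player, 2 = second player after remapping -1 to 2),
    # so every board gets a distinct integer. For example, a board whose only
    # occupied cell is the centre, played by the first player, hashes to 3 ** 4 = 81.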
    # check whether a player has won or the game is a tie
    def isEnd(self):
        if self.end is not None:
            return self.end
        results = []
        # check rows
        for i in range(0, BOARD_ROWS):
            results.append(np.sum(self.data[i, :]))
        # check columns
        for i in range(0, BOARD_COLS):
            results.append(np.sum(self.data[:, i]))
        # check diagonals
        results.append(0)
        for i in range(0, BOARD_ROWS):
            results[-1] += self.data[i, i]
        results.append(0)
        for i in range(0, BOARD_ROWS):
            results[-1] += self.data[i, BOARD_ROWS - 1 - i]
        for result in results:
            if result == 3:
                self.winner = 1
                self.end = True
                return self.end
            if result == -3:
                self.winner = -1
                self.end = True
                return self.end
        # check for a tie: every cell is occupied and nobody has won
        occupied = np.sum(np.abs(self.data))
        if occupied == BOARD_ROWS * BOARD_COLS:
            self.winner = 0
            self.end = True
            return self.end
        # the game is still going on
        self.end = False
        return self.end
    # @symbol: 1 or -1
    # put a piece of the given symbol at position (i, j)
    def nextState(self, i, j, symbol):
        newState = State()
        newState.data = np.copy(self.data)
        newState.data[i, j] = symbol
        return newState
    # print the board
    def show(self):
        for i in range(0, BOARD_ROWS):
            print('-------------')
            out = '| '
            for j in range(0, BOARD_COLS):
                if self.data[i, j] == 1:
                    token = '*'
                elif self.data[i, j] == -1:
                    token = 'x'
                else:
                    token = '0'
                out += token + ' | '
            print(out)
        print('-------------')
def getAllStatesImpl(currentState, currentSymbol, allStates):
    for i in range(0, BOARD_ROWS):
        for j in range(0, BOARD_COLS):
            if currentState.data[i][j] == 0:
                newState = currentState.nextState(i, j, currentSymbol)
                newHash = newState.getHash()
                if newHash not in allStates:
                    isEnd = newState.isEnd()
                    allStates[newHash] = (newState, isEnd)
                    if not isEnd:
                        getAllStatesImpl(newState, -currentSymbol, allStates)

def getAllStates():
    currentSymbol = 1
    currentState = State()
    allStates = dict()
    allStates[currentState.getHash()] = (currentState, currentState.isEnd())
    getAllStatesImpl(currentState, currentSymbol, allStates)
    return allStates
# all possible board configurations
allStates = getAllStates()
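# Note: getAllStates walks the full game tree once, recording every reachable
# board together with a flag marking whether it is terminal. Both players use
# this table to initialise and look up their value estimates.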
class Judger:
    # @player1: the player who moves first, with symbol 1
    # @player2: the other player, with symbol -1
    # @feedback: if True, both players receive a reward when the game ends
    def __init__(self, player1, player2, feedback=True):
        self.p1 = player1
        self.p2 = player2
        self.feedback = feedback
        self.currentPlayer = None
        self.p1Symbol = 1
        self.p2Symbol = -1
        self.p1.setSymbol(self.p1Symbol)
        self.p2.setSymbol(self.p2Symbol)
        self.currentState = State()
        self.allStates = allStates

    # give rewards to both players
    def giveReward(self):
        if self.currentState.winner == self.p1Symbol:
            self.p1.feedReward(1)
            self.p2.feedReward(0)
        elif self.currentState.winner == self.p2Symbol:
            self.p1.feedReward(0)
            self.p2.feedReward(1)
        else:
            self.p1.feedReward(0.1)
            self.p2.feedReward(0.5)
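    # Note on the reward scheme: a win is worth 1 and a loss 0; a tie is worth
    # 0.1 to the first player and 0.5 to the second, presumably to push the
    # first player (who moves first and has the advantage) to prefer wins over draws.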
    def feedCurrentState(self):
        self.p1.feedState(self.currentState)
        self.p2.feedState(self.currentState)

    def reset(self):
        self.p1.reset()
        self.p2.reset()
        self.currentState = State()
        self.currentPlayer = None

    # @show: if True, print the board after every move
    def play(self, show=False):
        self.reset()
        self.feedCurrentState()
        while True:
            # alternate the current player
            if self.currentPlayer == self.p1:
                self.currentPlayer = self.p2
            else:
                self.currentPlayer = self.p1
            if show:
                self.currentState.show()
            [i, j, symbol] = self.currentPlayer.takeAction()
            self.currentState = self.currentState.nextState(i, j, symbol)
            hashValue = self.currentState.getHash()
            self.currentState, isEnd = self.allStates[hashValue]
            self.feedCurrentState()
            if isEnd:
                if self.feedback:
                    self.giveReward()
                return self.currentState.winner
# AI player
class Player:
    # @stepSize: step size for updating the value estimates
    # @exploreRate: probability of taking a random exploratory move
    def __init__(self, stepSize=0.1, exploreRate=0.1):
        self.allStates = allStates
        self.estimations = dict()
        self.stepSize = stepSize
        self.exploreRate = exploreRate
        self.states = []

    def reset(self):
        self.states = []

    def setSymbol(self, symbol):
        self.symbol = symbol
        for hash in self.allStates.keys():
            (state, isEnd) = self.allStates[hash]
            if isEnd:
                if state.winner == self.symbol:
                    self.estimations[hash] = 1.0
                else:
                    self.estimations[hash] = 0
            else:
                self.estimations[hash] = 0.5
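    # Note on setSymbol: value estimates are seeded from the terminal states:
    # 1.0 for boards this player has won, 0 for boards it has lost or drawn,
    # and 0.5 (maximum uncertainty) for every non-terminal board.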
    # receive the current state
    def feedState(self, state):
        self.states.append(state)

    # update the value estimates according to the reward
    def feedReward(self, reward):
        if len(self.states) == 0:
            return
        self.states = [state.getHash() for state in self.states]
        target = reward
        for latestState in reversed(self.states):
            value = self.estimations[latestState] + self.stepSize * (target - self.estimations[latestState])
            self.estimations[latestState] = value
            target = value
        self.states = []
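    # Note on feedReward: this is a temporal-difference style backup applied
    # backwards over the episode. For each visited state s, latest first,
    #     V(s) <- V(s) + stepSize * (target - V(s))
    # where target starts as the final reward and is then replaced by the
    # freshly updated value of the later state, so credit flows from the end
    # of the game toward earlier positions.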
    # decide the next move
    def takeAction(self):
        state = self.states[-1]
        nextStates = []
        nextPositions = []
        for i in range(BOARD_ROWS):
            for j in range(BOARD_COLS):
                if state.data[i, j] == 0:
                    nextPositions.append([i, j])
                    nextStates.append(state.nextState(i, j, self.symbol).getHash())
        if np.random.binomial(1, self.exploreRate):
            np.random.shuffle(nextPositions)
            # truncating the state history here may not be ideal for exploratory
            # moves, but it keeps them out of the later value backup
            self.states = []
            action = nextPositions[0]
            action.append(self.symbol)
            return action
        values = []
        for hash, pos in zip(nextStates, nextPositions):
            values.append((self.estimations[hash], pos))
        np.random.shuffle(values)
        values.sort(key=lambda x: x[0], reverse=True)
        action = values[0][1]
        action.append(self.symbol)
        return action
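    # Note on takeAction: with probability exploreRate a random empty cell is
    # chosen (exploration); otherwise the move leading to the successor state
    # with the highest value estimate is played, with ties broken randomly by
    # the shuffle before sorting.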
    def savePolicy(self):
        with open('optimal_policy_' + str(self.symbol), 'wb') as fw:
            pickle.dump(self.estimations, fw)

    def loadPolicy(self):
        with open('optimal_policy_' + str(self.symbol), 'rb') as fr:
            self.estimations = pickle.load(fr)
# human player interface
# input a board position numbered as follows:
# | 1 | 2 | 3 |
# | 4 | 5 | 6 |
# | 7 | 8 | 9 |
class HumanPlayer:
    def __init__(self, stepSize=0.1, exploreRate=0.1):
        self.symbol = None
        self.currentState = None

    def reset(self):
        return

    def setSymbol(self, symbol):
        self.symbol = symbol

    def feedState(self, state):
        self.currentState = state

    def feedReward(self, reward):
        return

    def takeAction(self):
        data = int(input("Enter your position: "))
        data -= 1
        i = data // BOARD_COLS  # integer division gives the row
        j = data % BOARD_COLS   # remainder gives the column
        if self.currentState.data[i, j] != 0:
            # the cell is already occupied, ask again
            return self.takeAction()
        return (i, j, self.symbol)
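    # Example: entering 5 selects the centre cell, since (5 - 1) // 3 = 1 and
    # (5 - 1) % 3 = 1, i.e. row 1, column 1 (zero-indexed).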
def train(epochs=20000):
    player1 = Player()  # instantiate player 1
    player2 = Player()  # instantiate player 2
    judger = Judger(player1, player2)  # referee that runs games between them
    player1Win = 0.0  # number of games won by player 1
    player2Win = 0.0  # number of games won by player 2
    # epochs is the number of training games
    for i in range(0, epochs):
        print("Training epoch", i)
        winner = judger.play()
        if winner == 1:
            player1Win += 1
        if winner == -1:
            player2Win += 1
        judger.reset()  # reset the game once a match is over
    print(player1Win / epochs)  # player 1 win rate
    print(player2Win / epochs)  # player 2 win rate
    player1.savePolicy()  # save the learned policies
    player2.savePolicy()
def compete(turns=500):
    player1 = Player(exploreRate=0)  # no exploration during evaluation
    player2 = Player(exploreRate=0)
    judger = Judger(player1, player2, False)
    player1.loadPolicy()  # load the trained policies
    player2.loadPolicy()
    player1Win = 0.0  # number of games won by player 1
    player2Win = 0.0
    # turns is the number of evaluation games
    for i in range(0, turns):
        print("Epoch", i)
        winner = judger.play()
        if winner == 1:
            player1Win += 1
        if winner == -1:
            player2Win += 1
        judger.reset()
    print(player1Win / turns)  # player 1 win rate
    print(player2Win / turns)
def play():
    while True:
        player1 = Player(exploreRate=0)
        player2 = HumanPlayer()  # the human-controlled player
        judger = Judger(player1, player2, False)
        player1.loadPolicy()
        winner = judger.play(True)
        if winner == player2.symbol:
            print("You win!")
        elif winner == player1.symbol:
            print("You lose!")
        else:
            print("Tie!")
if __name__ == '__main__':
    train()
    compete()
    play()
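# Usage note: train() must run before compete() or play(), since savePolicy()
# writes 'optimal_policy_1' and 'optimal_policy_-1' to the working directory
# and loadPolicy() expects to find them there.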