
// Global AI namespace (created here unless another script already defined it).
var AI = AI || {};

// --- Neural-network state ---
AI.neuralNetwork = null;   // lazily built by AI.initNeuralNetwork()
AI.isTraining = false;     // true while AI.train() is running
AI.trainingData = [];      // [{input, target}] samples consumed by AI.train()

// --- Search state ---
AI.historyTable = {};      // history-heuristic table: map-string -> {depth, value}

// AI entry point: choose the AI's move for the current position.
// pace: concatenated move string of the game so far, matched against the
//       opening book (com.gambit).
// Returns a 4-element move array ([fromX, fromY, toX, toY]) or false when the
// AI has no usable move (it is mated).
AI.init = function(pace){
	// 1) Opening book: keep only book lines that still match the game so far.
	var bill = AI.historyBill || com.gambit; // candidate opening-book lines
	if (bill.length){
		var len = pace.length;
		var arr = [];
		for (var i = 0; i < bill.length; i++){
			if (bill[i].slice(0, len) == pace){
				arr.push(bill[i]);
			}
		}
		if (arr.length){
			// Pick a random matching line and play its next move (4 chars).
			var inx = Math.floor(Math.random() * arr.length);
			AI.historyBill = arr; // narrow the book for following turns
			return arr[inx].slice(len, len + 4).split("");
		}else{
			AI.historyBill = []; // book exhausted; never consult it again
		}
	}

	// 2) No book hit: run the alpha-beta search.
	var initTime = new Date().getTime();
	AI.treeDepth = play.depth;

	AI.number = 0;                // leaf-evaluation counter (shown in the UI)
	// Bug fix: this was "AI.setHistoryTable.lenght = 0" — a misspelling of
	// "length", which is a non-writable function property anyway, so the
	// counter lives under its own name.
	AI.setHistoryTable.count = 0;

	var val = AI.getAlphaBeta(-99999, 99999, AI.treeDepth, com.arr2Clone(play.map), play.my);
	if (!val || val.value == -8888){
		// Deep search produced nothing usable; retry shallow so AI still moves.
		AI.treeDepth = 2;
		val = AI.getAlphaBeta(-99999, 99999, AI.treeDepth, com.arr2Clone(play.map), play.my);
	}
	if (val && val.value != -8888){
		var man = play.mans[val.key];
		var nowTime = new Date().getTime();
		com.get("moveInfo").innerHTML = '<h3>AI搜索结果：</h3>最佳着法：'+
										com.createMove(com.arr2Clone(play.map),man.x,man.y,val.x,val.y)+
										'<br />搜索深度：'+AI.treeDepth+'<br />搜索分支：'+
										AI.number+'个 <br />最佳着法评估：'+
										val.value+'分'+
										' <br />搜索用时：'+
										(nowTime-initTime)+'毫秒';
		return [man.x, man.y, val.x, val.y];
	}else{
		return false;
	}
}

// Create and randomly initialize the evaluation network.
// Topology: 91 inputs (90 board squares + side to move)
//           -> 256 hidden neurons (ReLU) -> 1 linear output (position score).
AI.initNeuralNetwork = function() {
    AI.neuralNetwork = {
        inputSize: 91,
        hiddenSize: 256,
        outputSize: 1,
        hiddenWeights: [],   // [hiddenSize][inputSize]
        outputWeights: [],   // [outputSize][hiddenSize]
        hiddenBias: [],      // [hiddenSize]
        outputBias: 0        // single scalar (outputSize is 1)
    };

    var nn = AI.neuralNetwork;

    // Uniform random initialization in [-1, 1).
    for (let i = 0; i < nn.hiddenSize; i++) {
        nn.hiddenWeights[i] = [];
        for (let j = 0; j < nn.inputSize; j++) {
            nn.hiddenWeights[i][j] = (Math.random() - 0.5) * 2;
        }
        nn.hiddenBias[i] = (Math.random() - 0.5) * 2;
    }

    for (let i = 0; i < nn.outputSize; i++) {
        nn.outputWeights[i] = [];
        for (let j = 0; j < nn.hiddenSize; j++) {
            nn.outputWeights[i][j] = (Math.random() - 0.5) * 2;
        }
    }
    // Bug fix: outputBias is a single scalar but was reassigned inside the
    // output-neuron loop on every iteration; set it exactly once.
    nn.outputBias = (Math.random() - 0.5) * 2;
};

// Rectified linear unit activation: max(0, x).
AI.relu = function(x) {
    const activated = Math.max(0, x);
    return activated;
};

// Derivative of ReLU: 1 for positive inputs, 0 otherwise.
AI.reluDerivative = function(x) {
    if (x > 0) {
        return 1;
    }
    return 0;
};

// Encode the board into the network's input vector.
// Each square becomes one number: 0 for empty, 1..7 for own-side piece types,
// -1..-7 for enemy pieces; the final element carries the side to move.
// map: 2-D board of piece keys (e.g. "c0", "J0"); my: side to move (1 or -1,
// same convention as play.mans[key].my used by AI.evaluate).
AI.boardToInput = function(map, my) {
    // Piece-type codes, shared by both sides.
    const typeCode = {
        j: 1, // general (将/帅)
        s: 2, // advisor (士)
        x: 3, // elephant (象)
        m: 4, // horse (马)
        c: 5, // chariot (车)
        p: 6, // cannon (炮)
        z: 7  // soldier (卒/兵)
    };

    let input = [];
    for (let i = 0; i < map.length; i++) {
        for (let j = 0; j < map[i].length; j++) {
            const piece = map[i][j];
            if (!piece) {
                input.push(0); // empty square
                continue;
            }
            // Bug fix: keys occur in both cases ("j0" and "J0" — see the
            // king-capture check in AI.getAlphaBeta), but the original switch
            // matched only lowercase, so every uppercase piece encoded as 0.
            let value = typeCode[piece.charAt(0).toLowerCase()] || 0;
            // Sign encodes ownership. Use the piece object's side flag — the
            // same field AI.evaluate relies on — rather than guessing from
            // the key's second character (which looks like a piece index).
            const man = play.mans[piece];
            if (man && man.my !== my) {
                value = -value;
            }
            input.push(value);
        }
    }

    // Side to move as the final input feature.
    input.push(my);

    return input;
};

// Run one forward pass through the network and return the scalar evaluation.
// input: numeric vector of length inputSize (see AI.boardToInput).
AI.forward = function(input) {
    // Lazily build the network on first use.
    if (!AI.neuralNetwork) {
        AI.initNeuralNetwork();
    }
    const nn = AI.neuralNetwork;

    // Hidden layer: weighted sum, then bias, then ReLU activation.
    const hidden = [];
    for (let h = 0; h < nn.hiddenSize; h++) {
        const weights = nn.hiddenWeights[h];
        let acc = 0;
        for (let k = 0; k < nn.inputSize; k++) {
            acc += input[k] * weights[k];
        }
        acc += nn.hiddenBias[h];
        hidden[h] = AI.relu(acc);
    }

    // Output layer: a single linear neuron.
    const outWeights = nn.outputWeights[0];
    let result = 0;
    for (let h = 0; h < nn.hiddenSize; h++) {
        result += hidden[h] * outWeights[h];
    }
    result += nn.outputBias;

    return result;
};

// Score a position with the neural network, from the perspective of `my`.
AI.evaluateWithNN = function(map, my) {
    return AI.forward(AI.boardToInput(map, my));
};

// Train the network on AI.trainingData with plain stochastic gradient descent.
// epochs: number of passes over the data set; learningRate: SGD step size.
// Uses the raw error (target - output) as the output delta and backpropagates
// through the single ReLU hidden layer.
AI.train = function(epochs, learningRate) {
    if (!AI.neuralNetwork) {
        AI.initNeuralNetwork();
    }

    if (AI.trainingData.length === 0) {
        console.log("没有训练数据");
        return;
    }

    AI.isTraining = true;
    const nn = AI.neuralNetwork;

    for (let epoch = 0; epoch < epochs; epoch++) {
        let totalError = 0;

        for (let d = 0; d < AI.trainingData.length; d++) {
            const data = AI.trainingData[d];
            const input = data.input;
            const target = data.target;

            // --- Forward pass (same math as AI.forward) ---
            let hiddenOutputs = [];
            for (let i = 0; i < nn.hiddenSize; i++) {
                let sum = 0;
                for (let j = 0; j < nn.inputSize; j++) {
                    sum += input[j] * nn.hiddenWeights[i][j];
                }
                sum += nn.hiddenBias[i];
                hiddenOutputs[i] = AI.relu(sum);
            }

            let output = 0;
            for (let i = 0; i < nn.hiddenSize; i++) {
                output += hiddenOutputs[i] * nn.outputWeights[0][i];
            }
            output += nn.outputBias;

            // --- Error ---
            const error = target - output;
            totalError += Math.abs(error);

            // --- Backward pass ---
            const outputDelta = error;

            // Bug fix: hidden-layer deltas must use the output weights as
            // they were BEFORE this sample's update. The original updated
            // outputWeights first and then read the updated values when
            // backpropagating, corrupting the hidden-layer gradient.
            const oldOutputWeights = nn.outputWeights[0].slice();

            // Update output layer.
            for (let i = 0; i < nn.hiddenSize; i++) {
                nn.outputWeights[0][i] += learningRate * outputDelta * hiddenOutputs[i];
            }
            nn.outputBias += learningRate * outputDelta;

            // Update hidden layer.
            for (let i = 0; i < nn.hiddenSize; i++) {
                // ReLU derivative on the activation is equivalent to using
                // the pre-activation here (relu(s) > 0 iff s > 0).
                const hiddenDelta = outputDelta * oldOutputWeights[i] * AI.reluDerivative(hiddenOutputs[i]);

                for (let j = 0; j < nn.inputSize; j++) {
                    nn.hiddenWeights[i][j] += learningRate * hiddenDelta * input[j];
                }
                nn.hiddenBias[i] += learningRate * hiddenDelta;
            }
        }

        if (epoch % 10 === 0) {
            console.log(`Epoch ${epoch}, Average Error: ${totalError / AI.trainingData.length}`);
        }
    }

    AI.isTraining = false;
    console.log("训练完成");
};

// Serialize the current network to JSON and trigger a browser download
// ("chess_model.json") via a temporary object URL and a synthetic click.
AI.saveModel = function() {
    if (!AI.neuralNetwork) {
        console.log("没有可保存的模型");
        return;
    }

    const nn = AI.neuralNetwork;
    const modelData = {
        inputSize: nn.inputSize,
        hiddenSize: nn.hiddenSize,
        outputSize: nn.outputSize,
        hiddenWeights: nn.hiddenWeights,
        outputWeights: nn.outputWeights,
        hiddenBias: nn.hiddenBias,
        outputBias: nn.outputBias
    };

    const blob = new Blob([JSON.stringify(modelData)], {type: "application/json"});
    const url = URL.createObjectURL(blob);

    // Attach a hidden anchor, click it, then clean everything up.
    const anchor = document.createElement("a");
    anchor.href = url;
    anchor.download = "chess_model.json";
    document.body.appendChild(anchor);
    anchor.click();
    document.body.removeChild(anchor);
    URL.revokeObjectURL(url);

    console.log("模型已保存");
};

// Load a network previously produced by AI.saveModel from a File object
// (asynchronous; replaces AI.neuralNetwork when parsing succeeds).
AI.loadModel = function(file) {
    const reader = new FileReader();
    reader.onload = function(e) {
        try {
            const m = JSON.parse(e.target.result);
            AI.neuralNetwork = {
                inputSize: m.inputSize,
                hiddenSize: m.hiddenSize,
                outputSize: m.outputSize,
                hiddenWeights: m.hiddenWeights,
                outputWeights: m.outputWeights,
                hiddenBias: m.hiddenBias,
                outputBias: m.outputBias
            };
            console.log("模型加载成功");
        } catch (error) {
            console.error("模型加载失败:", error);
        }
    };
    reader.readAsText(file);
};

// Record one (position, evaluation) sample for later use by AI.train.
AI.addTrainingData = function(map, my, evaluation) {
    // Ensure the network (and therefore its input size) exists before encoding.
    if (!AI.neuralNetwork) {
        AI.initNeuralNetwork();
    }

    AI.trainingData.push({
        input: AI.boardToInput(map, my),
        target: evaluation
    });
};

// One-ply greedy search scored by the neural network: try every legal move,
// evaluate the resulting position for the opponent, negate (negamax), and
// keep the highest-scoring move.
// Returns [fromX, fromY, toX, toY] or false when there is no legal move.
AI.getBestMoveWithNN = function(map, my) {
    if (!AI.neuralNetwork) {
        console.log("神经网络未初始化");
        // NOTE(review): AI.init expects the move-history string `pace`;
        // calling it with no argument only avoids a crash once the opening
        // book is exhausted — confirm the intended fallback argument.
        return AI.init();
    }

    const moves = AI.getMoves(map, my);
    if (moves.length === 0) {
        return false;
    }

    let bestMove = null;
    let bestValue = -Infinity;

    for (const move of moves) {
        const [oldX, oldY, newX, newY, key] = move;
        const clearKey = map[newY][newX] || "";

        // Make the move on the shared board / piece state...
        map[newY][newX] = key;
        delete map[oldY][oldX];
        play.mans[key].x = newX;
        play.mans[key].y = newY;

        // ...score the result from the opponent's point of view...
        const value = -AI.evaluateWithNN(map, -my);

        // ...then restore everything exactly as it was.
        play.mans[key].x = oldX;
        play.mans[key].y = oldY;
        map[oldY][oldX] = key;
        delete map[newY][newX];
        if (clearKey) {
            map[newY][newX] = clearKey;
        }

        if (value > bestValue) {
            bestValue = value;
            bestMove = move;
        }
    }

    return bestMove ? [bestMove[0], bestMove[1], bestMove[2], bestMove[3]] : false;
};

// Collect every piece on the board belonging to side `my`, refreshing each
// piece object's cached x/y coordinates from the map as a side effect.
AI.getMapAllMan = function (map, my){
	var result = [];
	for (var row = 0; row < map.length; row++){
		for (var col = 0; col < map[row].length; col++){
			var key = map[row][col];
			if (!key){
				continue;
			}
			var man = play.mans[key];
			if (man.my == my){
				man.x = col;
				man.y = row;
				result.push(man);
			}
		}
	}
	return result;
}

/*
//取得棋谱所有己方棋子的着法
AI.getMoves = function (map, my, txtMap){
	var highMores = [];   //优先级高的着法
	var manArr = AI.getMapAllMan (map, my);
	var moves = [];
	var history=AI.historyTable[txtMap];
	for (var i=0; i<manArr.length; i++){
		var man = manArr[i];
		var val=man.bl(map);
		for (var n=0; n<val.length; n++){
			if (history){
				highMores.push([man.x,man.y,val[n][0],val[n][1],man.key])
			}else{
				moves.push([man.x,man.y,val[n][0],val[n][1],man.key])
			}
		}
	}
	return highMores.concat(moves);
}
*/
// Generate every candidate move for side `my`, excluding the single move
// recorded in play.isFoul (the perpetual-check / repetition ban).
// Each move is [fromX, fromY, toX, toY, pieceKey].
AI.getMoves = function (map, my){
	var mans = AI.getMapAllMan(map, my);
	var moves = [];
	var foul = play.isFoul;
	for (var i = 0; i < mans.length; i++){
		var man = mans[i];
		var targets = man.bl(map); // piece-specific legal destinations
		for (var n = 0; n < targets.length; n++){
			var toX = targets[n][0];
			var toY = targets[n][1];
			// Skip only the banned move (matches on all four coordinates).
			if (foul[0] == man.x && foul[1] == man.y && foul[2] == toX && foul[3] == toY){
				continue;
			}
			moves.push([man.x, man.y, toX, toY, man.key]);
		}
	}
	return moves;
}
// Negamax alpha-beta search.
// A: alpha (best score already guaranteed for the side to move)
// B: beta (bound from the opponent's perspective)
// depth: remaining plies; map: board (mutated in place and restored)
// my: side to move (1/-1; negated on each recursion)
// Returns {key,x,y,value}; at the root (depth == AI.treeDepth) it returns the
// best root move, or false when no root move exists (the AI is mated). A move
// that captures the enemy king short-circuits with value 8888.
// NOTE: some lines are indented with U+3000 ideographic spaces (valid JS
// whitespace); preserved as-is.
AI.getAlphaBeta = function (A, B, depth, map ,my) { 
	//var txtMap= map.join();
	//var history=AI.historyTable[txtMap];
	//	if (history && history.depth >= AI.treeDepth-depth+1){
	//		return 	history.value*my;
	//}
	if (depth == 0) {
		return {"value":AI.evaluate(map , my)}; // leaf: static evaluation
　	}
　	var moves = AI.getMoves(map , my ); // generate all moves for this side
　	// NOTE(review): ordering moves here would improve pruning efficiency

	for (var i=0; i < moves.length; i++) {
		
		
　　	// make this move on the shared board/piece state
		var move= moves[i];
		var key = move[4];
		var oldX= move[0];
		var oldY= move[1];
		var newX= move[2];
		var newY= move[3];
		var clearKey = map[ newY ][ newX ]||"";

		map[ newY ][ newX ] = key;
		delete map[ oldY ][ oldX ];
		play.mans[key].x = newX;
		play.mans[key].y = newY;
		
	　　if (clearKey=="j0"||clearKey=="J0") {// a king was captured: undo the move and cut off immediately
			play.mans[key]	.x = oldX;
			play.mans[key]	.y = oldY;
			map[ oldY ][ oldX ] = key;
			delete map[ newY ][ newX ];
			if (clearKey){
				 map[ newY ][ newX ] = clearKey;
				// play.mans[ clearKey ].isShow = false;
			}

			return {"key":key,"x":newX,"y":newY,"value":8888};
			//return rootKey; 
	　　}else { 
	　　	var val = -AI.getAlphaBeta(-B, -A, depth - 1, map , -my).value; 
			//val = val || val.value;
	
	　　	// undo this move
			play.mans[key]	.x = oldX;
			play.mans[key]	.y = oldY;
			map[ oldY ][ oldX ] = key;
			delete map[ newY ][ newX ];
			if (clearKey){
				 map[ newY ][ newX ] = clearKey;
				 //play.mans[ clearKey ].isShow = true;
			}
	　　	if (val >= B) { 
				// beta cutoff: the opponent will avoid this line
				//AI.setHistoryTable(txtMap,AI.treeDepth-depth+1,B,my);
				return {"key":key,"x":newX,"y":newY,"value":B}; 
			} 
			if (val > A) { 
　　　　	A = val; // new best score; remember the move when at the root
				if (AI.treeDepth == depth) var rootKey={"key":key,"x":newX,"y":newY,"value":A};
			} 
		} 
　	} 
	// (history-table write currently disabled)
	//AI.setHistoryTable(txtMap,AI.treeDepth-depth+1,A,my);
	if (AI.treeDepth == depth) {// the recursion has returned to the root
		if (!rootKey){
			// no best root move: the AI is mated, report false
			return false;
		}else{
			// this is the best root move
			return rootKey;
		}
	}
　return {"key":key,"x":newX,"y":newY,"value":A}; 
}

// Record a searched position in the history table (history heuristic).
// txtMap: board serialized as a string; depth: search depth of this entry;
// value: score to remember; my: side the score applies to (currently unused).
AI.setHistoryTable = function (txtMap, depth, value, my){
	// Entry counter. Bug fix: the original wrote ".lenght ++" — a typo for
	// "length", which is a non-writable function property anyway — so the
	// counter uses its own self-initializing name.
	AI.setHistoryTable.count = (AI.setHistoryTable.count || 0) + 1;
	AI.historyTable[txtMap] = {depth: depth, value: value};
}

// Static evaluation: score of `map` from the point of view of side `my`
// (positive is good for `my`). Delegates to the neural network when one is
// available, otherwise sums position-dependent piece values.
AI.evaluate = function (map, my){
	// Consistency fix: count every evaluated leaf (AI.number is the search
	// branch counter shown in the UI); the original skipped it on the NN path.
	AI.number++;

	// Prefer the neural network whenever one has been initialized/loaded.
	if (AI.neuralNetwork) {
		return AI.evaluateWithNN(map, my);
	}

	// Classic evaluation: each piece's positional value, signed by its owner,
	// then the total oriented toward `my`.
	var val = 0;
	for (var i = 0; i < map.length; i++){
		for (var n = 0; n < map[i].length; n++){
			var key = map[i][n];
			if (key){
				val += play.mans[key].value[i][n] * play.mans[key].my;
			}
		}
	}
	return val * my;
}

// Alternate static evaluation (unused variant of AI.evaluate): walks the
// piece list instead of the board, counting only pieces still in play
// (man.isShow), using each piece's cached coordinates.
AI.evaluate1 = function (map, my){
	var total = 0;
	for (var id in play.mans){
		var man = play.mans[id];
		if (!man.isShow){
			continue;
		}
		total += man.value[man.y][man.x] * man.my;
	}
	AI.number++; // leaf-evaluation counter
	return total * my;
}


