#!/usr/bin/env python
# coding: utf-8

# In[1]:


"""GCN建立神经网络"""

"""导入数据"""

# Build the quiz/solution integer arrays from the CSV
import numpy as np
# Load 1,000,000 sudoku puzzles and their solutions.
# CSV layout (after a header row): quiz,solution — each an 81-character
# digit string, with 0 marking a blank cell in the quiz.
quizzes = np.zeros((1000000, 81), np.int32)
solutions = np.zeros((1000000, 81), np.int32)
# Use a context manager so the file handle is closed deterministically
# (the original left the file open for the lifetime of the process).
with open('D:\\data\\3\\sudoku.csv', 'r') as f:
    lines = f.read().splitlines()[1:]  # drop the header row
for i, line in enumerate(lines):
    quiz, solution = line.split(",")
    for j, (q, s) in enumerate(zip(quiz, solution)):
        quizzes[i, j] = q       # numpy casts the digit character to int32
        solutions[i, j] = s
# Network inputs: (N, 9, 9, 1) scaled into [0, 0.9]; targets: (N, 9, 9) digits.
quizzes = quizzes.reshape((1000000, 9, 9, 1)) / 10
solutions = solutions.reshape((1000000, 9, 9))

# Peek at one puzzle (notebook display leftover; *10 undoes the scaling).
quizzes[3].reshape((9, 9)) * 10

# Peek at the corresponding solution (notebook display leftover).
solutions[3].reshape((9, 9))


# In[5]:


# Switch TensorFlow to TF1-style graph mode — presumably needed because the
# custom loss below captures the symbolic `inputs` tensor at compile time,
# which eager execution would not allow. TODO(review): confirm on upgrade.
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()


# In[6]:


import tensorflow as tf
def sudoku_edges():
    """Build the 81x81 adjacency matrix A of the sudoku constraint graph.

    Two cells are adjacent iff they share a row, a column, or a 3x3 box.
    Returns a symmetric 0/1 float array with zero diagonal; every cell
    has exactly 20 neighbours (8 row + 8 column + 4 box-only peers).
    """
    def cross(a):
        # All ordered pairs of distinct cell indices within group `a`.
        return [(i, j) for i in a.flatten() for j in a.flatten() if i != j]

    idx = np.arange(81).reshape(9, 9)
    edges = []
    for i in range(9):
        edges += cross(idx[i, :])   # row constraints
        edges += cross(idx[:, i])   # column constraints
    for i in range(3):
        for j in range(3):
            # 3x3 box constraints
            edges += cross(idx[i * 3:(i + 1) * 3, j * 3:(j + 1) * 3])

    A = np.zeros((81, 81))
    # De-duplicate (row/column/box pairs overlap) and symmetrize.
    for i, j in set(edges):
        A[i, j] = 1
        A[j, i] = 1
    return A
def normalize_digraph(A):
    """Column-normalize an adjacency matrix.

    Returns A @ D^-1 where D is the diagonal in-degree matrix (column
    sums of A). Columns whose degree is zero are left as all zeros.
    """
    degree = np.sum(A, axis=0)
    inv_degree = np.zeros_like(degree, dtype=float)
    nonzero = degree > 0
    inv_degree[nonzero] = 1.0 / degree[nonzero]
    return np.dot(A, np.diag(inv_degree))
# Edge weights: all equal. Multiplying the 0/1 adjacency by an all-ones
# matrix is effectively a no-op, kept as an explicit statement of intent.
A=sudoku_edges()*np.ones((81,81))
print(A)
# Column-normalized adjacency, reshaped with a leading batch axis so it can
# be broadcast by tf.matmul against (N, 256, 81) feature tensors below.
AD=normalize_digraph(A)
AD=AD.reshape(1,81,81)
print(AD)
# NOTE: `A` is rebound to the tf tensor here; GCNNet reads this module global.
A=tf.convert_to_tensor(AD)  # (1, 81, 81)
A=tf.cast(A,dtype=tf.float32)


# In[7]:


# Network definition.
input_shape=(1000,9,9,1)

def _graph_propagate(x):
    """Propagate 256-channel 9x9 features over the sudoku graph.

    Reshapes (N, 9, 9, 256) features to (N, 256, 81), right-multiplies by
    the (1, 81, 81) normalized adjacency `A` (module-level tensor), then
    reshapes back to (N, 9, 9, 256).
    """
    x = tf.transpose(x, perm=[0, 3, 1, 2])   # (N, 256, 9, 9)
    x = tf.reshape(x, [-1, 256, 9 * 9])      # (N, 256, 81)
    x = tf.matmul(x, A)                      # aggregate neighbour features
    x = tf.reshape(x, [-1, 256, 81, 1])
    x = tf.transpose(x, perm=[0, 2, 3, 1])   # (N, 81, 1, 256)
    return tf.reshape(x, [-1, 9, 9, 256])

def _gcn_layer(x, **conv_kwargs):
    """One conv(3x3, 256, relu) -> batch-norm -> graph-propagation block."""
    x = tf.keras.layers.Conv2D(256, (3, 3), activation='relu',
                               padding='same', **conv_kwargs)(x)
    x = tf.keras.layers.BatchNormalization()(x)
    return _graph_propagate(x)

def GCNNet(inputs):
    """Build the GCN: four graph-conv blocks chained with residual adds.

    The original wrote the conv -> BN -> propagate block out four times;
    it is factored into _gcn_layer with the identical residual wiring:
    x1; x2 = f(x1)+x1; x3 = f(x2)+x2; x4 = f(x3)+x3.

    Returns a (N, 9, 9, 10) per-cell softmax over the 10 digit classes.
    """
    x = _gcn_layer(inputs, input_shape=input_shape[1:])
    res = x
    for _ in range(3):
        x = _gcn_layer(x) + res   # residual connection
        res = x

    outputs0 = tf.keras.layers.Conv2D(10, (3, 3), activation='softmax',
                                      padding='same')(x)
    print(outputs0.shape)
    return outputs0


# In[8]:


# Assemble the Keras model: (9, 9, 1) scaled puzzle in, per-cell digit
# softmax (9, 9, 10) out.
inputs = tf.keras.layers.Input(shape=(9,9,1), name='inputs')
outputs0=GCNNet(inputs)
auto_encoder =tf.keras.Model(inputs,outputs0)
auto_encoder.summary()


# In[9]:


class WeightedSDRLoss(tf.keras.losses.Loss):
    """Masked sparse-categorical cross-entropy over blank puzzle cells.

    Only cells that are zero (blank) in the noisy input puzzle contribute
    to the loss; given clues are excluded via a 0/1 mask built from the
    symbolic input tensor. NOTE(review): despite the name, this is not a
    signal-to-distortion-ratio loss — the class name is kept for callers.
    """

    def __init__(self, noisy_signal, reduction=tf.keras.losses.Reduction.AUTO, name='WeightedSDRLoss'):
        super().__init__(reduction=reduction, name=name)
        # Symbolic input tensor with the trailing channel axis squeezed away.
        self.x = tf.squeeze(noisy_signal)
        # 1.0 where the puzzle cell is blank (== 0), 0.0 where a clue is given.
        # tf.cast replaces the deprecated tf.compat.v1.to_float (same float32 result).
        self.istarget = tf.cast(tf.equal(self.x, tf.zeros_like(self.x)), tf.float32)

    def call(self, y_true, y_pred):
        # Per-cell cross-entropy, shape (None, 9, 9).
        loss = tf.keras.metrics.sparse_categorical_crossentropy(y_true, y_pred)
        # Average only over the blank cells.
        return tf.reduce_sum(loss * self.istarget) / tf.reduce_sum(self.istarget)


# In[10]:



# Compile: the loss is masked to the blank cells of the input puzzle.
auto_encoder.compile(optimizer='adam', loss=WeightedSDRLoss(inputs))

# Train with a 70/30 train/validation split.
history = auto_encoder.fit(quizzes, solutions, batch_size=128, shuffle=True,epochs=5,validation_split=0.3)


# In[ ]:


# Iteratively solve one puzzle: on every step predict all cells, commit only
# the single most confident prediction among the still-blank cells, then
# feed the updated board back into the model.
n=10  # index of the puzzle to solve
x=quizzes[n]
y=solutions[n].reshape(1,9,9)
x_=x.reshape(1,9,9)      # original scaled puzzle, used to locate given clues
x__=x.reshape(1,9,9,1)

temp=[]                  # flat indices of cells already committed
for i in range(80):
    # BUG FIX: the trained model is `auto_encoder`; `tt` was never defined.
    pred_ = auto_encoder.predict(x.reshape(1,9,9,1))

    # Zero out predictions at cells that were given in the puzzle.
    istarget=(np.equal(x__,np.zeros_like(x__))).astype(np.float64)
    pred_= pred_* istarget

    prob=np.max(pred_, axis=-1)        # (1, 9, 9): top confidence per cell
    prob=prob.reshape(1,81)
    prob[:,temp]=0                     # never re-pick an already committed cell
    pred=np.argmax(pred_,axis=-1)      # (1, 9, 9): predicted digit per cell
    pred=pred.reshape(1,81)
    maxprob_ids=np.argmax(prob,axis=1) # most confident remaining blank cell
    temp.append(maxprob_ids)
    print(prob)

    x=np.reshape(x,(1,81))
    # Commit the chosen digit, scaled by /10 to match the input encoding.
    x[:,maxprob_ids]=pred[:,maxprob_ids]/10
    print("将{}位置处的变成{}".format(maxprob_ids,pred[:,maxprob_ids]/10))
    x=np.reshape(x,(1,9,9))

    # Re-impose the given clues from the ground truth (identical to the
    # puzzle's clue values, so this only guards against clobbering them).
    x=np.where(x_==0,x,y/10)

    if np.count_nonzero(x)==x.size:
        break   # no blank cells remain — the board is full
x=x.reshape(9,9) *10
print("预测第{}个数独为:".format(n))
print(x)

