#!/usr/bin/env python
# coding: utf-8

# In[1]:


"""soduku"""

"""数据集处理"""

import torch.utils.data as data
import torch
import pandas as pd

# Data preprocessing: one-hot encode puzzles/solutions and split train/test
def create_sudoku_tensors(df, train_split=0.5):
    """Convert a dataframe of sudoku strings into one-hot train/test datasets.

    Args:
        df: DataFrame with ``quizzes`` and ``solutions`` columns, each an
            81-character digit string ('0' marks an empty cell).
        train_split: fraction of rows placed in the training set.

    Returns:
        (train, test) tuple of TensorDataset(quizzes, solutions); each tensor
        has shape (rows, 81, 9) and empty cells are all-zero rows.
    """
    n_rows = df.shape[0]

    def one_hot_encode(puzzle):
        # (1, 81, 9) one-hot grid; digit d maps to channel d-1.
        # BUG FIX: the original wrote `zeros[0, a, int(s[a]) - 1] = 0` for
        # empty cells, i.e. a negative-index write to channel 8 that only
        # worked because the value written was 0. Guard explicitly instead.
        encoded = torch.zeros((1, 81, 9), dtype=torch.float)
        for cell in range(81):
            digit = int(puzzle[cell])
            if digit > 0:
                encoded[0, cell, digit - 1] = 1
        return encoded

    quizzes_t = torch.cat(df.quizzes.apply(one_hot_encode).values.tolist())
    solutions_t = torch.cat(df.solutions.apply(one_hot_encode).values.tolist())

    # Shuffle rows before splitting so train/test are random samples.
    randperm = torch.randperm(n_rows)
    split = int(train_split * n_rows)
    train_idx, test_idx = randperm[:split], randperm[split:]
    return (data.TensorDataset(quizzes_t[train_idx], solutions_t[train_idx]),
            data.TensorDataset(quizzes_t[test_idx], solutions_t[test_idx]))

# Build the row/column/box constraint mask
def create_constraint_mask():
    """Build the (81, 3, 81) sudoku constraint mask.

    For every cell index (0..80), slot 0/1/2 holds a 0-1 indicator vector
    over all 81 cells marking, respectively, the cells sharing that cell's
    row, column, and 3x3 box.
    """
    mask = torch.zeros((81, 3, 81), dtype=torch.float)
    for cell in range(81):
        row, col = divmod(cell, 9)
        # Row constraint: the 9 cells of this cell's row.
        for k in range(9):
            mask[cell, 0, 9 * row + k] = 1
        # Column constraint: the 9 cells of this cell's column.
        for k in range(9):
            mask[cell, 1, col + 9 * k] = 1
        # Box constraint: the 9 cells of this cell's 3x3 box, starting at
        # the box's top-left corner.
        corner = 27 * (row // 3) + 3 * (col // 3)
        for k in range(9):
            mask[cell, 2, corner + (k % 3) + 9 * (k // 3)] = 1
    return mask

# Load the data
def load_dataset(subsample=10000, path='D:\\data\\3\\sudoku.csv'):
    """Load the sudoku CSV and return (train_set, test_set) TensorDatasets.

    Args:
        subsample: number of rows randomly sampled from the full CSV.
        path: location of the CSV file (columns: quizzes, solutions).
              Default preserves the original hard-coded location, so
              existing callers are unaffected.
    """
    dataset = pd.read_csv(path, sep=',')
    my_sample = dataset.sample(subsample)
    # create_sudoku_tensors already returns the (train, test) pair.
    return create_sudoku_tensors(my_sample)


# In[2]:


"""RNN机器学习网络模型"""

import torch
import torch.nn as nn


class SudokuSolver(nn.Module):
    """Neural sudoku solver.

    A two-layer MLP scores each empty cell's n candidate digits from the
    digit counts along the cell's row/column/box constraints; ``forward``
    then greedily commits the single most confident cell per iteration
    until every board in the batch is filled.
    """
    def __init__(self, constraint_mask, n=9, hidden1=100):
        """
        Args:
            constraint_mask: (n^2, 3, n^2) 0/1 tensor as produced by
                create_constraint_mask(); reshaped here for broadcasting
                against a (batch, 1, 1, n^2, n) board in forward().
            n: board side length (9 for standard sudoku).
            hidden1: hidden-layer width of the scoring MLP.
        """
        super(SudokuSolver, self).__init__()
        # NOTE(review): stored as a plain attribute, not via
        # register_buffer(), so it will not follow the module across
        # .to(device) / .cuda() — presumably CPU-only use; confirm.
        self.constraint_mask = constraint_mask.view(1, n * n, 3, n * n, 1)
        self.n = n
        self.hidden1 = hidden1
        
        # Feature vector per cell is the digit counts of its 3 constraints.
        self.input_size = 3 * n
        self.l1 = nn.Linear(self.input_size,
                            self.hidden1, bias=False)
        self.a1 = nn.ReLU()
        self.l2 = nn.Linear(self.hidden1,
                            n, bias=False)
        self.softmax = nn.Softmax(dim=1)
    
    # x is a (batch, n^2, n) one-hot tensor; empty cells are all-zero rows.
    # WARNING: x is filled in-place via index_put_, so the caller's tensor
    # is mutated by this call.
    def forward(self, x):
        """Return (x_pred, x).

        x_pred holds the most recent softmax scores for each cell that was
        empty at any point; x is the same tensor passed in, progressively
        filled with one greedy digit per board per iteration.
        """
        n = self.n
        bts = x.shape[0]
        c = self.constraint_mask
        # Despite the name, this is the MAXIMUM number of empty cells over
        # the batch — that many greedy fill-in rounds are required.
        min_empty = (x.sum(dim=2) == 0).sum(dim=1).max()
        x_pred = x.clone()
        for a in range(min_empty):
            # Per cell: how many of each digit already appear in its
            # row/column/box -> (batch, n^2, 3, n) features.
            constraints = (x.view(bts, 1, 1, n * n, n) * c).sum(dim=3)
            # Cells still empty on the current (partially filled) board.
            empty_mask = (x.sum(dim=2) == 0)
            
            f = constraints.reshape(bts, n * n, 3 * n)
            y_ = self.l2(self.a1(self.l1(f[empty_mask])))

            s_ = self.softmax(y_)
            
            # Record the latest scores for the still-empty cells.
            x_pred[empty_mask] = s_
            
            s = torch.zeros_like(x_pred)
            s[empty_mask] = s_
            # Most probable digit per cell, then most confident cell per board.
            score, score_pos = s.max(dim=2)
            mmax = score.max(dim=1)[1]
            # Commit that guess in-place, but only for boards that still
            # have at least one empty cell (nz indexes those boards).
            nz = empty_mask.sum(dim=1).nonzero().view(-1)
            mmax_ = mmax[nz]
            ones = torch.ones(nz.shape[0])
            x.index_put_((nz, mmax_, score_pos[nz, mmax_]), ones)
        return x_pred, x


# In[6]:


"""模型训练"""

import torch
import torch.utils.data as data
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt

batch_size = 100

# Load data, build the constraint mask, and set up loaders.
train_set, test_set = load_dataset()

constraint_mask = create_constraint_mask()

dataloader_ = data.DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True)

dataloader_val_ = data.DataLoader(test_set,
                                  batch_size=batch_size,
                                  shuffle=True)

# MSE between predicted per-cell softmax scores and the one-hot solution.
loss = nn.MSELoss()

sudoku_solver = SudokuSolver(constraint_mask)

optimizer = optim.Adam(sudoku_solver.parameters(),
                       lr=0.01,
                       weight_decay=0.000)

epochs = 2
loss_train = []  # per-batch training loss
loss_val = []    # per-batch validation cell-error counts

for e in range(epochs):
    for i_batch, ts_ in enumerate(dataloader_):
        sudoku_solver.train()
        optimizer.zero_grad()
        pred, mat = sudoku_solver(ts_[0])
        ls = loss(pred, ts_[1])
        ls.backward()
        optimizer.step()
        # BUG FIX: loss_train was declared but never populated.
        loss_train.append(ls.item())
        print("Epoch " + str(e) + " batch " + str(i_batch)
              + ": " + str(ls.item()))

        # Quick validation on a random subset of the test set.
        sudoku_solver.eval()
        with torch.no_grad():
            n = 100
            rows = torch.randperm(test_set.tensors[0].shape[0])[:n]
            test_pred, test_fill = sudoku_solver(test_set.tensors[0][rows])
            # Count cells whose committed digit differs from the solution.
            errors = (test_fill.max(dim=2)[1]
                      != test_set.tensors[1][rows].max(dim=2)[1])
            loss_val.append(errors.sum().item())
            print("Cells in error: " + str(errors.sum().item()))

# In[ ]:




