#!/usr/bin/env python
# coding: utf-8

# In[1]:


import operator
import struct
import numpy as np
from scipy.stats import zscore
# Paths to the MNIST IDX files (expected in the working directory).
train_images_dir = 'train-images.idx3-ubyte'  # training-set images
train_labels_dir = 'train-labels.idx1-ubyte'  # training-set labels
test_images_dir = 't10k-images.idx3-ubyte'  # test-set images
test_labels_dir = 't10k-labels.idx1-ubyte'  # test-set labels


# In[2]:


def decode_idx3_ubyte(idx3_ubyte_file):
    bin_data = open(idx3_ubyte_file, 'rb').read()
    # 解析文件头信息，依次为魔数、图片数量、每张图片高、每张图片宽
    offset = 0
    fmt_header = '>iiii'
    magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)
    print('魔数:%d, 图片数量: %d张, 图片大小: %d*%d' % (magic_number, num_images, num_rows, num_cols))
    # 解析数据集
    image_size = num_rows * num_cols
    offset += struct.calcsize(fmt_header)
    fmt_image = '>' + str(image_size) + 'B'
    images = np.zeros([num_images,num_rows,num_cols])
    for i in range(num_images):
        images[i] = np.array(struct.unpack_from(fmt_image, bin_data, offset)).reshape((num_rows,num_cols))
        offset += struct.calcsize(fmt_image)
    return images
 
def decode_idx1_ubyte(idx1_ubyte_file):
    """Parse an IDX1-ubyte label file (MNIST format).

    The header holds two big-endian int32 fields: magic number and label
    count; each label follows as one unsigned byte.

    Returns a float64 ndarray of shape (num_labels,).
    """
    # Context manager closes the handle (the original leaked it).
    with open(idx1_ubyte_file, 'rb') as f:
        bin_data = f.read()
    # Header: magic number and label count.
    offset = 0
    fmt_header = '>ii'
    magic_number, num_images = struct.unpack_from(fmt_header, bin_data, offset)
    print('魔数:%d, 图片数量: %d张' % (magic_number, num_images))
    offset += struct.calcsize(fmt_header)
    # All labels are single bytes — decode them in one pass; astype matches
    # the float64 dtype of the original np.zeros buffer.
    labels = np.frombuffer(bin_data, dtype=np.uint8,
                           count=num_images, offset=offset).astype(np.float64)
    return labels


# In[3]:


def average_pooling(feature_map, size, stride):
    """Average-pool each channel of a (channel, height, width) array.

    Slides a size x size window with the given stride over every channel
    and writes the window mean into the output.

    BUG FIXES vs. the original:
    - the divisor was hard-coded to 4, which is only correct for size == 2;
      it is now size * size;
    - the output size used round((dim - size + 1) / stride) while the loop
      ran over the full input range, which overruns the output buffer for
      size != 2; both now use the standard floor((dim - size) / stride) + 1,
      which is identical for the size=2/stride=2 case used in this script.

    Returns a float64 ndarray of shape (channel, out_height, out_width).
    """
    channel, height, width = feature_map.shape
    out_height = (height - size) // stride + 1
    out_width = (width - size) // stride + 1
    print(out_height, out_width)
    pool_out = np.zeros((channel, out_height, out_width))
    window_area = size * size  # was hard-coded 4
    for map_num in range(channel):
        for i, r in enumerate(range(0, height - size + 1, stride)):
            for j, c in enumerate(range(0, width - size + 1, stride)):
                window = feature_map[map_num, r:r + size, c:c + size]
                pool_out[map_num, i, j] = np.sum(window) / window_area
    return pool_out

def max_pooling(feature_map, size, stride):
    """Max-pool each channel of a (channel, height, width) array.

    Slides a size x size window with the given stride over every channel
    and keeps the window maximum.  The output plane is
    round(height / stride) x round(width / stride), matching the loop that
    steps the full input range.

    Returns a float64 ndarray of shape (channel, out_height, out_width).
    """
    n_maps, in_h, in_w = feature_map.shape
    out_h = np.uint16(round(in_h / stride))
    out_w = np.uint16(round(in_w / stride))
    print(out_h, out_w)
    pooled = np.zeros((n_maps, out_h, out_w))
    for m in range(n_maps):
        for i, r in enumerate(range(0, in_h, stride)):
            for j, c in enumerate(range(0, in_w, stride)):
                pooled[m, i, j] = np.max(feature_map[m, r:r + size, c:c + size])
    return pooled


# In[4]:


def Feature_Select(feature_map, min_frac=0.125):
    """Drop near-constant feature columns.

    A column is kept when strictly more than ``min_frac`` of the rows have a
    value above 1.0.  The default 1/8 reproduces the original hard-coded
    ``data_size / 8`` bound exactly.

    NOTE(review): the kept-column set is recomputed from whatever data is
    passed in, so calling this separately on train and test data can select
    different columns (and widths); the training selection should ideally
    be reused for the test set.

    Returns a float64 array of shape (n_rows, n_kept_columns).
    """
    data_size, feature_size = feature_map.shape
    belief_bound = data_size * min_frac
    # Per-column count of rows exceeding the activation threshold
    # (vectorized; replaces the original double loop).
    feature_count = (np.asarray(feature_map) > 1.0).sum(axis=0)
    select_col = [j for j in range(feature_size) if feature_count[j] > belief_bound]
    print(select_col)
    # Float copy of just the selected columns (matches the original
    # np.zeros output buffer).
    return np.asarray(feature_map, dtype=float)[:, select_col].copy()

def train(train_dataset,train_labels,class_number):
    data_size=train_dataset.shape[0]
    size=train_dataset.shape[1]
    mu=np.zeros([class_number,size])
    sigma=np.zeros([class_number,size,size])
    W=np.zeros(class_number)
    i=0
    for i in range(data_size):
        label=int(train_labels[i])
        mu[label]=mu[label]+train_dataset[i]
        W[label]+=1
    i=0
    for i in range(class_number):
        mu[i]/=W[i]
    i=0
    for i in range(data_size):
        label=int(train_labels[i])
        tmp=np.array(train_dataset[i]-mu[label]).reshape(1,-1)
        #print(tmp.shape)
        #print("matmul=",np.matmul(tmp.T,tmp))
        sigma[label]=sigma[label]+np.matmul(tmp.T,tmp)
        #print(sigma[label])
    i=0
    for i in range(class_number):
        sigma[i]/=W[i]
        #print(sigma[i])
        sigma[i]=np.linalg.inv(sigma[i])
        W[i]/=data_size
    print("Train is OK!\n")
    return W,mu,sigma

def classify(dataset, class_number, W, mu, sigma_1):
    """Label each sample with the class maximizing the Gaussian discriminant.

    For class j the score is
        log(W[j]) - 0.5 * (x - mu[j]) @ sigma_1[j] @ (x - mu[j])^T
    where ``sigma_1`` holds the inverse covariance matrices (the
    log-determinant term is omitted, as in the training counterpart).

    Returns a float64 array of predicted class indices, one per sample row.
    """
    data_size = dataset.shape[0]
    result = np.zeros(data_size)
    for i in range(data_size):
        result_i = np.zeros(class_number)
        for j in range(class_number):
            x_mu = np.array(dataset[i] - mu[j]).reshape(1, -1)
            quad = np.matmul(np.matmul(x_mu, sigma_1[j]), x_mu.T)
            # BUG FIX: extract the scalar explicitly — assigning the (1,1)
            # array relied on implicit size-1 conversion, deprecated since
            # NumPy 1.25.
            result_i[j] = np.log(W[j]) - 0.5 * quad.item()
        result[i] = np.argmax(result_i)
    return result


# In[5]:


# Load the MNIST training and test sets and report their shapes.
train_images = decode_idx3_ubyte(train_images_dir)
train_labels = decode_idx1_ubyte(train_labels_dir)
for shape in (train_images.shape, train_labels.shape):
    print(shape)
test_images = decode_idx3_ubyte(test_images_dir)
test_labels = decode_idx1_ubyte(test_labels_dir)
for shape in (test_images.shape, test_labels.shape):
    print(shape)


# In[6]:


# Downsample each image twice — 2x2 average pooling (28x28 -> 14x14) then
# 2x2 max pooling (14x14 -> 7x7) — and flatten every image to a vector.
train_images = average_pooling(train_images, 2, 2)
test_images = average_pooling(test_images, 2, 2)
train_images = max_pooling(train_images, 2, 2)
test_images = max_pooling(test_images, 2, 2)
# Use the actual sample counts instead of hard-coded 60000/10000.
train_images = train_images.reshape(train_images.shape[0], -1)
test_images = test_images.reshape(test_images.shape[0], -1)
print(train_images.shape)
print(test_images.shape)


# In[7]:


# Keep only informative pixel columns.
# NOTE(review): Feature_Select recomputes the column set from the data it is
# given, so the train and test sets may end up with different columns (and
# even different widths); the columns chosen from the training set should be
# reused for the test set.
train_dataset = Feature_Select(train_images)
print(train_dataset.shape)
test_dataset = Feature_Select(test_images)
print(test_dataset.shape)


# In[8]:


# Train the Gaussian classifier on the 10 digit classes, predict the test
# set, and count misclassifications.  (A duplicated, immediately-overwritten
# `size = ...` assignment was removed.)
W, mu, sigma_1 = train(train_dataset, train_labels, 10)
predict_labels = classify(test_dataset, 10, W, mu, sigma_1)
errorCount = 0.0
size = test_dataset.shape[0]
for i in range(size):
    if predict_labels[i] != test_labels[i]:
        errorCount += 1
print("\n错误数： %d" % errorCount)


# In[9]:


# Per-digit confusion counts: true positives, false negatives, false positives.
TP = np.zeros(10)
FN = np.zeros(10)
FP = np.zeros(10)
for i in range(test_labels.shape[0]):
    truth = int(test_labels[i])
    guess = int(predict_labels[i])
    if truth == guess:
        TP[truth] += 1
    else:
        FN[truth] += 1
        FP[guess] += 1


# In[10]:


# Write per-digit precision and recall to ans.txt, values truncated to five
# characters.  A context manager flushes and closes the file (the original
# left the handle open, risking an unflushed/locked file).
with open("ans.txt", 'w') as ans:
    for i in range(10):
        ans.write("Num is:" + str(i))
        ans.write(" Precision is " + str(TP[i] / (TP[i] + FP[i]))[0:5])
        ans.write(" Recall is " + str(TP[i] / (TP[i] + FN[i]))[0:5])
        ans.write("\n")


# In[ ]:




