#!/usr/bin/python
#  coding=UTF-8
import csv
from numpy import *


def normalizing(array):
    """Binarize *array* in place: every non-zero entry becomes 1.

    Used to turn grayscale pixel values (0-255) into 0/1 features.
    The input is modified in place and also returned for convenience.

    :param array: writable numpy array (or matrix) of numbers
    :return: the same array object, binarized
    """
    # Vectorized mask assignment replaces the original O(m*n) Python
    # double loop (which also relied on Python-2-only `xrange`).
    array[array != 0] = 1
    return array


def to_int(array):
    """Convert a (possibly 1-D, possibly string-valued) array to a 2-D int array.

    The CSV reader yields strings; `mat` promotes 1-D input to a 1*n
    2-D layout, `.astype(int)` parses the values, and `asarray` drops
    the matrix subclass so a plain ndarray is returned.

    Fix: the original filled a `zeros((m, n))` buffer, whose default
    dtype is float64 — so "integer" labels flowed through the pipeline
    as floats and were written to the result CSVs as e.g. "3.0".
    Returning a true integer array fixes that, and the vectorized cast
    replaces the Python-2-only `xrange` double loop.

    :param array: array-like of ints or numeric strings
    :return: 2-D ndarray of dtype int
    """
    return asarray(mat(array).astype(int))


def load_train_data():
    """Load Kaggle's train.csv and split it into features and labels.

    The file layout is: header row, then one row per digit image with
    the label in column 0 and 784 pixel values after it (42000 rows).

    :return: (data, labels) — binarized int pixel matrix and int labels
    """
    with open('train.csv') as csv_file:
        rows = list(csv.reader(csv_file))  # 42001*785 including header
    # Drop the header by position. The original `l.remove(l[0])`
    # searched the whole list for a row *equal* to the header — O(n)
    # and fragile; `del` removes index 0 directly.
    del rows[0]
    rows = array(rows)
    labels = rows[:, 0]     # 1*42000 after to_int
    pixels = rows[:, 1:]    # 42000*784
    return normalizing(to_int(pixels)), to_int(labels)


def loadTestData():
    """Load Kaggle's test.csv as a binarized int feature matrix.

    The file layout is: header row, then 28000 rows of 784 pixel values
    (no label column).

    :return: 28000*784 binarized int ndarray
    """
    with open('test.csv') as csv_file:
        rows = list(csv.reader(csv_file))  # 28001*784 including header
    # Drop the header by position instead of the original value-search
    # `l.remove(l[0])`.
    del rows[0]
    return normalizing(to_int(array(rows)))


def load_test_result():
    """Load the knn_benchmark.csv reference predictions.

    The file layout is: header row, then 28000 rows of (image id, label);
    only the label column is returned.

    :return: 1*28000 int ndarray of benchmark labels
    """
    with open('knn_benchmark.csv') as csv_file:
        rows = list(csv.reader(csv_file))  # 28001*2 including header
    # Drop the header by position instead of the original value-search
    # `l.remove(l[0])`.
    del rows[0]
    return to_int(array(rows)[:, 1])


# result is the list of predicted labels
# csv_name is the name of the CSV file the results are written to

def save_result(result, csv_name):
    """Write predictions to *csv_name*, one label per row.

    NOTE(review): 'wb' mode is the Python 2 csv idiom; on Python 3 this
    must be open(csv_name, 'w', newline='') instead — the rest of the
    file uses Python 2 syntax (`print`, `xrange`), so 'wb' is kept.

    :param result: iterable of predicted labels
    :param csv_name: output file name
    """
    with open(csv_name, 'wb') as out_file:
        # One writerows call instead of building a throwaway one-item
        # list per label inside the loop.
        csv.writer(out_file).writerows([label] for label in result)


# Use scikit-learn's k-nearest-neighbors implementation
from sklearn.neighbors import KNeighborsClassifier


def knn_classify(train_data, train_lable, test_data):
    """Fit a k-NN classifier (default k=5) and predict *test_data*.

    Predictions are also written to 'sklearn_knn_Result.csv'.
    Pass n_neighbors to KNeighborsClassifier to change k, e.g.
    KNeighborsClassifier(n_neighbors=10).

    :return: array of predicted labels
    """
    classifier = KNeighborsClassifier()
    classifier.fit(train_data, ravel(train_lable))
    predictions = classifier.predict(test_data)
    save_result(predictions, 'sklearn_knn_Result.csv')
    return predictions


# Use scikit-learn's SVM implementation
from sklearn import svm


def svm_classify(train_data, train_lable, test_data):
    """Fit an SVC with C=5.0 (rbf kernel) and predict *test_data*.

    Predictions are also written to 'sklearn_SVC_C=5.0_Result.csv'.
    Defaults are C=1.0, kernel='rbf'; other kernels: 'linear', 'poly',
    'sigmoid', 'precomputed'.

    :return: array of predicted labels
    """
    classifier = svm.SVC(C=5.0)
    classifier.fit(train_data, ravel(train_lable))
    predictions = classifier.predict(test_data)
    save_result(predictions, 'sklearn_SVC_C=5.0_Result.csv')
    return predictions


# Use scikit-learn's naive Bayes implementations: GaussianNB and MultinomialNB
from sklearn.naive_bayes import GaussianNB


def gaussian_nb_classify(train_data, train_label, test_data):
    """Fit a Gaussian naive Bayes model and predict *test_data*.

    Predictions are also written to 'sklearn_GaussianNB_Result.csv'.

    :return: array of predicted labels
    """
    model = GaussianNB()
    model.fit(train_data, ravel(train_label))
    predictions = model.predict(test_data)
    save_result(predictions, 'sklearn_GaussianNB_Result.csv')
    return predictions


from sklearn.naive_bayes import MultinomialNB  # NB for multinomially distributed data


def multi_nomial_nb_classify(train_data, train_label, test_data):
    """Fit a multinomial naive Bayes model (alpha=0.1) and predict *test_data*.

    Predictions are also written to
    'sklearn_MultinomialNB_alpha=0.1_Result.csv'.
    Default alpha=1.0 is Laplace smoothing; alpha < 1 is Lidstone
    smoothing.

    :return: array of predicted labels
    """
    model = MultinomialNB(alpha=0.1)
    model.fit(train_data, ravel(train_label))
    predictions = model.predict(test_data)
    save_result(predictions, 'sklearn_MultinomialNB_alpha=0.1_Result.csv')
    return predictions


def digit_recognition():
    """Run all four classifiers on the digit data and compare k-NN to the benchmark.

    Loads train/test sets, runs k-NN, SVM, GaussianNB and MultinomialNB
    (each saves its own result CSV), then prints how many of the k-NN
    predictions differ from the knn_benchmark.csv reference labels.
    """
    train_data, train_label = load_train_data()
    test_data = loadTestData()
    # Run each algorithm; every call writes its own result file.
    result1 = knn_classify(train_data, train_label, test_data)
    result2 = svm_classify(train_data, train_label, test_data)
    result3 = gaussian_nb_classify(train_data, train_label, test_data)
    result4 = multi_nomial_nb_classify(train_data, train_label, test_data)

    # Compare against the given knn_benchmark, using result1 as example.
    result_given = load_test_result()
    m = shape(test_data)[0]  # original also unpacked n, which was unused
    different = 0  # count of result1 labels that differ from the benchmark
    # `range` and the parenthesized print work on both Python 2 and 3,
    # unlike the original `xrange` / `print different`.
    for i in range(m):
        if result1[i] != result_given[0, i]:
            different += 1
    print(different)


digit_recognition()
