#! python3
# -*- coding: utf-8 -*-
"""
本代码沿用knn_recog.py的方法，使用sklearn的knn分类器，但是数据集改为UCI的DBRHD（Recognition
of Handwritten digits），导入数据集的函数有变，处理用户输入需要调用的函数也完全不同。所以
在新的文件编写代码，能独立运行。

2019-2-14
"""

import numpy as np
from sklearn import neighbors
import os
import logging

logging.basicConfig(level=logging.DEBUG)


# file relative
PATH = os.path.join(r'dataSet', 'dbrhd')  # directory holding all DBRHD data files
F_NAME_TRAIN = r'pendigits.tra'  # original UCI training set file
F_NAME_TEST = r'pendigits.tes'  # original UCI testing set file
NUM_ATTRIBUTE = 16  # the number of attributes of a vector in dataset
SEP = ','  # separator between fields on each line of the data files

# training relative
N_NEIGHBORS = 3  # k used by the k-nearest-neighbours classifier
discard_digit = (1,)  # discard the data in the original data set with labels of these digits in this tuple

def load_data(f_name):
    """Load the training or testing data from file.

    Each line of the file contains NUM_ATTRIBUTE comma-separated integer
    attributes followed by the digit label as the last field.

    Parameters
    ----------
    f_name : str
        Path of the data file to read.

    Returns
    -------
    (dataSet, label) : tuple of ndarray
        dataSet has shape (n_samples, NUM_ATTRIBUTE), label has shape
        (n_samples,); both have dtype int.
    """
    print('import dataset', f_name)
    num = NUM_ATTRIBUTE

    # 'with' closes the file on exit; the explicit f.close() the original
    # called inside the with-block was redundant.
    with open(f_name) as f:
        lines = f.readlines()

    logging.debug('len of lines: %s' % str(len(lines)))

    dataSet = np.zeros([len(lines), num], dtype=int)
    label = np.zeros([len(lines)], dtype=int)

    # Split each line exactly once.  The original split every line `num`
    # times in the attribute loop and once more in a second label loop.
    for i, raw in enumerate(lines):
        fields = raw.split(SEP)
        dataSet[i] = [int(v) for v in fields[:num]]
        label[i] = int(fields[-1])  # int() tolerates the trailing newline

    return dataSet, label


def predict(vector):
    """Classify *vector* with the module-level trained knn instance.

    Exists so other modules can call prediction without touching the
    global `knn` object directly.
    """
    prediction = knn.predict(vector)
    return prediction


print('loading dataSet...')
train_dataSet, train_label = load_data(os.path.join(PATH, F_NAME_TRAIN))
extend_dataSet_0, extend_label_0 = load_data(os.path.join(PATH, 'extend_0.tra'))
extend_dataSet_1, extend_label_1 = load_data(os.path.join(PATH, 'extend_1.tra'))
extend_dataSet_3, extend_label_3 = load_data(os.path.join(PATH, 'extend_3.tra'))
extend_dataSet_7, extend_label_7 = load_data(os.path.join(PATH, 'extend_7.tra'))
input_dataSet_0, input_label_0 = load_data(os.path.join(PATH, r'input_0.tra'))

# ====== discard disturbing original data ===== ===== =====
print('deleting disturbing data...')
for digit in discard_digit:
    keep = train_label != digit  # build the mask once, reuse for data and label
    train_dataSet = train_dataSet[keep]
    train_label = train_label[keep]
# ===== ===== ===== ===== ===== ===== ===== ===== ==== ====

# Append the extension sets (extend_* plus the recorded input_0 samples)
# in one concatenation per array.  Order matches the original code:
# extend_0, extend_1, input_0, extend_3, extend_7.  (The original's
# "# input 1" comment mislabelled the input_0 data.)
train_dataSet = np.concatenate([
    train_dataSet,
    extend_dataSet_0,
    extend_dataSet_1,
    input_dataSet_0,
    extend_dataSet_3,
    extend_dataSet_7,
])
train_label = np.concatenate([
    train_label,
    extend_label_0,
    extend_label_1,
    input_label_0,
    extend_label_3,
    extend_label_7,
])

print('training...')
knn = neighbors.KNeighborsClassifier(n_neighbors=N_NEIGHBORS, algorithm='brute')
knn.fit(train_dataSet, train_label)
print('training finished.\n\n')

if __name__ == '__main__':
    print("begin testing...")
    print()

    # Re-train on the unmodified original UCI training set before scoring,
    # so the reported accuracy reflects the stock data.  Flip the condition
    # to False to evaluate the extended/filtered training set built above.
    if True:
        print(u'该程序采用的是UCI的Pen-Based recognition of handwritten digits数据集。'
              u'该数据集采集了44位实验者的手写字迹，其中30位实验者的数据用于训练集，另外'
              u'14位实验者的数据用于测试集等。但是在实验中发现，美国人的书写习惯与'
              u'中国人不同，直接使用原训练集容易造成将中国人（至少是我）书写的7识别成1，'
              u'并且几乎识别不出来1和顺时针写的0。所以我对原训练集的数据做了进一步的处理，'
              u'扩展了原训练集的数据。'
              u'\n\n'
              u'另外在导入过程中删除了训练集中一部分会对识别造成干扰的数据，以提高对国人笔迹的'
              u'识别正确率。但是使用改进过的训练集测试原测试集却会导致正确率下降。所以这里重新'
              u'导入原训练集，并使用原测试集进行测试，以评价算法的效果。如果想要看改进过的训练集'
              u'的测试效果，将此处条件语句设为False即可。')
        print('reload training data...')
        train_dataSet, train_label = load_data(os.path.join(PATH, F_NAME_TRAIN))

        print('training...')
        knn = neighbors.KNeighborsClassifier(n_neighbors=N_NEIGHBORS, algorithm='brute')
        knn.fit(train_dataSet, train_label)
        print('training finished.')

    # Score the classifier on the held-out test file.
    test_dataSet, test_labels = load_data(os.path.join(PATH, F_NAME_TEST))
    predicted = knn.predict(test_dataSet)
    total = len(predicted)
    wrong = np.sum(predicted != test_labels)

    error_rate = wrong / total
    accuracy = 1 - error_rate

    print("n of neighbors: %i" % N_NEIGHBORS)
    print("Test num: %i" % total)
    print("error num: %i" % wrong)
    print("accuracy: %.2f" % (accuracy * 100))
    print('error rate: %.2f' % (error_rate * 100))