# -*- coding: utf-8 -*-

"""
前期的实际实验中，数字2、3、4、5、6均有极高的正确率，但是7、9的正确率只有60%至80%， 0、1就根本没有正确率。
后来花了点时间写个脚本把dbrhd训练集可视化，发现0都是逆时针写的，而我习惯顺时针写。当我把方向改过来后，0的
识别率也变成了百发百中。而1的轨迹就耐人寻味了，所有的1都写得像7，上面都有一横，而且很长，只是上面的一横是
拐向下的。老外写字这么奔放的吗？不过，不管怎么说，还是要感谢UCI提供了个这么好的数据集，正是有了里面几十位
不同实验者的手写字迹数据，才使我能这么容易地实现手写数字识别。

知道了原因，其实改起来也不难。数据集中数字0的轨迹全是逆时针的，那就写个脚本翻转一下，另外保存起来做个拓展的
数据集。数字1可以把形成上面一横的前一两个点去掉后，对剩下的竖线重新采样生成拓展数据集。并且原数据集中的数字
1的轨迹全部在水平方向压缩一半后再用，以减少将7和2误判成1的情况。具体操作如下：

数据集中字迹以左下角为坐标原点，坐标范围100x100.
    1)对于数字0，以y=50为轴，对所有坐标做轴对称操作。
    2)对于数字1，丢弃前两个点，然后对剩余点缩放(scale)，重采样(resample)，居中(center)。重采样放在后面是为了
减小数值取整带来的误差。
    3)对于数字3，水平方向压缩1/2。
    4)对于数字7， 丢弃后两个点，然后对剩余点缩放，重采样，居中。

以上，看一下\dbrhd_images就会明白。

2019-2-15
"""
import numpy as np
import trajectory_preprocess as tra_pre
NORMALIZED_SIZE = (100, 100)  # (width, height) coordinate range of the normalized trajectories
NUM = 8  # number of (x, y) points per trajectory (flattened vectors have 2*NUM values)
SEP = ','  # field separator used in the dataset files

PREFIX = 'extend'  # file name prefix of the generated extended dataset files


def extend_0(dataSet):
    """Mirror every trajectory vertically (modifies *dataSet* in place).

    Each y coordinate is reflected about the horizontal mid-line of the
    normalized canvas (y -> height - y), which turns the dataset's
    counter-clockwise zeros into clockwise ones.

    Expects an ndarray of shape (n, 2*NUM); returns the same array.
    """
    print("Extending digit 0")
    height = NORMALIZED_SIZE[1]
    # odd columns hold the y coordinates of the flattened (x, y) pairs
    y_columns = [2 * i + 1 for i in range(NUM)]
    # negate then shift back into range: equivalent to y = height - y
    dataSet[:, y_columns] = height - dataSet[:, y_columns]

    return dataSet


def extend_1(dataSet):
    """Build extended samples for digit 1.

    Per the module docstring, the first two points form the long
    horizontal stroke on top; they are dropped, then the remaining
    points are scaled, resampled and centered.  Resampling after
    scaling is deliberate: it reduces integer-rounding error.

    Args:
        dataSet: ndarray of shape (n, 2*NUM), flattened (x, y) vectors.

    Returns:
        list of flattened 1-D ndarrays, one per input trajectory.
    """
    print("extending digit 1")
    preproc = tra_pre.PreProcessor()

    trajectories = dataSet.reshape([len(dataSet), NUM, 2])
    trajectories = trajectories[:, 2:]  # discard the first two points
    out = []
    for tra in trajectories:
        tra = preproc.scale(tra)
        tra = preproc.resample(tra)
        tra = preproc.center(tra)
        out.append(tra.flatten())

    return out


def extend_3(dataSet):
    """Build extended samples for digit 3 by shrinking the x coordinates.

    Every trajectory's x values are multiplied by ``scale_ratio``
    (truncated to int) and the trajectory is then re-centered.

    NOTE(review): the module docstring says digit 3 is compressed to 1/2
    horizontally, but the ratio used here is 0.3 -- confirm which is
    intended before changing either.

    Expects an ndarray of shape (n, 2*NUM); returns a new ndarray of the
    same shape (the input is not modified).
    """
    print("Extending digit 3")
    scale_ratio = 0.3
    tras = dataSet.reshape([len(dataSet), NUM, 2]).copy()

    preproc = tra_pre.PreProcessor()
    for idx in range(len(tras)):
        # shrink horizontally; astype(int) truncates toward zero, the
        # same rounding as the original per-element int() cast
        tras[idx, :, 0] = (tras[idx, :, 0] * scale_ratio).astype(int)
        tras[idx] = preproc.center(tras[idx])

    return tras.reshape([len(tras), 2 * NUM])


def extend_7(dataSet):
    """Build extended samples for digit 7.

    Drops the last two trajectory points, then scales, resamples and
    centers the remaining points -- the same pipeline as extend_1, which
    drops the first two instead.

    Args:
        dataSet: ndarray of shape (n, 2*NUM), flattened (x, y) vectors.

    Returns:
        list of flattened 1-D ndarrays, one per input trajectory.
    """
    # progress message added for consistency with the other extend_* helpers
    print("Extending digit 7")
    trajectories = dataSet.reshape([len(dataSet), NUM, 2])
    trajectories = trajectories[:, :-2].copy()  # discard the last two points

    preproc = tra_pre.PreProcessor()
    out = []
    for tra in trajectories:
        tra = preproc.scale(tra)
        tra = preproc.resample(tra)
        tra = preproc.center(tra)
        out.append(tra.flatten())

    return out


def load_file(f_name):
    """Load flattened trajectory vectors and their labels from *f_name*.

    Each line is expected to hold 2*NUM coordinate values followed by a
    digit label, all separated by SEP.

    Returns:
        (dataSet, labels): int ndarrays of shapes (n, 2*NUM) and (n,).
    """
    # 'with' closes the file; the explicit f.close() of the original was redundant
    with open(f_name) as f:
        lines = f.readlines()

    dataSet = np.zeros([len(lines), 2*NUM], dtype=int)
    labels = np.zeros([len(lines)], dtype=int)

    for i, line in enumerate(lines):
        fields = line.split(SEP)
        labels[i] = int(fields[-1])
        # convert explicitly rather than relying on numpy's str->int coercion
        dataSet[i] = [int(v) for v in fields[:2*NUM]]

    return dataSet, labels


def save_file(f_name, dataSet, labels):
    """Append flattened vectors and their labels to *f_name*.

    Each row is written as 2*NUM right-aligned coordinate values, each
    followed by SEP, then the right-aligned label and a newline --
    matching the pendigits file layout.  The file is opened in append
    mode so repeated runs accumulate data.

    Raises:
        IndexError: if dataSet and labels differ in length.
    """
    if len(dataSet) != len(labels):
        raise IndexError('dataSet and labels should have the same length.')

    print('writing %s' % f_name)
    # 'with' closes the file; the explicit f.close() of the original was redundant
    with open(f_name, 'a') as f:
        for i in range(len(labels)):
            # one write per row instead of many tiny writes; output bytes unchanged
            fields = ['{:>3}'.format(str(dataSet[i][j])) for j in range(2*NUM)]
            f.write(SEP.join(fields) + SEP + '{:>2}'.format(str(labels[i])) + '\n')
    print('save %s' % f_name)

def filter(dataSet, labels, digit):
    """Return the rows of dataSet whose label equals *digit*.

    Note: intentionally keeps the historical name even though it shadows
    the builtin ``filter`` -- the __main__ section calls it by this name.
    """
    selected = dataSet[labels == digit]
    print('Find %i of digit %i' % (len(selected), digit))
    return selected


if __name__ == '__main__':
    dataSet, labels = load_file(r'pendigits.tra')

    # (digit, extension routine) pairs, processed in the original order
    jobs = [(0, extend_0), (1, extend_1), (7, extend_7), (3, extend_3)]
    for digit, extend in jobs:
        extended = extend(filter(dataSet, labels, digit))
        save_file('%s_%i.tra' % (PREFIX, digit),
                  extended,
                  np.full([len(extended)], digit))










