import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import math




def read_file(filename):
    """Parse a colon-delimited text file into a list of field lists.

    Each line is split on ':' and the trailing newline is stripped from
    field 4 (assumed to be the last field used downstream — TODO confirm
    for files with more than 5 fields).

    Fixed: the file handle is now closed via a context manager even if
    parsing raises, and the manual readline loop is replaced by direct
    file iteration.
    """
    info = []
    with open(filename) as f:
        for line in f:
            fields = line.split(':')
            # split() leaves the line terminator on the last field; remove it.
            fields[4] = fields[4].replace('\n', '')
            info.append(fields)
    return info

def get_data(info):
    """Group the float in field 4 of each record by the integer index in field 2.

    Returns a list of lists: result[i] holds, in input order, every value
    whose record carried index i.

    Fixed: the original appended a brand-new bucket at the END of the list
    whenever index i was not yet present, which misfiles values if indices
    arrive out of order or skip a number. We now pad with empty buckets so
    each value always lands at position i. Behavior is identical for the
    expected sequential-index input.
    """
    result = []
    for record in info:
        idx = int(record[2])
        value = float(record[4])
        # Ensure a bucket exists at position idx before appending.
        while len(result) <= idx:
            result.append([])
        result[idx].append(value)
    return result

def txt_to_csv(filename):
    """Convert a colon-delimited text file into 'data.csv'.

    Reads the raw records, groups the values by their integer index
    (per the original note: 100000 rows x 44 values), and writes one CSV
    row per group.

    Fixed: the output handle is closed via a context manager even if a
    write fails; row writing uses writerows.
    """
    info = read_file(filename)
    # Regroup into one list of values per index (originally 100000 x 44).
    result = get_data(info)
    with open('data.csv', 'w', encoding='utf-8', newline='') as f:
        csv.writer(f).writerows(result)


# Dimensionality reduction (comment called it a low-variance filter; the implementation uses PCA)
def data_reduction(path_csv):
    """Reduce the feature columns of the CSV to 8 PCA components.

    Returns a numpy array whose columns are [row_id, pc1..pc8, original
    feature columns]. The sequential row id lets later stages restore the
    original row order after sorting.

    Fixed: row ids are generated from the actual row count instead of a
    hard-coded 100000, so inputs of any length work.
    """
    train = pd.read_csv(path_csv, header=None)
    pca = PCA(n_components=8)
    pca_result = pca.fit_transform(train.values)
    # Prepend a sequential row id (column 0).
    row_ids = np.arange(len(train))
    pca_result = np.insert(pca_result, 0, values=row_ids, axis=1)
    # Append the untouched original columns after the PCA components.
    pca_result = np.concatenate((pca_result, train), axis=1)
    return pca_result

# Data partitioning
def data_devide(pca_result):
    """Recursively bisect the rows into 2**8 = 256 partitions.

    On pass i each current partition is sorted (descending) by PCA
    component i (column i + 1; column 0 is the row id) and split into two
    halves via devide().

    Fixed: the original simulated dynamic variables by writing into
    locals(), which is not guaranteed to persist in CPython and is broken
    outright by PEP 667 (Python 3.13). Plain list rebinding does the same
    job reliably.
    """
    groups = [pca_result]
    for i in range(8):
        next_groups = []
        for group in groups:
            ordered = sorted(group, key=lambda row: row[i + 1], reverse=True)
            next_groups.extend(devide(ordered))
        groups = next_groups
    return groups

def devide(pca_result):
    """Split a sequence at its midpoint into [front_half, back_half].

    With an odd length, the back half receives the extra element.
    """
    mid = len(pca_result) // 2
    return [pca_result[:mid], pca_result[mid:]]

def data_aggregation(pca_result_devided):
    """Average each partition into a single representative keypoint.

    For every partition (list of equal-width rows) returns the per-column
    mean, preserving partition order.

    Fixed: the row width is inferred from the data instead of being
    hard-coded to 53 (= 1 row id + 8 PCA components + 44 original
    columns), so the function works for any width. Behavior is identical
    for 53-wide rows.
    """
    keypoints = []
    for group in pca_result_devided:
        count = len(group)
        width = len(group[0])
        sums = [0.0] * width
        for row in group:
            for j in range(width):
                sums[j] += row[j]
        keypoints.append([s / count for s in sums])
    return keypoints

def keypoint_value(keypoints):
    """Return the index of the keypoint whose removal changes the centroid norm least.

    Uses columns 1..8 of each keypoint (the PCA components; column 0 is
    the row id). Computes the Euclidean norm of the per-column mean over
    all keypoints, then for each keypoint the norm of the leave-one-out
    mean, and returns the index with the smallest absolute difference —
    i.e. the least influential ("useless") keypoint.

    Fixed: the leave-one-out means are derived from the column totals
    (total - excluded value), turning the original O(n^2) recomputation
    into O(n) without changing the result; the duplicated norm loop is
    collapsed into a single expression.

    Raises ZeroDivisionError for fewer than two keypoints (as the
    original did).
    """
    length = len(keypoints)

    # Column totals over PCA components 1..8.
    totals = [0.0] * 8
    for keypoint in keypoints:
        for k in range(8):
            totals[k] += keypoint[k + 1]

    # Norm of the overall per-column mean.
    value = math.sqrt(sum((t / length) ** 2 for t in totals))

    affects = []
    for keypoint in keypoints:
        # Leave-one-out mean, reconstructed from the totals.
        loo = [(totals[k] - keypoint[k + 1]) / (length - 1) for k in range(8)]
        value_one = math.sqrt(sum(c ** 2 for c in loo))
        affects.append(abs(value_one - value))

    return affects.index(min(affects))

def redundancy_data(useless_keypoint_no, pca_result_devided):
    """Drop the redundant partition and rebuild rows without the PCA columns.

    Every row from every partition except the one at useless_keypoint_no
    is reduced to [row_id] + columns 9..52 (the original feature columns;
    the PCA components in columns 1..8 are discarded). Rows are returned
    sorted ascending by the row id in column 0.
    """
    final_data = []
    for group_no, group in enumerate(pca_result_devided):
        if group_no == useless_keypoint_no:
            continue
        for row in group:
            final_data.append([row[0]] + row[9:53].tolist())
    return sorted(final_data, key=lambda row: row[0])

def list_to_csv(data):
    """Write the given rows to 'data_processed.csv' as UTF-8 CSV.

    Fixed: the file is closed via a context manager even if a write
    fails; row writing uses writerows.
    """
    with open('data_processed.csv', 'w', encoding='utf-8', newline='') as f:
        csv.writer(f).writerows(data)

if __name__ == '__main__':
    # One-off preprocessing of the raw dump: txt_to_csv("data.txt")
    path_csv = "data.csv"
    # Step 1: PCA dimensionality reduction of the raw CSV.
    reduced = data_reduction(path_csv)
    # Step 2: recursively partition the reduced rows.
    partitions = data_devide(reduced)
    # Step 3: aggregate each partition into a representative keypoint.
    keypoints = data_aggregation(partitions)
    # Step 4: score the keypoints and find the least influential one.
    useless_no = keypoint_value(keypoints)
    # Step 5: discover and remove the redundant partition, restore order.
    final_data = redundancy_data(useless_no, partitions)
    # Step 6: persist the processed data to CSV.
    list_to_csv(final_data)


