# -*- coding: utf-8 -*-

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import KernelPCA, PCA
from sklearn import preprocessing
from scipy import stats
from sklearn.ensemble import IsolationForest
import vector

# One day's file; batch denoising is possible by looping over many filenames.
file = '20161017 (1).csv'

# Displacement-vector features and the matching lat/lon trajectory points.
dataset = pd.read_csv('C:/Users/Xiao/Documents/output/{}'.format(file))
location_dataset = pd.read_csv('C:/Users/Xiao/Documents/output_data/{}'.format(file))

dataset_array = dataset.values
location_dataset_array = location_dataset.values

# Rescale every displacement feature into [0, 1] before the kernel PCA.
scalar = preprocessing.MinMaxScaler()
dataset_std = scalar.fit_transform(dataset_array)

# Kernel PCA: project the scaled displacement vectors onto 5 non-linear
# components (RBF kernel).  gamma=10 is hard-coded — tune per dataset.
kernel_pca = KernelPCA(n_components=5, kernel='rbf', fit_inverse_transform=True, gamma=10)
dataset_kernel_pca = kernel_pca.fit_transform(dataset_std)

# Covariance of the KPCA scores (transpose so variables are along rows).
cov_mat = np.cov(dataset_kernel_pca.T)

# The covariance matrix is symmetric, so use eigh instead of eig: it
# guarantees real eigenvalues/eigenvectors (eig can return spurious complex
# parts from floating-point asymmetry) and returns them in ascending order,
# which the downstream explained-variance code re-sorts anyway.
eig_vals, eig_vecs = np.linalg.eigh(cov_mat)

# Total variance; used to express each eigenvalue as a percentage.
# (The former eig_pairs list of (|eigenvalue|, eigenvector) tuples was dead
# code — never read anywhere — and has been removed.)
tot = sum(eig_vals)

# Percentage of variance explained by each component, largest first.
var_exp = [(val / tot) * 100 for val in sorted(eig_vals, reverse=True)]

# Cumulative explained variance.
cum_var_exp = np.cumsum(var_exp)
print(var_exp)
print(cum_var_exp)

# Number of leading components needed to reach 80% cumulative explained
# variance; defaults to 2 if the threshold is never reached.
pca_cnt = next(
    (index + 1 for index, value in enumerate(cum_var_exp) if value >= 80),
    2,
)
print(pca_cnt)

# Scree-style chart: per-component explained variance as bars plus the
# cumulative explained variance as a step line.
plt.figure(figsize=(30, 20))
plt.subplot(221)
component_axis = range(len(var_exp))
plt.bar(component_axis, var_exp, alpha=0.3, align='center', label='var_exp', color='g')
plt.step(component_axis, cum_var_exp, where='mid', label='cum_var_exp')
plt.ylabel('cum_var_exp_rate')
plt.xlabel('PCA')
plt.legend(loc='best')

# Linear PCA reduces the KPCA scores down to the component count found above.
pca = PCA(n_components=pca_cnt)
dataset_pca = pca.fit_transform(dataset_kernel_pca)

# Rescale the reduced features back into [0, 1].
dataset_pca_std = scalar.fit_transform(dataset_pca)
X_train = dataset_pca_std

outliers_fraction = 0.03  # fraction of samples to treat as outliers
rng = np.random.RandomState(42)

# Isolation forest: fitted with contamination='auto'; the actual share of
# points flagged as noise is set below by thresholding the anomaly scores
# at the outliers_fraction percentile.
clf = IsolationForest(max_samples=len(X_train), random_state=rng, contamination='auto')
clf.fit(X_train)
y_pred_train = clf.predict(X_train)

# Per-sample anomaly score (lower = more anomalous).
scores_pred = clf.decision_function(X_train)
threshold = stats.scoreatpercentile(scores_pred, 100 * outliers_fraction)
print(scores_pred)

# Plot the raw trajectory (before noise removal).
plt.subplot(223)
plt.scatter(location_dataset_array[:, 0], location_dataset_array[:, 1], marker='o')

# Indices of points whose anomaly score falls below the percentile threshold.
# (A previously commented-out alternative dropped every point where
# clf.predict(...) == -1 instead; removed as dead code.)
delete_index = [index for index, value in enumerate(scores_pred) if value < threshold]

# Drop the flagged rows from the trajectory data (np.delete returns a new
# array, so the original location_dataset_array is left untouched).
new_location_dataset_array = np.delete(location_dataset_array, delete_index, axis=0)

# Plot the cleaned trajectory.
# NOTE(review): this scatter targets subplot(223) again, overlaying the
# "before" plot — presumably intentional so removed points show in the first
# scatter's colour; confirm, otherwise this should be subplot(224).
plt.subplot(223)
plt.scatter(new_location_dataset_array[:, 0], new_location_dataset_array[:, 1], marker='o')

    
i = 0
with open("C:/Users/Xiao/Documents/Dguiji.txt","w",encoding='utf-8') as file:
        for line in new_location_dataset_array:
            i+=1
            file.write(str(i)+','+str(line[0])+','+str(line[1])+'\n')