# -*- coding: utf-8 -*-
"""
    @author:sirius
    @time:2017.10.14
"""
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.preprocessing import StandardScaler
from math import radians, sin, cos, sqrt, asin, log
from sklearn.externals import joblib
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
import handle_file as hf
import cos_dis as cd
import pandas as pd
import numpy as np
import datetime
import sklearn
import apriori_model
import scrapy
import json
import os

# Root directory of the raw input data files (e.g. ./data/gps.csv).
DATA_PATH = './data/'
# Every (day-type, period-of-day) cell of the user state grid; these tuples
# key the per-user correlation matrix and the similarity feature dicts.
P_MATRIX = [('workday', 'Morning'), ('workday', 'Noon'), ('workday', 'Afternoon'), ('workday', 'Evening'), ('workday', 'Night'), ('holiday', 'Morning'), ('holiday', 'Noon'), ('holiday', 'Afternoon'), ('holiday', 'Evening'), ('holiday', 'Night')]

class PeriodClassifier():
    """App-usage period classifier / user-similarity miner.

    Pipeline (driven by ``data_dis``):
      1. cluster GPS points into discrete location ids (DBSCAN),
      2. discretise timestamps into workday/holiday x period-of-day buckets,
      3. bucket the speed sensor into 4 movement classes,
      4. mine per-user app association rules (Apriori) and weight them,
      5. build a per-user state matrix over P_MATRIX, compute user-user
         cosine similarity and keep the top-N "neighbour" users.

    NOTE(review): ``fit_model`` / ``test_model`` / ``get_result`` expect a
    ``self.xgbc`` classifier that is never created in this class (the XGB
    training pipeline in ``data_dis`` is commented out) -- wire it up
    before calling them.
    """

    def __init__(self):
        # Warm up the Apriori module with an empty dataset.  The return
        # value is stored but never read again -- presumably a smoke test
        # that the module imports and runs; TODO confirm.
        self.apriori = apriori_model.apriori([])

    def data_dis(self):
        """Discretise the raw usage log and derive user similarity.

        Reads ./md_cache/app_rec_gps.csv, enriches it in place with
        location / time / speed features, writes the processed frame to
        ./md_cache/precessed.csv and the top-5 neighbour users to
        ./md_cache/neighbors.txt.
        """
        # Raw result data set: one row per app-open event, with GPS,
        # timestamp and speed-sensor columns.
        self.data = pd.read_csv('./md_cache/app_rec_gps.csv')

        print('Input data:')
        print(self.data)

        # Map (latitude, longitude) to a discrete location id via DBSCAN.
        self.__cluster_location()

        # Derive date / month / week-type / period-of-day features.
        self.__split_time()

        # Bucket the speed sensor into 4 movement classes.
        self.__proc__speed()

        print('Result data:')
        print(self.data)
        # Persist the pre-processed frame for inspection / reuse.
        self.data.to_csv('./md_cache/precessed.csv', index=False)
        print('Precessed-Finished...')

        # Mine per-user app association rules (weighted periodicity).
        L, supData = self.__associate_app()

        print('app开启事件次数：', self.click_dict)

        # Per-user, per-app weights (usage popularity x market IDF).
        self.__weight_cal()

        # Per-user state matrix over the workday/holiday x period grid.
        self.__corr_matrix()

        # Cosine similarity of every other user against the target user.
        # NOTE(review): the target user id 6085.0 is hard-coded -- confirm.
        self.__cal_similarity(6085.0, supData)

        print('用户相似度：', self.similarities)

        # Keep the 5 most similar users.
        self.__get_neighbors(5)

        print('Neighbors：', self.neighbors)

        # Persist the neighbour list.
        hf.write_file(str(self.neighbors), './md_cache/neighbors.txt')

        # ------------------------------------------------------------------
        # XGBoost training pipeline, kept for reference (disabled).
        # FIXME: the class-label format breaks XGB training -- fix the label
        # format before re-enabling.
        # ------------------------------------------------------------------
        # train_features = ['date', 'month', 'week', 'period', 'location', 'sensor', 'associate_location', 'support']
        # target_features = ['name']
        # all_features = train_features + target_features
        # self.data = self.data.loc[:, all_features]
        # app_list = [i for i in self.data['name']]
        # self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
        #     self.data[train_features], app_list, test_size=0.25, random_state=33)
        # self.vec = DictVectorizer(sparse=False)
        # self.X_train = self.vec.fit_transform(self.X_train.to_dict(orient='record'))
        # self.X_test = self.vec.transform(self.X_test.to_dict(orient='record'))
        # del(self.vec)

    def fit_model(self):
        """Train the XGBoost classifier on the vectorised training split.

        NOTE(review): ``self.xgbc`` is never assigned in this class (the
        pipeline that would create it is commented out in ``data_dis``),
        so calling this as-is raises AttributeError.
        """
        self.xgbc.fit(self.X_train, self.y_train)

    def test_model(self):
        """Report classifier accuracy on the held-out test split.

        NOTE(review): requires ``self.xgbc`` -- see ``fit_model``.
        """
        score = self.xgbc.score(self.X_test, self.y_test)
        print('模型的准确度:', score)
        print()

    def get_result(self):
        """Predict the top-5 most likely apps for the first test sample.

        Returns:
            dict mapping app name -> predicted probability (as ``str``).
            (Fix: the original built this dict but never returned it.)
        """
        # Use the first held-out sample as the query.
        test_data = np.array([self.X_test[0]])

        # Class labels known to the classifier.
        app_list = self.xgbc.classes_
        # Probability of each class for the query sample.
        pred_proba = self.xgbc.predict_proba(test_data)[0]

        # Rank classes by probability and keep the top 5.
        result = pd.DataFrame({'app_name': app_list, 'proba': pred_proba})
        result = result.sort_values('proba', ascending=False).iloc[:5, :]
        print(result)

        # Convert to the {app_name: probability-string} output format.
        result_dict = {}
        for name, proba in zip(result.iloc[:, 0], result.iloc[:, 1]):
            result_dict[name] = str(proba)
        return result_dict

    def __cluster_location(self):
        """Assign a discrete ``location`` id to every row via DBSCAN.

        A trained model is cached at ./cache/dbscan.pkl.  Note DBSCAN has
        no ``predict``; ``fit_predict`` re-fits on the given points, so the
        cached model effectively only reuses the hyper-parameters.
        """
        if os.path.exists('./cache/dbscan.pkl'):
            app_location = self.data[['latitude', 'longitude']]
            # NOTE(review): the cached model was trained on standardised
            # coordinates (see the else-branch) but is applied to raw ones
            # here; the StandardScaler step was commented out -- confirm.
            dbsc = joblib.load('./cache/dbscan.pkl')
            self.data['location'] = dbsc.fit_predict(app_location)
            del(app_location)
        else:
            # First run: fit DBSCAN on a ~40k-point GPS sample and cache it.
            gps_location = pd.read_csv('./data/gps.csv', sep='\t').loc[:40000, ['latitude', 'longitude']]

            # Standardise the training sample before clustering.
            ss = StandardScaler()
            gps_location = ss.fit_transform(gps_location)

            dbscan = DBSCAN(eps=0.8, min_samples=10)
            dbscan.fit(gps_location)
            joblib.dump(dbscan, './cache/dbscan.pkl')

            # Fix: the original left 'location' unset on the first run,
            # crashing every later step; label the app rows here as well.
            self.data['location'] = dbscan.fit_predict(self.data[['latitude', 'longitude']])

    def __split_time(self):
        """Split each timestamp into date / month / week-type / period-of-day."""
        date_list = []

        for row in self.data.values:
            # Positional layout of the raw frame: row[0] = db_key,
            # row[3] = unix timestamp -- TODO confirm against the CSV schema.
            dt = datetime.datetime.utcfromtimestamp(row[3])
            rec = {
                'db_key': row[0],
                'time': row[3],
                'date': dt.strftime('%Y-%m-%d'),
                'week': dt.strftime('%a'),
                'month': dt.strftime('%b'),
            }

            # Period of day by hour.  (Fix: the original datetime-range
            # chain left exactly-midnight and 23:59:59.x unclassified.)
            h = dt.hour
            if h < 7:
                rec['period'] = 'Night'
            elif h < 11:
                rec['period'] = 'Morning'
            elif h < 14:
                rec['period'] = 'Noon'
            elif h < 18:
                rec['period'] = 'Afternoon'
            else:
                rec['period'] = 'Evening'

            date_list.append(rec)

        date_dt = pd.DataFrame(date_list)

        # Collapse concrete weekdays into workday / holiday.
        date_dt.loc[date_dt['week'].isin(('Mon', 'Tue', 'Wed', 'Thu', 'Fri')), 'week'] = 'workday'
        date_dt.loc[date_dt['week'].isin(('Sat', 'Sun')), 'week'] = 'holiday'

        # Join the derived columns back and restore chronological order
        # ('time' exists on both sides of the merge, hence 'time_x').
        self.data = pd.merge(self.data, date_dt, on='db_key').sort_values('time_x')
        self.data.reset_index(inplace=True)
        del(self.data['index'])

    def __associate_app(self):
        """Mine per-user association rules over app-usage context states.

        Groups the apps each user opened under identical context states
        (week-type, period, month, date, speed, location) and runs Apriori
        per user on those groups.

        Returns:
            (L_dict, supData_dict): per-user frequent item sets and their
            support values, keyed by user id.
        """
        columns = list(self.data.columns)
        # All distinct context states seen across users.
        self.feature = []
        # Per-user grouped app-name lists (the Apriori transactions).
        self.associate_dict = {}
        # Last processed user's transaction list (kept as an attribute for
        # parity with the original implementation).
        self.associate_list = []
        # Per-user chronological (state..., app) tuples.
        self.listUser2Period = {}
        # Flat copy of the tuples above, keyed by user.
        self.features_tmp = {}
        # Per-user frequent item sets / supports.
        L_dict = {}
        supData_dict = {}
        # Per-user app-open counts.
        self.click_dict = {}
        # Per-user app download counts from the Wandoujia app market.
        self.down_app = {}

        # Split the rows per user and count app opens.
        for index in range(len(self.data)):
            userid = self.data.iloc[index, columns.index('userid')]
            date = self.data.iloc[index, columns.index('date')]
            month = self.data.iloc[index, columns.index('month')]
            week = self.data.iloc[index, columns.index('week')]
            period = self.data.iloc[index, columns.index('period')]
            speed_split = self.data.iloc[index, columns.index('speed_split')]
            name = self.data.iloc[index, columns.index('name')]
            location = self.data.iloc[index, columns.index('location')]

            # Context state + app name for this event.
            user_rank = (week, period, month, date, speed_split, location, name)
            self.listUser2Period.setdefault(userid, []).append(user_rank)

            # Count opens; fetch the market download count only the first
            # time an app is seen for a user (scrapy.spider hits the web).
            user_clicks = self.click_dict.setdefault(userid, {})
            user_downs = self.down_app.setdefault(userid, {})
            if name in user_clicks:
                user_clicks[name] += 1
            else:
                user_clicks[name] = 1
                user_downs[name] = scrapy.spider(name)

        for uid in self.listUser2Period:
            # Group app names by identical context state, preserving the
            # order in which states first appear.  (Fix: the original
            # looked the state up in ``self.associate_list`` -- a list of
            # app-name lists shared across users -- which raised
            # ValueError / mis-grouped as soon as a state repeated.)
            groups = {}
            order = []
            for p in self.listUser2Period[uid]:
                state, app = p[:-1], p[-1]
                if state in groups:
                    groups[state].append(app)
                else:
                    groups[state] = [app]
                    order.append(state)
                if state not in self.feature:
                    self.feature.append(state)
                self.features_tmp.setdefault(uid, []).append(p)

            self.associate_list = [groups[s] for s in order]
            self.associate_dict[uid] = self.associate_list

        # Apriori per user: frequent item sets + supports, min support 0.5.
        for user in self.associate_dict:
            L, supData = apriori_model.apriori(self.associate_dict[user], 0.5)
            L_dict[user] = L
            supData_dict[user] = supData

        print(L_dict, supData_dict)
        return L_dict, supData_dict

    def __haversine(self, lat1, lon1, lat2, lon2):
        """Great-circle distance in km between two lat/lon points
        (Haversine formula; degrees in, kilometres out)."""
        R = 6372.8  # mean Earth radius in km

        d_lat = radians(lat2 - lat1)
        d_lon = radians(lon2 - lon1)
        lat1 = radians(lat1)
        lat2 = radians(lat2)

        a = sin(d_lat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(d_lon / 2) ** 2
        return R * 2 * asin(sqrt(a))

    def __proc__speed(self):
        """Bucket the raw ``speed`` column into 4 movement classes
        (0 static, 1 walking, 2 running, 3 in transit) as ``speed_split``."""
        speed_list = []

        for raw in self.data['speed']:
            # '\\N' is the dump's NULL marker; treat it (and anything else
            # non-numeric) as static.  (Fix: the original literal '\N' is
            # invalid escape syntax under Python 3, and a str/float
            # comparison raises TypeError there.)
            try:
                speed = float(raw)
            except (TypeError, ValueError):
                speed_list.append(0)  # Static: 0
                continue
            if speed <= 0.5:
                speed_list.append(0)  # Static: 0
            elif speed <= 1.0:
                speed_list.append(1)  # Walking: 1
            elif speed <= 2:
                speed_list.append(2)  # Running: 2
            else:
                speed_list.append(3)  # Use Transaction (in transit): 3

        self.data['speed_split'] = speed_list

    def __weight_cal(self):
        """Compute per-user, per-app weights.

        Weight = usage popularity (clicks normalised by the user's
        most-used app) x market popularity (log of max downloads over the
        app's downloads, an IDF-style term).  Stored in ``self.u_weight``.
        """
        self.u_weight = {}
        for user in self.click_dict:
            clicks = self.click_dict[user]
            downs = self.down_app[user]
            # Normalisation references: highest click / download counts.
            max_clicks = float(max(clicks.values()))
            max_downs = float(max(downs.values()))
            weights = {}
            for app in clicks:
                # Fix: these were integer divisions under Python 2, which
                # zeroed every weight except the top app's.
                up = clicks[app] / max_clicks
                # NOTE(review): re-scrapes the download count instead of
                # reusing downs[app] -- kept for parity, confirm intended.
                iw = log(max_downs / scrapy.spider(app))
                weights[app] = up * iw
            self.u_weight[user] = weights
        print('周期律权重：', self.u_weight)

    def __corr_matrix(self):
        """Build, per user, the set of (speed, location, app) states
        observed in every (week-type, period) cell of P_MATRIX; cells with
        no events keep the (0, '', '') placeholder."""
        self.corr_matrix = {}

        for u in self.listUser2Period:
            self.corr_matrix[u] = {}
            for cell in P_MATRIX:
                # Placeholder until the first matching event is seen.
                states = {(0, '', '')}
                seen = False
                for j in self.listUser2Period[u]:
                    if (j[0], j[1]) == cell:
                        if seen:
                            states.add((j[4], j[5], j[6]))
                        else:
                            # First hit replaces the placeholder.
                            states = {(j[4], j[5], j[6])}
                            seen = True
                self.corr_matrix[u][cell] = states

        print('相关矩阵：', self.corr_matrix)
        hf.write_file(str(self.corr_matrix), './md_cache/matrix.txt')

    def __cal_similarity(self, userid, supData):
        """Cosine similarity between ``userid`` and every other user.

        Each user becomes a feature dict over the P_MATRIX grid (speed,
        location, and support*weight per observed app), vectorised in the
        feature space fitted on the target user.

        Args:
            userid: target (local) user id.
            supData: per-user Apriori support dicts, keyed by frozenset.
        """
        vec = DictVectorizer()
        features = {}    # target user's feature dict
        c_features = {}  # current cloud user's feature dict
        c_arrays = []    # current cloud user's feature vector
        self.similarities = {}  # cloud user id -> cosine similarity

        for f in P_MATRIX:
            for item in self.corr_matrix[userid][f]:
                prefix = f[0] + ',' + f[1] + ','
                features[prefix + 'speed'] = item[0]
                features[prefix + 'location'] = item[1]
                try:
                    # Fix: the original read supData[userid][item[2]]
                    # (support is keyed by frozenset) and referenced the
                    # bare name ``u_weight`` (NameError); both raised and
                    # silently zeroed every app feature of the target user.
                    features[prefix + item[2]] = float(supData[userid][frozenset([item[2]])]) * float(self.u_weight[userid][item[2]])
                except (KeyError, TypeError):
                    features[prefix + item[2]] = 0

        # Vectorise the target user; the fitted vocabulary defines the
        # feature space used for every comparison below.
        # (Fix: DictVectorizer expects an iterable of dicts, hence the
        # one-element list.)
        arrays = vec.fit_transform([features]).toarray()

        # Walk every cloud user and score it against the target.
        for c_user in self.corr_matrix:
            if userid != c_user:
                # Fix: the original reused one dict across users, so each
                # user's vector inherited every previous user's features.
                c_features = {}
                for f in P_MATRIX:
                    for item in self.corr_matrix[c_user][f]:
                        prefix = f[0] + ',' + f[1] + ','
                        c_features[prefix + 'speed'] = item[0]
                        c_features[prefix + 'location'] = item[1]
                        try:
                            c_features[prefix + item[2]] = float(supData[c_user][frozenset([item[2]])]) * float(self.u_weight[c_user][item[2]])
                        except (KeyError, TypeError):
                            c_features[prefix + item[2]] = 0

                # Fix: re-fitting the vectoriser per user produced vectors
                # in different feature spaces; transform into the target's
                # space instead (features unseen at fit time are dropped).
                c_arrays = vec.transform([c_features]).toarray()

                # Cosine similarity between the two feature vectors.
                self.similarities[c_user] = cd.cos(arrays[0], c_arrays[0])

        print('features:', features)
        print('c_features:', c_features)
        print('arrays:', arrays)
        print('c_arrays:', c_arrays)

    def __get_neighbors(self, number):
        """Keep the ``number`` most similar users as ``self.neighbors``
        (list of (user_id, similarity) tuples, most similar first).

        Note: like the original, this replaces ``self.similarities`` with
        the sorted (user, score) list.
        """
        ranked = sorted(self.similarities.items(), key=lambda kv: kv[1], reverse=True)
        self.similarities = ranked
        self.neighbors = ranked[:number]

if __name__ == '__main__':
    p = PeriodClassifier()

    # p.fit_dbscan_model()
    
    # Run the full discretisation + similarity pipeline (reads the CSVs
    # under ./md_cache and ./data, writes the processed frame, matrix and
    # neighbour list back to ./md_cache).
    p.data_dis()
    # XGB training/evaluation steps are disabled; they require self.xgbc,
    # which is never initialised in the class as written.
    # p.fit_model()
    # p.test_model()
    # p.get_result()
