# -*- coding:utf-8 -*-
"""
    @author:sirius
    @time:2017.10.14
"""
from common.util import format_time, get_event_timestamp_list,\
    weight_cal, dicts_equal, format_frozenset, lol_contain, lol_contain_app
from period.apriori_model import apriori, generateRules
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import StandardScaler
from math import radians, sin, cos, sqrt, asin, log
from common.recommend_result import show_result_by_apps
from sklearn.externals import joblib
from sklearn.cluster import DBSCAN
import common.handle_file as hf
import period.cos_dis as cd
from random import choices
from random import random
import pandas as pd
import common.scrapy
import os

# Base directory for input data files.
DATA_PATH = './data/'
# Raw GPS/app-usage record CSV consumed by data_disc().
GPS_PATH = '../datasets/md_cache/app_rec_gps_tmp.csv'
# GPS_PATH = './md_cache/app_rec_gps.csv'
# Candidate application names used to pad recommendations up to 5 entries.
APPS_LIST = ['akncapserver', 'Messaging', 'Mail for Exchange', 'Standby mode', 'Telephone', 'screensaver',
             'MMFControllerProxyServer-1c52', 'Maps', 'Local-c837d0a8', 'File manager']
# Period conditions: every (day-type, time-of-day) pair the model considers.
P_MATRIX = [('workday', 'Morning'), ('workday', 'Noon'), ('workday', 'Afternoon'), ('workday', 'Evening'),
            ('workday', 'Night'), ('holiday', 'Morning'), ('holiday', 'Noon'), ('holiday', 'Afternoon'),
            ('holiday', 'Evening'), ('holiday', 'Night')]

class PeriodClassifier:
    """Periodic app-usage classifier.

    Mines weighted periodic association rules (Apriori) from app-launch
    records enriched with time features, DBSCAN location clusters and
    bucketed speed-sensor readings, then recommends the next apps to
    launch for a given period condition.
    """

    def __init__(self):
        # The model has no tunable parameters; keep a reference to the
        # Apriori miner used during fitting.
        self.apriori = apriori

    def __get_items_app(self, DICT_PATH):
        """Read and return the raw content of the app->users inverted index.

        DICT_PATH -- path to the serialized inverted-index text file.
        """
        # 'with' guarantees the handle is closed even if read() raises.
        with open(DICT_PATH, 'r', encoding='iso-8859-1') as file_obj:
            return file_obj.read()

    def data_disc(self):
        """Discretize the raw records and cache the result.

        Loads GPS_PATH, clusters locations, derives calendar/period
        features and speed buckets, then writes ./cache/ResultData.csv.
        """
        self.rules = {}
        # Load the raw app-usage records.
        self.data = pd.read_csv(GPS_PATH)
        print('Input data:')
        print(self.data)

        # Cluster (latitude, longitude) pairs into discrete location ids.
        self.__cluster_location()
        # Derive date / month / week / period features from timestamps.
        self.__split_time()
        # Bucket the raw speed readings into movement classes.
        self.__proc__speed()

        print('Result data:')
        print(self.data)
        # Persist the preprocessed table for the later stages.
        self.data.to_csv('./cache/ResultData.csv', index=False)
        print('ResultData-Finished...')

    def fit_model(self):
        """Train the model.

        Mines frequent itemsets per period condition, generates
        association rules, computes per-app weights and the user/period
        correlation matrix, caching all artifacts under ./cache/.
        """
        self.data_disc()

        # Mine frequent itemsets (L) and support data per period condition.
        L, supData = self.__associate_app()
        # Generate rules from frequent itemsets and minimum confidence.
        # BUGFIX: iterate the shared keys directly instead of
        # zip(L, supData) — both dicts are built with the same keys, and
        # this removes the dependency on matching iteration order.
        for key in L:
            self.rules[key] = generateRules(L[key], supData[key])
        # Frequent itemsets L
        hf.write_file(str(L), './cache/L.txt', 'L')
        # Support information for all candidate itemsets
        hf.write_file(str(supData), './cache/supData.txt', 'supData')
        # Generated rules
        hf.write_file(str(self.rules), './cache/Rules.txt', 'Rules')
        print('app开启事件次数：', self.click_dict)

        # Compute app weights from click counts and download numbers.
        weight_cal(self.click_dict, self.down_app, './cache/weight.txt')

        # Build the correlation matrix.
        self.__corr_matrix()

    def test_model(self):
        """Replay the event list of one target user and print accuracy.

        Recommendations with fewer than 5 entries are padded with random
        candidates from APPS_LIST before scoring.
        """
        # Number of scored events
        event_num = 0
        # Target user id
        target_user = 5449.0
        # Number of hits
        hitNum = 0
        # Periodic-rule classifier producing recommendations
        p = PeriodClassifier()
        # Preprocessed app record list
        APP_SYS_PATH = './cache/ResultData.csv'
        # Trained rule model path
        MODEL_PATH = './cache/Rules.txt'
        # Event list for the target user
        EventList = list(get_event_timestamp_list(APP_SYS_PATH, target_user))
        # app -> users inverted index (loaded but not used below —
        # NOTE(review): confirm whether this is still needed)
        items_app = self.__get_items_app('../user/cache/DictItem2Users.txt')
        # Period-condition features of the previous event
        feature_last = {}

        index = 0
        apps_tmp = []
        date_dict_tmp = {}
        for event in EventList:
            # NOTE(review): 'global date_dict' shares a module-level name
            # with __split_time; a local variable would likely suffice.
            global date_dict
            date_dict = {'location': event[2], 'speed': event[3]}
            date_dict = format_time(date_dict, event[1])

            print("-----------------------App周期律-----------------------")
            print("日期：" + date_dict['date'])
            print("星期：" + date_dict['week'])
            print("月份：" + date_dict['month'])
            print("时间：" + date_dict['period'])

            if index == 0 or not dicts_equal(date_dict, feature_last):
                # New period condition: no "previous app" context.
                last_app = ''
            else:
                last_app = EventList[index - 1][0]

            apps = p.get_result(date_dict, MODEL_PATH, last_app)
            # Pad the recommendation list up to 5 entries.
            while len(apps) < 5:
                if date_dict_tmp == date_dict:
                    # Same period condition as last event: reuse its list.
                    apps = apps_tmp
                else:
                    # Random candidate [name, score]; skip duplicates.
                    app_random = [choices(APPS_LIST)[0], random()]
                    isNotContain = lol_contain_app(apps, app_random)
                    if isNotContain[0]:
                        continue
                    else:
                        apps.append(app_random)

            apps_tmp = apps
            date_dict_tmp = date_dict
            hf.write_file(str(apps), './cache/AppsResult.txt', 'AppsResult')
            # NOTE(review): this overwrites the accumulated counters every
            # iteration before they are incremented below — confirm that
            # show_result_by_apps returns running totals.
            hitNum, event_num = show_result_by_apps(APP_SYS_PATH, apps, date_dict)
            print("用户%s：%s" % (target_user, event[0]))
            if event[0] in [k[0] for k in apps]:
                hitNum += 1.0
            event_num += 1.0
            index += 1
            feature_last = date_dict
            print("--------------------------------------------------------")
        print("预测准确度：", float(hitNum) / event_num)

    def get_result(self, PERIOD_FEATURE, MODEL_PATH, last_app):
        """Return up to 5 [app_name, score] suggestions.

        PERIOD_FEATURE -- dict with 'date', 'period', 'location', 'speed'.
        MODEL_PATH     -- path to the serialized rule dictionary.
        last_app       -- previously launched app name, or '' when unknown.
        """
        # Load the serialized rule model.
        with open(MODEL_PATH, 'r') as model_file:
            model_result = model_file.read()
        # SECURITY: eval() executes arbitrary code from the file. The rule
        # dump contains frozenset literals, so ast.literal_eval cannot
        # parse it — only load model files from a trusted location.
        model_result_dict = eval(model_result)

        isMatched = False
        apps_association = []
        # First pass: exact match on period, month/day, location and speed.
        for rules_key in model_result_dict:
            if rules_key[1] == PERIOD_FEATURE['period'] and PERIOD_FEATURE['date'][5:] in rules_key[3] and rules_key[4] == PERIOD_FEATURE['location'] and rules_key[5] == PERIOD_FEATURE['speed']:
                apps_association = model_result_dict[rules_key]
                isMatched = True
                break
        # Fallback: relax the location/speed constraints.
        if not isMatched:
            for rules_key in model_result_dict:
                if rules_key[1] == PERIOD_FEATURE['period'] and PERIOD_FEATURE['date'][5:] in rules_key[3]:
                    apps_association.extend(model_result_dict[rules_key])

        apps = []
        # Highest-support rules first.
        apps_sorted = sorted(apps_association, key=lambda x: x[2], reverse=True)
        for apps_tuple in apps_sorted:
            if last_app != '':
                # Rule tuples longer than 2 encode app -> app transitions;
                # collect the successors of last_app, merging duplicates.
                if len(last_app) != 0 and len(apps_tuple) > 2:
                    index_of_tuple = 0
                    for apps_relation in apps_tuple:
                        is_contained = False
                        if index_of_tuple + 1 < len(apps_tuple):
                            is_contained, index_of_list = lol_contain(apps, format_frozenset(apps_tuple[index_of_tuple + 1]))
                        if format_frozenset(apps_relation) == last_app and not is_contained and index_of_tuple < len(apps_tuple) - 2:
                            apps.append([format_frozenset(apps_tuple[index_of_tuple + 1]), apps_tuple[-1]])
                        elif format_frozenset(apps_relation) == last_app and is_contained:
                            apps[index_of_list] = [apps[index_of_list][0], apps[index_of_list][1] + apps_tuple[-1]]
                        index_of_tuple += 1
            else:
                # BUGFIX: look up by the formatted name so it matches the
                # entries appended below; the raw frozenset never matched.
                is_contained, index_of_list = lol_contain(apps, format_frozenset(apps_tuple[0]))
                if len(apps_tuple) == 2 and not is_contained:
                    apps.append([format_frozenset(apps_tuple[0]), apps_tuple[-1]])
                elif len(apps_tuple) == 2 and is_contained:
                    # BUGFIX: accumulate the score at the found index; the
                    # original indexed the list with the frozenset itself
                    # (a TypeError at runtime).
                    apps[index_of_list] = [apps[index_of_list][0], apps[index_of_list][1] + apps_tuple[-1]]

            if len(apps) == 5:
                break

        return apps

    def __cluster_location(self):
        """Cluster (latitude, longitude) pairs into location ids via DBSCAN.

        When a cached model exists it is applied to self.data; otherwise a
        new model is trained on ./data/gps.csv and cached.
        """
        if os.path.exists('./cache/dbscan.pkl'):
            app_location = self.data[['latitude', 'longitude']]
            # NOTE(review): the cached model was trained on standardized
            # coordinates but no scaler is applied here — confirm whether
            # the raw coordinates are intended.
            dbsc = joblib.load('./cache/dbscan.pkl')
            # NOTE(review): fit_predict refits the estimator on app_location
            # rather than reusing the cached clustering.
            self.data['location'] = dbsc.fit_predict(app_location)
            del app_location
        else:
            gps_location = pd.read_csv('./data/gps.csv', sep='\t').loc[:40000, ['latitude', 'longitude']]

            # Standardize coordinates before clustering.
            ss = StandardScaler()
            gps_location = ss.fit_transform(gps_location)

            # Train the DBSCAN model and cache it.
            # NOTE(review): this branch never assigns self.data['location'],
            # so a first run (no cached model) leaves the downstream steps
            # without that column — confirm intended workflow.
            dbscan = DBSCAN(eps=0.8, min_samples=10)
            dbscan.fit(gps_location)
            joblib.dump(dbscan, './cache/dbscan.pkl')

    def __split_time(self):
        """Derive calendar features from timestamps and merge into self.data.

        Maps weekday names onto 'workday'/'holiday' day types.
        """
        global date_dict
        date_list = []

        # One feature dict per record, keyed back to the row via db_key.
        for i in self.data.values:
            date_dict = {'db_key': i[0], 'time': i[4]}
            date_dict = format_time(date_dict, i[4])
            date_list.append(date_dict)

        date_dt = pd.DataFrame(date_list)

        # Collapse weekdays into the workday/holiday dichotomy.
        date_dt.loc[date_dt['week'].isin(['Mon', 'Tue', 'Wed', 'Thu', 'Fri']), 'week'] = 'workday'
        date_dt.loc[date_dt['week'].isin(['Sat', 'Sun']), 'week'] = 'holiday'

        self.data = pd.merge(self.data, date_dt, on='db_key').sort_values('time_x')
        self.data.reset_index(inplace=True)

        del self.data['index']
        del date_dict
        del date_list

    def __associate_app(self):
        """Mine periodic association rules over app launches.

        Builds per-user event feature tuples, click/download statistics,
        and runs Apriori both per user and per period condition.

        Returns (whole_L_dict, whole_supData_dict) keyed by period
        condition tuple.
        """
        columns = list(self.data.columns)
        # Distinct period-condition feature tuples seen so far
        self.feature = []
        # App association accumulator for the current condition
        associate_app = ''
        # App lists aligned with self.feature
        self.associate_list = []
        # Per-user association lists
        self.associate_dict = {}
        # Apps launched per period condition
        self.all_of_period = {}
        # Per-user event feature tuples
        self.listUser2Period = {}
        # Per-user raw feature tuples (intermediate)
        self.features_tmp = {}
        # Frequent itemsets per user
        L_dict = {}
        # Support data per user
        supData_dict = {}
        # Frequent itemsets per period condition
        whole_L_dict = {}
        # Support data per period condition
        whole_supData_dict = {}
        # App click counts per user
        self.click_dict = {}
        # App-store download numbers per user
        self.down_app = {}

        # Walk every record and split the data per user.
        for index, value in enumerate(self.data['date']):
            userid = self.data.iloc[index, columns.index('userid')]
            date = self.data.iloc[index, columns.index('date')]
            month = self.data.iloc[index, columns.index('month')]
            week = self.data.iloc[index, columns.index('week')]
            period = self.data.iloc[index, columns.index('period')]
            speed_split = self.data.iloc[index, columns.index('speed_split')]
            name = self.data.iloc[index, columns.index('name')]
            location = self.data.iloc[index, columns.index('location')]

            # Skip records with a missing (NaN) app name.
            if type(name) != str:
                continue

            # Event feature tuple for this user.
            user_rank = (week, period, month, date, speed_split, location, name)
            # Group feature tuples per user.
            if userid in self.listUser2Period:
                self.listUser2Period[userid].append(user_rank)
            else:
                self.listUser2Period[userid] = [user_rank]
            # Maintain per-user click counts; fetch download numbers only
            # the first time an app is seen for a user.
            if userid in self.click_dict:
                if name in self.click_dict[userid]:
                    self.click_dict[userid][name] += 1
                else:
                    self.click_dict[userid][name] = 1
                    self.down_app[userid][name] = common.scrapy.spider(name)
            else:
                self.click_dict[userid] = {name: 1}
                self.down_app[userid] = {name: common.scrapy.spider(name)}

        hf.write_file(str(self.click_dict), './cache/ClickDict.txt', 'ClickDict')
        hf.write_file(str(self.down_app), './cache/DownloadNums.txt', 'DownloadNums')

        self.associate_list = []
        for i in self.listUser2Period:
            for p in self.listUser2Period[i]:
                # p[:-1] is the period condition, p[-1] the launched app.
                if p[:-1] not in self.feature:
                    self.feature.append(p[:-1])
                    associate_app = [p[-1]]
                    self.associate_list.append(associate_app)
                else:
                    index = self.feature.index(p[:-1])
                    self.associate_list[index].append(p[-1])

                self.features_tmp.setdefault(i, []).append(p)

                if p[:-1] not in self.all_of_period.keys():
                    self.all_of_period[p[:-1]] = [p[-1]]
                else:
                    self.all_of_period[p[:-1]].append(p[-1])

            # NOTE(review): every user is assigned the same shared list
            # accumulated across all users — confirm this is intended.
            self.associate_dict[i] = self.associate_list

        # Per-user Apriori: itemset support and rule confidence.
        for user in self.associate_dict:
            L, supData = apriori(self.associate_dict[user], 0.5)
            L_dict[user] = L
            supData_dict[user] = supData

        print('User-Single')
        print(L_dict, supData_dict)

        # Per-period-condition Apriori over all users' launches.
        for period_key in self.all_of_period:
            # BUGFIX: mine the apps of this period condition; the original
            # reused self.associate_dict[user] (leftover loop variable).
            L, supData = apriori(self.all_of_period[period_key], 0.5)
            whole_L_dict[period_key] = L
            whole_supData_dict[period_key] = supData

        print('Whole')
        print(whole_L_dict, whole_supData_dict)

        return whole_L_dict, whole_supData_dict

    @staticmethod
    def __haversine(lat1, lon1, lat2, lon2):
        """Great-circle distance in kilometers (Haversine formula).

        BUGFIX: declared @staticmethod — the original had no 'self'
        parameter, so an instance call would have shifted the arguments.
        """
        R = 6372.8                      # Earth radius in kilometers

        dLat = radians(lat2 - lat1)
        dLon = radians(lon2 - lon1)
        lat1 = radians(lat1)
        lat2 = radians(lat2)

        a = sin(dLat/2)**2 + cos(lat1)*cos(lat2)*sin(dLon/2)**2
        c = 2*asin(sqrt(a))

        return R * c

    def __proc__speed(self):
        """Bucket raw speed readings into discrete movement classes."""
        speed_list = []

        # '\\N' marks a missing sensor reading (treated as static).
        for i in self.data['speed']:
            if i == '\\N':
                speed_list.append(0)    # Static : 0
            elif float(i) <= 0.5:
                speed_list.append(0)    # Static : 0
            elif float(i) <= 1.0:
                speed_list.append(1)    # Walking : 1
            elif float(i) <= 2:
                speed_list.append(2)    # Running : 2
            else:
                speed_list.append(3)    # Transportation : 3

        self.data['speed_split'] = speed_list

    def __corr_matrix(self):
        """Build the user x period-condition correlation matrix.

        For each user and each (day-type, period) pair, collects the set
        of (speed, location, app) tuples observed under that condition.
        """
        self.corr_matrix = {}

        for u in self.listUser2Period:
            self.corr_matrix[u] = {}
            for i in P_MATRIX:
                n = 0
                # Placeholder, replaced by the first matching observation.
                self.corr_matrix[u][i] = {(0, '', '')}
                for j in self.listUser2Period[u]:
                    if (j[0], j[1]) == i:
                        n += 1
                        if n > 1:
                            # Additional observation under the same condition.
                            self.corr_matrix[u][i].add((j[4], j[5], j[6]))
                        else:
                            self.corr_matrix[u][i] = {(j[4], j[5], j[6])}

        print('相关矩阵：', self.corr_matrix)
        hf.write_file(str(self.corr_matrix), './cache/matrix.txt', 'Matrix')

    def __cal_similarity(self, userid, supData):
        """Compute cosine similarity between the target user and all others.

        Features are built from the correlation matrix; app features are
        weighted by support and user weight. Results are stored in
        self.similarities keyed by the other user's id.
        """
        # Maps feature dicts to numeric vectors.
        vec = DictVectorizer()
        features = {}           # target-user feature dict
        c_features = {}         # cloud-user feature dict
        arrays = []             # target-user feature vector
        c_arrays = []           # cloud-user feature vector
        self.similarities = {}

        for f in P_MATRIX:
            for item in self.corr_matrix[userid][f]:
                features[f[0]+','+f[1]+','+'speed'] = item[0]
                features[f[0]+','+f[1]+','+'location'] = item[1]
                try:
                    # NOTE(review): 'u_weight' is not defined in this scope,
                    # so this always raises and the except path zeroes the
                    # feature — confirm where u_weight should come from.
                    features[f[0]+','+f[1]+','+item[2]] = supData[userid][item[2]]*u_weight[userid][item[2]]
                except Exception as e:
                    features[f[0] + ',' + f[1] + ',' + item[2]] = 0

        # Vectorize the target user's features.
        arrays = vec.fit_transform(features).toarray()

        # Compare against every other user in the correlation matrix.
        for c_user in self.corr_matrix:
            if userid != c_user:
                for f in P_MATRIX:
                    for item in self.corr_matrix[c_user][f]:
                        c_features[f[0]+','+f[1]+','+'speed'] = item[0]
                        c_features[f[0]+','+f[1]+','+'location'] = item[1]

                        try:
                            # NOTE(review): same undefined 'u_weight' issue
                            # as above; also note the frozenset key here vs
                            # the plain key used for the target user.
                            c_features[f[0]+','+f[1]+','+item[2]] = \
                                float(supData[c_user][frozenset([item[2]])])*float(u_weight[c_user][item[2]])
                        except Exception as e:
                            c_features[f[0] + ',' + f[1] + ',' + item[2]] = 0

                # Vectorize the cloud user's features.
                c_arrays = vec.fit_transform(c_features).toarray()

                # Cosine similarity between the two users.
                self.similarities[c_user] = cd.cos(arrays[0], c_arrays[0])

        print('features:', features)
        print('c_features:', c_features)
        print('arrays:', arrays)
        print('c_arrays:', c_arrays)

    def __get_neighbors(self, number):
        """Keep the 'number' most similar users as neighbors.

        Note: converts self.similarities from a dict into a sorted list
        of (user, similarity) pairs.
        """
        self.neighbors = []
        self.similarities = sorted(self.similarities.items(), key=lambda asd: asd[1], reverse=True)
        self.neighbors = self.similarities[:number]


if __name__ == '__main__':
    # Example period-condition features.
    # NOTE(review): defined but never passed to test_model — confirm intent.
    PERIOD_FEATURE = {'date': '2019-09-01', 'period': 'Night', 'location': 3, 'speed': 0}

    p = PeriodClassifier()

    # p.fit_dbscan_model()
    # p.fit_model()
    p.test_model()
