import os
import sys
import pickle
import logging
from pyspark import *
from pyspark.sql import *
from graphframes import *
from pyspark.sql.functions import countDistinct
from pyspark.sql import functions
from pyspark.sql.types import *

from data_process.result_process import csv_save


class PersonGraph:
    """Per-user call-graph feature extractor built on GraphFrames.

    Builds (or loads from a pickle) an undirected call graph from user nodes
    and call edges, then computes several per-MSISDN neighborhood statistics
    via motif finding, joins them into one table keyed by USER_ID, and
    persists intermediate and final results as CSV via ``csv_save``.

    NOTE(review): ``config`` is assumed to be a nested dict exposing the keys
    read below ('output', 'model', 'data_process', 'monthid', 'province',
    'mode', 'load_graph_model') — confirm against the caller.
    """

    def __init__(self, config, nodes, edges):
        """Set up paths from config, start Spark, and build or load the graph.

        :param config: nested configuration dict (see class docstring).
        :param nodes: rows of user vertices; first feature is the vertex id.
        :param edges: rows of call edges; first two features are the endpoints.
        """
        self.config = config
        self.load_graph_model = config['load_graph_model']
        self.statis_ym = self.config['monthid']
        self.province = self.config['province']
        self.mode = config['mode']
        # 'local' mode reads/writes local paths; any other mode uses cluster paths.
        self.model_path = self.config['output']['local_graph_model_save_path'] if self.mode == 'local' else self.config['output']['graph_model_save_path']
        self.inter_dir = self.config['output']['local_inter_save_dir'] if self.mode == 'local' else self.config['output']['inter_save_dir']
        self.graph_result_table_name = self.config['output']['graph_result_table_name']
        self.init_spark()

        # "1" means reuse a previously pickled graph; anything else rebuilds it
        # from the supplied nodes/edges and saves the new model.
        if self.load_graph_model == "1":
            self.G = self.model_load()
        else:
            self.G = self.create_graph(nodes, edges)
            self.model_save()

        self.msisdn_user_map_path = self.config['output']['msisdn_user_map_path']
        self.load_msisdn_user_map()

    def init_spark(self):
        """Create the SparkSession used by all graph computations.

        Runs with a local master; extra GraphFrames jars are attached only in
        non-local mode (in local mode they are assumed to be on the classpath).
        """
        jar_files = self.config['model']['jar_files']
        conf = SparkConf()
        conf.set('spark.executor.memory', '500g')
        conf.set('spark.driver.memory', '500g')
        conf.setMaster('local')
        conf.set("spark.scheduler.capacity", "10")
        if self.config['mode'] != 'local':
            conf.set("spark.jars", jar_files)
        self.spark = SparkSession.builder.config(conf=conf).appName('graph_model_gf').getOrCreate()

    def load_msisdn_user_map(self):
        """Load the pickled MSISDN→USER_ID map and keep it as [msisdn, user_id] pairs.

        Changed from the original version: msisdn_user_map is stored under
        ``model_path``, not ``inter_dir``. Exits the process if the file is
        missing.
        """
        map_path = self.model_path + self.msisdn_user_map_path
        if not os.path.exists(map_path):
            logging.getLogger('graph_model').error(map_path + " file does not exist!")
            sys.exit(-1)
        else:
            # SECURITY NOTE: pickle.load is unsafe on untrusted files; this map
            # is expected to be produced by this pipeline itself.
            with open(map_path, "rb") as f:
                msisdn_map = pickle.load(f)
            # Stored as a list of [msisdn, user_id] pairs so it can feed
            # spark.createDataFrame directly in calculate().
            self.msisdn_map = [[k, v] for k, v in msisdn_map.items()]
            logging.getLogger('graph_model').info(f'Success load msisdn_user_map {map_path}')

    def create_graph(self, nodes, edges):
        """Build an undirected GraphFrame from node and edge rows.

        The first user feature is taken as the vertex id; the first two call
        features are assumed to be the edge endpoints (TODO confirm against
        data_process config). Each edge is inserted in both directions
        (src→dst and dst→src) so motif queries see an undirected graph.

        :return: the constructed GraphFrame.
        """
        node_property = self.config['data_process']['user_features']
        edge_property = self.config['data_process']['call_features']
        # Everything after the two endpoint columns is carried as edge properties.
        if len(edge_property) > 2:
            edge_pro = edge_property[2:]
        else:
            edge_pro = []
        logging.getLogger('graph_model').info('start create v')

        v = self.spark.createDataFrame(nodes, ["id"]+node_property[1:])
        logging.getLogger('graph_model').info(f'create v_dataframe {v.head()}')
        del nodes

        v = v.drop_duplicates(subset=['id'])
        logging.getLogger('graph_model').info(f'get v_dataframe {v.count()}, {v.head()}')

        # Forward direction: columns in their natural (src, dst) order.
        e1 = self.spark.createDataFrame(edges, ["src", "dst"] + edge_pro)
        logging.getLogger('graph_model').info(f'create e1_dataframe {e1.count()}, {e1.head()}')
        e1 = e1.drop_duplicates(subset=["src", "dst"])
        logging.getLogger('graph_model').info(f'filter e1_dataframe {e1.count()}, {e1.head()}')

        # Reverse direction: labelling the first column "dst" swaps the endpoints.
        e2 = self.spark.createDataFrame(edges, ["dst", "src"] + edge_pro)
        logging.getLogger('graph_model').info(f'create e2_dataframe {e2.head()}')
        e2 = e2.drop_duplicates(subset=["src", "dst"])
        logging.getLogger('graph_model').info(f'filter e2_dataframe {e2.count()}, {e2.head()}')
        del edges

        e = e1.union(e2)
        logging.getLogger('graph_model').info(f'get final e_dataframe {e.count()}, {e.head()}')
        del e1, e2

        G = GraphFrame(v, e)
        logging.getLogger('graph_model').info(f'Success create Graph, {G.vertices.count()} vertices, {G.edges.count()} edges.')
        return G

    def _finalize_feature(self, results, new_cols):
        """Shared tail of every feature query: rename, fill empty, cast, save.

        :param results: two-column DataFrame (id, count) from a motif query.
        :param new_cols: [msisdn_col_name, feature_col_name]; the feature name
            also becomes part of the intermediate CSV file name.
        :return: the renamed DataFrame with the count cast to string.
        """
        # An empty motif result has no usable schema, so substitute an
        # explicit empty string-typed frame before renaming.
        if results.count() == 0:
            schema = StructType(
                [
                    StructField(new_cols[0], StringType(), True),
                    StructField(new_cols[1], StringType(), True)
                ]
            )

            results = self.spark.createDataFrame([], schema)

        results = results.toDF(*new_cols)
        results = results.withColumn(new_cols[1], results[new_cols[1]].cast('String'))

        result_table_name = f'{new_cols[-1]}_{self.province}_{self.statis_ym}.csv'
        save_path = self.inter_dir + result_table_name

        csv_save(results, save_path)
        logging.getLogger('graph_model').info(f'Results saved to {save_path} successfully!')
        return results

    def get_1hop_neighbor(self):
        """Count distinct 1-hop neighbors for each vertex with NEW_RCN_ID == '1'."""
        results = self.G.find("(a)-[e]->(b)") \
            .filter("a.NEW_RCN_ID == '1'").groupby("a.id").agg(countDistinct("b.id"))  # DataFrame
        return self._finalize_feature(results, ['MSISDN', '1_HOP_NEI_COUNT'])

    def get_call_another_user(self):
        """Count distinct called numbers that share the caller's IDTY_NBR."""
        results = self.G.find("(a)-[e]->(b)").filter("a.NEW_RCN_ID == '1' and a.IDTY_NBR == b.IDTY_NBR").groupby(
            "a.id").agg(countDistinct("b.id"))
        return self._finalize_feature(results, ['MSISDN', 'CALL_OTHER_USER_COUNT'])

    def get_1hop_connected_neighbor(self):
        """Count distinct pairs of 1-hop neighbors that are themselves connected."""
        results = self.G.find("(a)-[ab]->(b);(a)-[ac]->(c);(b)-[bc]->(c)").filter("a.NEW_RCN_ID == '1' and b.id != c.id").groupby(["a.id"]).agg(countDistinct("b.id")+countDistinct("c.id"))
        return self._finalize_feature(results, ['MSISDN', "1_HOP_CONNECT_NEI_COUNT"])

    def get_common_neighbor_with_other_user(self):
        """Count distinct neighbors shared with another number of the same IDTY_NBR."""
        results = self.G.find("(a)-[ac]->(c);(b)-[bc]->(c)").filter("a.NEW_RCN_ID == '1' and a.IDTY_NBR == b.IDTY_NBR and a.id != b.id").groupby(["a.id"]).agg(countDistinct("c.id"))
        return self._finalize_feature(results, ['MSISDN', 'USERS_COMMON_NEI_COUNT'])

    def get_1hop_neighbor_connected_with_other_user(self):
        """Count distinct neighbors whose own neighbor belongs to a same-IDTY_NBR user."""
        results = self.G.find("(a)-[ac]->(c);(b)-[bd]->(d); (c)-[cd]->(d)").filter("a.NEW_RCN_ID == '1' and a.IDTY_NBR == b.IDTY_NBR and a.id != b.id and c.id != d.id").groupby(["a.id"]).agg(countDistinct("c.id"))
        return self._finalize_feature(results, ['MSISDN', "USERS_1_HOP_NEI_CONNECT_COUNT"])

    def calculate(self):
        """Run all five feature queries, outer-join them on MSISDN, map to USER_ID.

        Missing feature values are filled with '0'; a STATIS_YM column records
        the billing month. The final table (USER_ID keyed, MSISDN dropped) is
        saved as CSV and returned.
        """
        logging.getLogger('graph_model').info('Start Graph calculation!')
        res1 = self.get_1hop_neighbor()
        logging.getLogger('graph_model').info(f'Get 1hop_neighbor! length {res1.count()}, {res1.head()}')

        res2 = self.get_call_another_user()
        logging.getLogger('graph_model').info(f'Get call_another_user! length {res2.count()}, {res2.head()}')

        res = res1.join(res2, 'MSISDN', "outer")
        del res1, res2

        res3 = self.get_1hop_connected_neighbor()
        logging.getLogger('graph_model').info(f'Get 1hop_connected_neighbor! length {res3.count()}, {res3.head()}')

        res = res.join(res3, 'MSISDN', "outer")
        del res3

        res4 = self.get_common_neighbor_with_other_user()
        logging.getLogger('graph_model').info(
            f'Get common_neighbor_with_other_user! length {res4.count()}, {res4.head()}')

        res = res.join(res4, 'MSISDN', "outer")
        del res4

        res5 = self.get_1hop_neighbor_connected_with_other_user()
        logging.getLogger('graph_model').info(
            f'Get 1hop_neighbor_connected_with_other_user! length {res5.count()}, {res5.head()}')

        res = res.join(res5, 'MSISDN', "outer")
        del res5

        res = res.fillna('0')
        # Billing month the results belong to.
        res = res.withColumn("STATIS_YM", functions.lit(self.statis_ym))

        logging.getLogger('graph_model').info(f'Finished Graph calculation!')
        logging.getLogger('graph_model').info(f"Get {res.count()} results, {res.head()}")

        # merge user_id
        logging.getLogger('graph_model').info(f'Start to merge MSISDN to USER_ID!')

        schema = StructType(
            [
                StructField("MSISDN", StringType(), True),
                StructField("USER_ID", StringType(), True)
            ]
        )
        df_map = self.spark.createDataFrame(self.msisdn_map, schema=schema)
        # Right join keeps every computed result row; rows without a mapping
        # get a null USER_ID and are removed by dropna() below.
        df = df_map.join(res, 'MSISDN', 'right')
        logging.getLogger('graph_model').info(f'Merge MSISDN to USER_ID! Get {df.count()} results, {df.head()}')
        df = df.dropna()
        df = df.drop_duplicates(subset=['MSISDN', 'USER_ID'])
        df = df.drop('MSISDN')
        logging.getLogger('graph_model').info(f'Success merge MSISDN to USER_ID! Get final {df.count()} results, {df.head()}')

        result_table_name = self.graph_result_table_name  # f'graph_result_{self.province}_{self.statis_ym}.csv'
        save_path = self.inter_dir + result_table_name

        csv_save(df, save_path)
        logging.getLogger('graph_model').info(f'Results saved to {save_path} successfully!')

        return df

    def model_save(self):
        """Pickle the GraphFrame to model_path; log (but do not raise) on failure."""
        model_name = self.config['output']['gf_graph_model_name']
        try:
            with open(self.model_path + model_name, "wb") as f:
                pickle.dump(self.G, f)

            logging.getLogger('graph_model').info(f'Graph_model saved to {self.model_path} successfully!')
        except Exception as e:
            # Failure to persist is logged as an error but deliberately does not
            # abort the run (the in-memory graph is still usable).
            logging.getLogger('graph_model').error(f'Failed to save Graph_model to {self.model_path}! Error: {e}')

    def model_load(self):
        """Load a previously pickled GraphFrame; exit the process if missing.

        SECURITY NOTE: pickle.load is unsafe on untrusted files; the model is
        expected to be produced by model_save() of this pipeline.
        """
        model_name = self.config['output']['gf_graph_model_name']
        if not os.path.exists(self.model_path + model_name):
            logging.getLogger('graph_model').error(self.model_path + model_name + " file does not exist!")
            sys.exit(-1)
        else:
            with open(self.model_path + model_name, "rb") as f:
                model = pickle.load(f)
        logging.getLogger('graph_model').info(f'Graph_model load {self.model_path}{model_name} successfully!')
        return model