# coding: utf-8

import json
import os
import re
import time
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from datetime import timedelta, datetime
from urllib.parse import quote

from pymongo import MongoClient
from pyspark import SparkContext, SparkConf
from pyspark.sql import functions
from pyspark.sql import SparkSession
from pyspark.sql.types import *


# Paths are resolved relative to this file; extra Spark jars (HBase input
# format, converters, ...) are expected in the local ./jar directory.
HOME_PATH = os.environ['HOME']
BASE_PATH = os.path.dirname(__file__)
JARS_PATH = os.path.join(BASE_PATH, 'jar')

# Spark application settings.
SPARK_NAME = 'foreign-user-portrait'
# SPARK_HOST = 'local[*]'
SPARK_HOST = 'spark://10.0.3.93:7077'
# SPARK_HOST = 'spark://10.0.3.125:7077'
SPARK_MAX_CORES = 4
SPARK_EXECUTOR_CORES = 4
SPARK_NUM_EXECUTORS = 10

# HBase ZooKeeper quorum and the table holding raw user-action rows.
# HBASE_HOST = 'hb-uf6i3710q15t89giw-001.hbase.rds.aliyuncs.com:2181'
HBASE_HOST = 'hb-uf66z31i3t6sl387t-001.hbase.rds.aliyuncs.com:2181'
HBASE_TABLE = 'foreign-news-action'


# Size of the thread pool used to write aggregated portraits to MongoDB.
THREAD_POOL_SIZE = 100

# MongoDB connection settings.
# NOTE(review): credentials are hard-coded in source; consider moving them
# to environment variables or a secrets store.
# MONGO_HOST = 'dds-uf6291853e669e341.mongodb.rds.aliyuncs.com'
MONGO_HOST = 'dds-uf6e2e29f3ff6bd41.mongodb.rds.aliyuncs.com'
MONGO_PORT = 3717
MONGO_USERNAME = quote('root')
MONGO_PASSWORD = quote('CfLceH2vCAFc')
MONGO_URI = 'mongodb://%s:%s@%s:%s' % (MONGO_USERNAME, MONGO_PASSWORD, MONGO_HOST, MONGO_PORT)
MONGO_CONFIG_TYPE = 'constant'
FOREIGN_ACTION_SCORE = 'foreign-action-score'

# Module-level Mongo client shared by config loading and storage.
mongo = MongoClient(MONGO_URI)
mongo_database = mongo.foreign

# Action-id legend (English translation of the Chinese list below):
#  1 scroll article list      2 view article          3 comment on article
#  4 favourite article        5 leave article detail  6 open app
#  7 hide app                 8 un-favourite          9 expand summary
# 10 collapse summary
'''
1 刷文章列表
2 查看文章
3 评论文章
4 收藏文章
5 退出文章详情
6 打开app
7 隐藏app
8 取消收藏
9 展开摘要
10 收起摘要
'''
# Default action -> score weights; may be overridden from Mongo by
# initialize_config(). Actions absent from this dict are ignored.
ACTION_SCORE_DICT = {
    2: 5.0,    # view article
    3: 10.0,   # comment on article
    4: 20.0,   # favourite article
    8: -2.0,   # un-favourite article
    9: 2.0     # expand summary
}

# Processing window: start of today (local midnight) up to "now".
now = datetime.now()
begin_date = now - timedelta(hours=now.hour, minutes=now.minute, seconds=now.second, microseconds=now.microsecond)
end_date = now

# HBase scan time ranges are expressed in epoch milliseconds.
begin_timestamp, end_timestamp = int(begin_date.timestamp()) * 1000, int(end_date.timestamp()) * 1000

print('begin_date=%s' % begin_date)
print('end_date=%s' % end_date)


def initialize_config():
    """Load action->score weights from the Mongo algorithm_config collection.

    Overrides the module-level ACTION_SCORE_DICT when a
    'foreign-action-score' document exists; otherwise the built-in
    defaults are kept.
    """
    global ACTION_SCORE_DICT
    document = mongo_database.algorithm_config.find_one(
        {'_id': FOREIGN_ACTION_SCORE, 'type': MONGO_CONFIG_TYPE})
    if not document:
        # No remote config -- keep the hard-coded defaults.
        print('DEFAULT ACTION_SCORE_DICT=%s' % ACTION_SCORE_DICT)
        return
    # Mongo stores the action ids as string keys; convert back to int.
    ACTION_SCORE_DICT = {int(action): weight for action, weight in document['score'].items()}
    print('ACTION_SCORE_DICT=%s' % ACTION_SCORE_DICT)


def get_spark_context(level='INFO'):
    """Build a SparkContext for the portrait job.

    level: log level forwarded to SparkContext.setLogLevel.
    """
    conf = SparkConf()
    # Ship every jar found in the local ./jar directory (HBase connectors).
    jar_csv = ','.join(os.path.join(JARS_PATH, name) for name in os.listdir(JARS_PATH))
    conf.set('spark.jars', jar_csv)
    print(jar_csv)
    conf.set('spark.num.executors', str(SPARK_NUM_EXECUTORS))
    conf.set('spark.executor.cores', str(SPARK_EXECUTOR_CORES))
    conf.set('spark.cores.max', str(SPARK_MAX_CORES))
    context = SparkContext(master=SPARK_HOST, appName=SPARK_NAME, conf=conf)
    context.setLogLevel(level)
    return context


def get_spark_session(spark_context):
    """Wrap the given SparkContext in a fresh, isolated SparkSession."""
    base_session = SparkSession(spark_context)
    return base_session.newSession()


def get_spark_rdd(spark_session):
    """Scan today's rows from the HBase action table as a (key, value) RDD.

    The scan is restricted server-side to the module-level
    [begin_timestamp, end_timestamp) window via TableInputFormat's
    time-range settings (epoch milliseconds).
    """
    scan_conf = dict()
    scan_conf['hbase.zookeeper.quorum'] = HBASE_HOST
    scan_conf['hbase.mapreduce.inputtable'] = HBASE_TABLE
    scan_conf['hbase.mapreduce.scan.timerange.start'] = str(begin_timestamp)
    scan_conf['hbase.mapreduce.scan.timerange.end'] = str(end_timestamp)
    return spark_session.sparkContext.newAPIHadoopRDD(
        inputFormatClass='org.apache.hadoop.hbase.mapreduce.TableInputFormat',
        keyClass='org.apache.hadoop.hbase.io.ImmutableBytesWritable',
        valueClass='org.apache.hadoop.hbase.client.Result',
        keyConverter='org.apache.spark.examples.pythonconverters.ImmutableBytesWritableToStringConverter',
        valueConverter='org.apache.spark.examples.pythonconverters.HBaseResultToStringConverter',
        conf=scan_conf
    )


# Matches a maximal run of literal "\xHH" escape sequences, e.g. "\xe4\xb8\xad".
_HEX_ESCAPE_RUN = re.compile(r'(?:\\x[0-9a-fA-F]{2})+')


def string_to_chinese(string):
    """Decode literal "\\xHH" escape runs in *string* into real characters.

    HBase cell values arrive with multi-byte UTF-8 characters rendered as
    literal backslash escapes (e.g. "\\xe4\\xb8\\xad" for '中').  Each maximal
    run of such escapes is converted to bytes and UTF-8 decoded; runs that do
    not form valid UTF-8 are left untouched.

    Replaces the original hand-rolled scanner, which was defective: the
    ``find()`` results had the start offset added *before* the ``== -1``
    comparisons (so the "not found" checks could never fire), and ``continue``
    without advancing the scan position could loop forever; both were masked
    by a bare ``except: pass``.
    """
    def _decode(match):
        escaped = match.group(0)
        try:
            return bytes.fromhex(escaped.replace('\\x', '')).decode('utf-8')
        except (ValueError, UnicodeDecodeError):
            # Not valid UTF-8 (or odd hex) -- keep the original text.
            return escaped
    return _HEX_ESCAPE_RUN.sub(_decode, string)


def _handler_row_score(row_dict):
    """Attach per-keyword scores to a parsed action row.

    Rows whose action has no configured weight, or which carry no keywords,
    are neutralised: user_id is cleared (so the row is filtered out
    downstream) and a zero-score placeholder keyword is substituted.
    """
    action_id = row_dict.get('action_id')
    keywords = row_dict['keywords']
    if action_id not in ACTION_SCORE_DICT or not keywords:
        row_dict['user_id'] = None
        row_dict['keywords'] = [{'keyword': '', 'score': 0.0}]
        return row_dict
    # Exponential time decay: the weight roughly halves every ~6.6 days.
    # NOTE(review): time.time() is epoch seconds -- confirm the stored
    # 'timestamp' is also seconds (the HBase scan range uses milliseconds).
    age_in_days = (time.time() - row_dict.get('timestamp')) / 86400
    weighted_score = ACTION_SCORE_DICT[action_id] * (0.9 ** age_in_days)
    row_dict['keywords'] = [
        {'keyword': word, 'score': weighted_score}
        for word in keywords if word
    ]
    return row_dict


def callback_parse_row(rows):
    """Assemble one HBase result (a list of JSON cell strings) into a row dict.

    Cells in the 'keywords' column family are collected into a keyword list
    (after un-escaping); every other cell becomes a qualifier -> value entry.
    Returns only the fields consumed downstream: user_id and the scored
    keyword list.
    """
    parsed = dict()
    keyword_list = list()
    for cell_json in rows:
        cell = json.loads(cell_json)
        if cell['columnFamily'] == 'keywords':
            keyword_list.append(string_to_chinese(cell['value']))
        else:
            parsed[cell['qualifier']] = cell['value']
    parsed['keywords'] = keyword_list
    # Self-assignment doubles as a presence check: raises KeyError when the
    # user_id cell is missing.
    parsed['user_id'] = parsed['user_id']
    parsed['action_id'] = int(parsed['action_id'])
    parsed['timestamp'] = int(parsed['timestamp'])
    parsed = _handler_row_score(parsed)
    return dict(user_id=parsed['user_id'], keywords=parsed['keywords'])


def to_data_frame(rdd):
    """Convert the raw HBase RDD into a DataFrame of (keywords, user_id).

    Each RDD value is a newline-separated list of JSON cell strings parsed by
    callback_parse_row.  Rows whose action could not be scored come back with
    user_id == None and are filtered out.
    """
    parsed_rdd = rdd.map(lambda x: (x[0], callback_parse_row(x[1].split('\n'))))
    # Build each row's values in the exact order declared by the schema
    # below.  The original relied on dict iteration order
    # ([x[1][i] for i in x[1]]), which yields (user_id, keywords) -- the
    # REVERSE of the schema -- so the columns could be silently mis-assigned.
    row_rdd = parsed_rdd.map(lambda x: [x[1]['keywords'], x[1]['user_id']])
    schema = StructType([
        StructField("keywords", ArrayType(
            StructType([
                StructField("keyword", StringType()),
                StructField("score", DoubleType())
            ])
        )),
        StructField("user_id", StringType()),
    ])
    data_frame = row_rdd.toDF(schema)
    return data_frame.filter("user_id is not null")


def handler_data_frame(data_frame):
    """Aggregate keyword scores per user.

    Explodes each (keywords, user_id) row into (user_id, keyword, score)
    triples, sums the scores per (user_id, keyword), then regroups into
    (user_id, [(keyword, total_score), ...]) pairs.
    """
    triple_rdd = data_frame.rdd.flatMap(
        lambda row: [(row[1], keyword, score) for (keyword, score) in row[0]])
    triple_schema = StructType([
        StructField("user_id", StringType()),
        StructField("keyword", StringType()),
        StructField("score", DoubleType()),
    ])
    summed = (triple_rdd
              .toDF(schema=triple_schema)
              .groupBy(["user_id", "keyword"])
              .agg(functions.sum('score').alias('score')))
    pair_rdd = summed.rdd.map(lambda row: (row.user_id, (row.keyword, row.score)))
    return pair_rdd.groupByKey().mapValues(list)


def handler_storage(row):
    """Persist one user's aggregated keyword scores to MongoDB.

    row: a (user_id, [(keyword, score), ...]) pair from handler_data_frame.
    The fresh scores are merged with any existing baseline found in
    user_portrait_basic, then the user_portrait document is upserted.
    """
    user_id, keywords = row
    if not user_id or not keywords:
        return
    keywords_dict = dict(keywords)
    basic = mongo_database.user_portrait_basic.find_one({'_id': user_id})
    # .get(): tolerate baseline documents that lack a 'keywords' field
    # (the original indexed it directly and could raise KeyError).
    if basic and basic.get('keywords'):
        # NOTE(review): Counter addition silently drops entries whose summed
        # score is <= 0, so negative-score actions (e.g. un-favourite) can
        # remove keywords entirely -- confirm this is intended.
        keywords_dict = dict(Counter(keywords_dict) + Counter(basic['keywords']))
    # replace_one(upsert=True) is the non-deprecated equivalent of the old
    # Collection.save() call (deprecated since pymongo 3.0).
    mongo_database.user_portrait.replace_one(
        {'_id': user_id},
        {'keywords': keywords_dict, 'createTime': datetime.now()},
        upsert=True
    )
    print('mongodb insert user_portrait success! user_id=%s' % user_id)


def main():
    """Entry point: pull today's actions from HBase, score them, and store
    the resulting user portraits in MongoDB."""
    initialize_config()
    spark_context = get_spark_context()
    spark_session = get_spark_session(spark_context)
    try:
        spark_rdd = get_spark_rdd(spark_session)
        spark_rdd.cache()
        count = spark_rdd.count()
        print('data_count----------------------------%s' % count)
        if count == 0:
            return
        data_frame = to_data_frame(spark_rdd)
        data_frame.printSchema()
        data_frame.show(n=10)
        print('data_frame----------------------------%s' % data_frame.count())
        rdd = handler_data_frame(data_frame)
        print('rdd-----------------------------------%s' % rdd.count())
        # Stream partitions to the driver instead of collect()ing everything,
        # and fan storage writes out over a thread pool (Mongo I/O bound).
        with ThreadPoolExecutor(THREAD_POOL_SIZE) as pool:
            pool.map(handler_storage, rdd.toLocalIterator())
    finally:
        spark_session.stop()
        spark_context.stop()


# Run the daily portrait job only when executed as a script.
if __name__ == '__main__':
    main()


'''
pip3 install pyspark==2.4.0
pip3 install pymongo==3.7.1
'''
