# coding: utf-8

# pip install redis pyspark

import os
import time
import json
import numpy as np
from redis import StrictRedis
from operator import itemgetter
from datetime import timedelta, datetime
from pyspark.sql import SparkSession
from pyspark import SparkContext, SparkConf
from pyspark.sql.types import StructField, IntegerType, StructType, StringType, DoubleType

HOME_PATH = os.environ['HOME']
BASE_PATH = os.path.dirname(__file__)
JARS_PATH = os.path.join(BASE_PATH, 'jar')  # extra jars shipped to the Spark workers

SPARK_NAME = 'foreign-news'
# SPARK_HOST = 'spark://10.0.3.125:7077'
SPARK_HOST = 'spark://10.0.3.93:7077'
HBASE_HOST = 'hb-uf6i3710q15t89giw-001.hbase.rds.aliyuncs.com:2181'  # zookeeper quorum
HBASE_TABLE = 'news'
# redis-cli -h 10.0.3.20 -a crawl#@!
# NOTE(review): credentials are hard-coded in source; move them to env vars / config.
REDIS_HOST = '10.0.3.20'
REDIS_PORT = 6379
REDIS_AUTH = 'crawl#@!'
REDIS_DB = 10


# Redis client used by handler_storage() to push recommendation lists.
redis = StrictRedis(
    host=REDIS_HOST,
    port=REDIS_PORT,
    password=REDIS_AUTH,
    db=REDIS_DB
)


# Minimum number of co-rated articles two users must share before a
# similarity is computed (see callback_user_sim).
MIN_ITEM_BAR = 1

# HBase scan window: the last 24 hours, as epoch milliseconds.
# NOTE(review): datetime.now() is naive local time -- confirm the HBase cell
# timestamps are written with the same clock/timezone.
end_date = datetime.now()
begin_date = end_date + timedelta(days=-1)
begin_timestamp, end_timestamp = int(begin_date.timestamp() * 1000), int(end_date.timestamp() * 1000)

print('begin_date=%s' % begin_date)
print('end_date=%s' % end_date)


def get_spark_context(level='INFO'):
    """Create a SparkContext bound to the configured standalone master.

    Ships every jar found under JARS_PATH to the cluster and caps the job
    at 10 cores.

    :param level: log level handed to setLogLevel (default 'INFO')
    :return: configured SparkContext
    """
    conf = SparkConf()
    jar_list = ','.join(os.path.join(JARS_PATH, name) for name in os.listdir(JARS_PATH))
    conf.set('spark.jars', jar_list)
    conf.set('spark.cores.max', '10')
    context = SparkContext(master=SPARK_HOST, appName=SPARK_NAME, conf=conf)
    context.setLogLevel(level)
    return context


def get_spark_session(spark_context):
    """Wrap an existing SparkContext in a fresh, isolated SparkSession."""
    session = SparkSession(spark_context)
    return session.newSession()


def get_spark_rdd(spark_session):
    """Scan the HBase news table over the module-level time window.

    Uses begin_timestamp/end_timestamp (the last 24 hours) as the scan
    time range and returns an RDD of (row_key, cells_as_string) pairs
    produced by the example string converters.
    """
    scan_conf = dict()
    scan_conf['hbase.zookeeper.quorum'] = HBASE_HOST
    scan_conf['hbase.mapreduce.inputtable'] = HBASE_TABLE
    scan_conf['hbase.mapreduce.scan.timerange.start'] = str(begin_timestamp)
    scan_conf['hbase.mapreduce.scan.timerange.end'] = str(end_timestamp)
    # Other TableInputFormat scan options, for reference:
    #   hbase.mapreduce.scan.row.start / .row.stop
    #   hbase.mapreduce.scan.column.family / .columns
    #   hbase.mapreduce.scan.timestamp
    #   hbase.mapreduce.scan.maxversions
    #   hbase.mapreduce.scan.cacheblocks / .cachedrows / .batchsize
    return spark_session.sparkContext.newAPIHadoopRDD(
        inputFormatClass='org.apache.hadoop.hbase.mapreduce.TableInputFormat',
        keyClass='org.apache.hadoop.hbase.io.ImmutableBytesWritable',
        valueClass='org.apache.hadoop.hbase.client.Result',
        keyConverter='org.apache.spark.examples.pythonconverters.ImmutableBytesWritableToStringConverter',
        valueConverter='org.apache.spark.examples.pythonconverters.HBaseResultToStringConverter',
        conf=scan_conf
    )


def _handler_row_score(row_dict):
    """ 处理数据分数
    :param row_dict:
        action_id
            1 刷文章列表
            2 查看文章
            3 评论文章
            4 收藏文章
            5 退出文章详情
            6 打开app
            7 隐藏app
            8 取消收藏
            9 展开摘要
           10 收起摘要
    :return: row_dict
    """
    current_time = time.time()
    timestamp = row_dict.get('timestamp')
    action_id = row_dict.get('action_id')
    action_score_dict = {
        # 1: -1.0,
        2: 5.0,
        3: 10.0,
        4: 20.0,
        8: -2.0,
        9: 2.0
    }
    row_dict['score'] = 0.001
    if action_id not in action_score_dict:
        row_dict['article_id'] = 0
        return row_dict
    time_weight = 0.9 ** ((current_time - timestamp) / 86400)
    row_dict['score'] = action_score_dict[action_id] * time_weight
    return row_dict


def callback_parse_row(rows):
    """Decode one HBase row (list of JSON cell strings) into a scored dict.

    Each element of *rows* is a JSON object carrying 'qualifier' and
    'value'; qualifiers become dict keys.  The id and timestamp columns
    are cast to int before scoring via _handler_row_score.
    """
    record = {}
    for cell in (json.loads(raw) for raw in rows):
        record[cell['qualifier']] = cell['value']
    for int_key in ('user_id', 'action_id', 'article_id', 'timestamp'):
        record[int_key] = int(record[int_key])
    return _handler_row_score(record)


def to_data_frame(rdd):
    """Turn the raw HBase RDD into a Spark DataFrame.

    Column names come from the first parsed row (prefixed with 'row_key');
    every row is assumed to expose the same qualifiers in the same order.
    """
    parsed = rdd.map(lambda kv: (kv[0], callback_parse_row(kv[1].split('\n'))))
    first_row = parsed.take(1)[0]
    col_names = ['row_key'] + [key for key in first_row[1]]
    rows = parsed.map(lambda kv: [kv[0]] + [kv[1][key] for key in kv[1]])
    return rows.toDF(col_names)


def handler_data_frame(data_frame):
    """Aggregate scores per (user, article), keeping only positive ids."""
    grouped = (
        data_frame
        .filter('user_id > 0 and article_id > 0')
        .groupBy('user_id', 'article_id')
        .agg({'score': 'sum'})
    )
    # agg names the column 'sum(score)'; restore the plain name.
    return grouped.withColumnRenamed('sum(score)', 'score')


def cos_sim(vector_a, vector_b):
    """Cosine similarity between two equal-length numeric vectors.

    :param vector_a: vector a (1-D sequence of numbers)
    :param vector_b: vector b (same length as vector_a)
    :return: similarity in [-1.0, 1.0] as a Python float
    """
    # np.mat/np.matrix is deprecated; plain 1-D arrays with np.dot compute
    # the identical value.
    vector_a = np.asarray(vector_a, dtype=float)
    vector_b = np.asarray(vector_b, dtype=float)
    num = float(np.dot(vector_a, vector_b))
    return num / (np.linalg.norm(vector_a) * np.linalg.norm(vector_b))


def callback_user_sim(x):
    """Score the similarity of one (user, user) pair from the cartesian RDD.

    :param x: ((user_a, [(article_id, score), ...]),
              (user_b, [(article_id, score), ...]))
    :return: (user_a, (user_b, similarity)) when the two users share at
             least MIN_ITEM_BAR articles, otherwise None (the caller
             filters Nones out).
    """
    user_a, ratings_a = x[0]
    user_b, ratings_b = x[1]
    # The cartesian product pairs every user with themselves; a self-pair
    # always scores 1.0 and would only "recommend" articles the user has
    # already rated, so drop it.
    if user_a == user_b:
        return None
    scores_a = dict(ratings_a)
    scores_b = dict(ratings_b)
    common_item = set(scores_a).intersection(scores_b)
    if len(common_item) < MIN_ITEM_BAR:
        return None
    # Build both vectors in the SAME item order.  The original list
    # comprehensions kept each user's own rating order, which mis-aligned
    # the vector components whenever the orders differed.
    items = sorted(common_item)
    vector_1 = [scores_a[item] for item in items]
    vector_2 = [scores_b[item] for item in items]
    cos = np.around(cos_sim(vector_1, vector_2), 5)
    return user_a, (user_b, cos)


def callback_prepare_rating(x):
    """Flatten one joined record into (target_user, article, rating, sim) rows.

    :param x: (user, ((target_user, similarity), [(article_id, score), ...]))
    :return: list of (target_user, article_id, rating, similarity) tuples
    """
    (target_user, raw_sim), articles = x[1]
    similarity = float(raw_sim)
    rows = []
    for article_id, rating in articles:
        rows.append((target_user, article_id, rating, similarity))
    return rows


def handler_user_sim(data_frame):
    """Run the user-user similarity pipeline over the aggregated scores.

    Steps: group ratings per user, cross every user pair, keep pairs that
    pass callback_user_sim, then join the similar users back onto their
    own ratings and flatten to (target_user, article, rating, similarity).
    """
    begin_time = time.time()
    ratings_by_user = (
        data_frame.rdd
        .map(lambda x: (x.user_id, (x.article_id, x.score)))
        .groupByKey()
        .mapValues(list)
        .filter(lambda kv: len(kv[1]) >= 2)   # need at least 2 rated articles
    )
    print('user_rating_rdd end time: %s' % (time.time() - begin_time))
    begin_time = time.time()
    user_pairs = ratings_by_user.cartesian(ratings_by_user)
    sim_by_user = (
        user_pairs
        .map(callback_user_sim)
        .filter(lambda x: x is not None)
        .groupByKey()
        .mapValues(list)
        .filter(lambda x: len(x[1]) >= 5)     # need at least 5 similar users
        .mapValues(lambda x: sorted(x, key=itemgetter(1), reverse=True))
    )
    print('user_sim_rdd end time: %s' % (time.time() - begin_time))
    print(sim_by_user.take(10))
    begin_time = time.time()
    keyed_by_similar = sim_by_user.flatMapValues(lambda x: x).map(lambda x: (x[1][0], (x[0], x[1][1])))
    # join similar users with the users' own article ratings
    joined = keyed_by_similar.join(ratings_by_user)
    print('user sim join end time: %s' % (time.time() - begin_time))
    begin_time = time.time()
    rating_rows = joined.map(callback_prepare_rating).flatMap(lambda x: x)
    print('temp3 end time: %s' % (time.time() - begin_time))
    return rating_rows


def handler_storage(row_dict):
    """Push one user's recommended article ids onto their Redis list.

    Articles whose predicted rating is exactly 0.0 are dropped; when no
    articles remain, nothing is written.
    """
    print(row_dict)
    user_id = row_dict['user_id']
    recommended = [
        article_id
        for article_id, rating in row_dict['recommendations']
        if rating != 0.0
    ]
    if recommended:
        redis.rpush('user:%s' % user_id, *recommended)


def main():
    """End-to-end job: scan HBase, score behaviors, compute user-user
    similarity and show the predicted per-article ratings."""
    spark_context = get_spark_context()
    spark_session = get_spark_session(spark_context)
    try:
        source_rdd = get_spark_rdd(spark_session)
        source_rdd.cache()  # reused by count() and to_data_frame()
        row_count = source_rdd.count()
        print('data_count----------------------------%s' % row_count)
        if row_count == 0:
            return
        raw_frame = to_data_frame(source_rdd)
        raw_frame.printSchema()
        raw_frame.show(n=10)
        print('data_frame--------------------------------------------------------')
        scored_frame = handler_data_frame(raw_frame)
        scored_frame.printSchema()
        scored_frame.show(n=10)
        print('new_data_frame--------------------------------------------------------')
        rating_rdd = handler_user_sim(scored_frame)
        begin_time = time.time()
        schema = StructType([
            StructField("user_id", StringType(), True),
            StructField("news_id", IntegerType(), True),
            StructField("rating", DoubleType(), True),
            StructField("similarity", DoubleType(), True)
        ])
        for_rating = rating_rdd.toDF(schema=schema)
        for_rating.createOrReplaceTempView("for_rating")
        # Similarity-weighted mean rating per (user, article).
        sql = '''
            select 
            user_id, news_id, 
            sum(rating*similarity)/sum(similarity) as weighted_rating 
            from for_rating 
            group by user_id, news_id
        '''
        predict_rating = spark_session.sql(sql)
        print('predict_rating end time: %s' % (time.time() - begin_time))
        predict_rating.show(10)
    finally:
        spark_session.stop()
        spark_context.stop()


if __name__ == '__main__':
    # The real entry point had been commented out in favor of a leftover
    # sleep-based timing scribble; run the actual job.
    main()
