# coding: utf-8

# pip install redis pyspark

import os
import time
import json
import logging;#logging.basicConfig(level=logging.NOTSET, format='[%(asctime)s:%(name)s:%(levelname)s]: %(message)s')
from datetime import timedelta, datetime
from redis import StrictRedis
from pyspark.sql import SparkSession
from pyspark.ml.recommendation import ALS
from pyspark import SparkContext, SparkConf
from concurrent.futures import ThreadPoolExecutor

# Resolve paths relative to this script; extra Spark jars are loaded from ./jar.
HOME_PATH = os.environ['HOME']
BASE_PATH = os.path.dirname(__file__)
JARS_PATH = os.path.join(BASE_PATH, 'jar')

# Spark application name and master URL (alternate clusters kept commented).
SPARK_NAME = 'foreign-news-als'
SPARK_HOST = 'spark://10.100.0.37:7077'
# SPARK_HOST = 'spark://10.0.3.93:7077'
# SPARK_HOST = 'spark://10.0.3.125:7077'
# HBase ZooKeeper quorum and the table holding the user-action events.
# HBASE_HOST = 'hb-uf6i3710q15t89giw-001.hbase.rds.aliyuncs.com:2181'
HBASE_HOST = 'hb-uf66z31i3t6sl387t-001.hbase.rds.aliyuncs.com:2181'
HBASE_TABLE = 'news'
# NOTE(review): Redis credentials are hard-coded below — consider moving them
# to environment variables or a secrets store.
# redis-cli -h 10.0.3.20 -a crawl#@!
# REDIS_HOST = '10.0.3.20'
# REDIS_PORT = 6379
# REDIS_AUTH = 'crawl#@!'
# REDIS_DB = 10
# redis-cli -h r-uf62e134a0c68014.redis.rds.aliyuncs.com -a G3EsmjZ6ULW9p4Zw
# REDIS_HOST = 'r-uf62e134a0c68014.redis.rds.aliyuncs.com'
REDIS_HOST = 'r-uf6jbdkfuzomjc1ku7.redis.rds.aliyuncs.com'
REDIS_PORT = 6379
REDIS_AUTH = 'G3EsmjZ6ULW9p4Zw'
REDIS_DB = 3            # db holding per-user recommendation queues (written below)
REDIS_READ_DB = 0       # db holding per-user read-history sets (only read)
REDIS_USER_KEY = 'user:%s'        # sorted set: recommended articles per user
REDIS_USER_READ_KEY = 'alist_%s'  # set: article ids the user has already read

MAX_RECOMMEND_NUM = 100        # top-N articles requested from ALS per user
MAX_RECOMMEND_QUEUE_NUM = 500  # cap on each user's Redis recommendation queue
THREAD_POOL_SIZE = 100         # workers writing results to Redis concurrently


# Module logger; note that logging.basicConfig is commented out at the top of
# the file, so INFO output may be dropped unless the host configures logging.
logger = logging.getLogger(__file__)

# Client for writing per-user recommendation queues (sorted sets, REDIS_DB).
redis = StrictRedis(
    host=REDIS_HOST,
    port=REDIS_PORT,
    password=REDIS_AUTH,
    db=REDIS_DB
)

# Client for reading per-user read-history sets (REDIS_READ_DB).
redis_read = StrictRedis(
    host=REDIS_HOST,
    port=REDIS_PORT,
    password=REDIS_AUTH,
    db=REDIS_READ_DB
)


# Human-readable 24h window — used only for the log lines below.
end_date = datetime.now()
begin_date = end_date + timedelta(days=-1)


# Millisecond-epoch window actually passed to the HBase time-range scan.
# NOTE(review): time.time() is called twice, so the window is not exactly 24h
# and can drift slightly from the begin_date/end_date logged below.
# begin_timestamp, end_timestamp = int(begin_date.timestamp() * 1000), int(end_date.timestamp() * 1000)
begin_timestamp, end_timestamp = int((time.time() - 60 * 60 * 24) * 1000), int(time.time() * 1000)

logger.info('begin_date=%s' % begin_date)
logger.info('end_date=%s' % end_date)


def get_spark_context(level='ERROR'):
    """Build a SparkContext against SPARK_HOST with the local jars attached.

    :param level: Spark log level to apply (default 'ERROR').
    :return: a configured, started SparkContext.
    """
    conf = SparkConf()
    jar_csv = ','.join(os.path.join(JARS_PATH, name) for name in os.listdir(JARS_PATH))
    conf.set('spark.jars', jar_csv)
    logger.info(jar_csv)
    # Deliberately small footprint: a single core and 2g per executor.
    for key, value in (
        ('spark.executor.cores', '1'),
        ('spark.cores.max', '1'),
        ('spark.executor.memory', '2g'),
    ):
        conf.set(key, value)
    context = SparkContext(
        master=SPARK_HOST,
        appName=SPARK_NAME,
        conf=conf
    )
    context.setLogLevel(level)
    return context


def get_spark_session(spark_context):
    """Create a fresh SparkSession (isolated state) on top of an existing
    SparkContext.
    """
    return SparkSession(spark_context).newSession()


def get_spark_rdd(spark_session):
    """Scan the last 24h of the HBase table as an RDD of string pairs.

    Uses newAPIHadoopRDD with HBase's TableInputFormat, restricted to the
    [begin_timestamp, end_timestamp] millisecond time range.  Each element is
    (row_key, newline-joined JSON cell strings) as emitted by the
    spark-examples converters — presumably shipped via the ./jar directory;
    confirm the jar is present on the cluster.
    """
    hbase_conf = {
        'hbase.zookeeper.quorum': HBASE_HOST,
        'hbase.mapreduce.inputtable': HBASE_TABLE,
        'hbase.mapreduce.scan.timerange.start': str(begin_timestamp),
        'hbase.mapreduce.scan.timerange.end': str(end_timestamp)
    }
    # Other supported TableInputFormat scan options, for reference:
    # hbase.mapreduce.scan.row.start
    # hbase.mapreduce.scan.row.stop
    # hbase.mapreduce.scan.column.family
    # hbase.mapreduce.scan.columns
    # hbase.mapreduce.scan.timestamp
    # hbase.mapreduce.scan.timerange.start
    # hbase.mapreduce.scan.timerange.end
    # hbase.mapreduce.scan.maxversions
    # hbase.mapreduce.scan.cacheblocks
    # hbase.mapreduce.scan.cachedrows
    # hbase.mapreduce.scan.batchsize
    return spark_session.sparkContext.newAPIHadoopRDD(
        inputFormatClass='org.apache.hadoop.hbase.mapreduce.TableInputFormat',
        keyClass='org.apache.hadoop.hbase.io.ImmutableBytesWritable',
        valueClass='org.apache.hadoop.hbase.client.Result',
        keyConverter='org.apache.spark.examples.pythonconverters.ImmutableBytesWritableToStringConverter',
        valueConverter='org.apache.spark.examples.pythonconverters.HBaseResultToStringConverter',
        conf=hbase_conf
    )


def _handler_row_score(row_dict):
    """ 处理数据分数
    :param row_dict:
        action_id
            1 刷文章列表
            2 查看文章
            3 评论文章
            4 收藏文章
            5 退出文章详情
            6 打开app
            7 隐藏app
            8 取消收藏
            9 展开摘要
           10 收起摘要
    :return: row_dict
    """
    current_time = time.time()
    timestamp = row_dict.get('timestamp')
    action_id = row_dict.get('action_id')
    action_score_dict = {
        # 1: -1.0,
        2: 5.0,
        3: 10.0,
        4: 20.0,
        8: -2.0,
        9: 2.0
    }
    row_dict['score'] = 0.001
    if action_id not in action_score_dict:
        row_dict['article_id'] = 0
        return row_dict
    time_weight = 0.9 ** ((current_time - timestamp) / 86400)
    row_dict['score'] = action_score_dict[action_id] * time_weight
    return row_dict


def callback_parse_row(rows):
    """Assemble one event dict from the JSON cell strings of a single HBase row.

    Each element of *rows* is a JSON object string with 'qualifier' and
    'value'; qualifiers become dict keys.  The id/timestamp fields are coerced
    to int, a missing 'content' defaults to '', and the dict is handed to
    _handler_row_score for scoring.

    :param rows: iterable of JSON strings, one per HBase cell.
    :return: the scored event dict.
    """
    merged = dict()
    for raw in rows:
        cell = json.loads(raw)
        merged[cell['qualifier']] = cell['value']
    for field in ('user_id', 'action_id', 'article_id', 'timestamp'):
        merged[field] = int(merged[field])
    if 'content' not in merged:
        merged['content'] = ''
    return _handler_row_score(merged)


def to_data_frame(rdd):
    """Convert the raw HBase RDD into a Spark DataFrame.

    Each RDD element is (row_key, newline-joined JSON cell strings); the cells
    are parsed by callback_parse_row into a flat event dict.

    Fix: the original emitted each row's values by iterating that row's OWN
    dict, while column names came from the first row only.  Because 'content'
    is appended conditionally (and dict order is arbitrary on older Pythons),
    rows could differ in key order and values could land under the wrong
    columns.  Values are now emitted in the locked order of the first row's
    keys.

    :param rdd: RDD of (str, str) pairs from get_spark_rdd.
    :return: DataFrame with columns ['row_key'] + event fields.
    """
    parsed = rdd.map(lambda x: (x[0], callback_parse_row(x[1].split('\n'))))
    # Lock the column order using the first parsed row.
    field_names = list(parsed.map(lambda x: list(x[1])).take(1)[0])
    col_names = ['row_key'] + field_names
    return parsed.map(
        lambda x: [x[0]] + [x[1][name] for name in field_names]
    ).toDF(col_names)


def handler_data_frame(data_frame):
    """Aggregate events per (user, article) for ALS training.

    Rows zeroed out by _handler_row_score (article_id == 0) are dropped by the
    filter; scores are summed per pair and max(action_id) is kept alongside.
    The summed column is renamed back to 'score' for the ALS ratingCol.

    :return: DataFrame of (user_id, article_id, max(action_id), score).
    """
    new_data_frame = data_frame\
        .filter('user_id > 0 and article_id > 0')\
        .groupBy('user_id', 'article_id')\
        .agg({'score': 'sum', 'action_id': 'max'})
    return new_data_frame.withColumnRenamed('sum(score)', 'score')


def recommend(data_frame):
    """Train an implicit-feedback ALS model and get top-N articles per user.

    :param data_frame: aggregated rows from handler_data_frame; ALS reads only
        the configured user/item/rating columns, extra columns are ignored.
    :return: DataFrame of (user_id, recommendations) where recommendations is
        a list of (article_id, rating) structs, up to MAX_RECOMMEND_NUM long.
    """
    als = ALS(
        userCol="user_id", itemCol="article_id", ratingCol="score",
        regParam=0.01, rank=20, seed=12, implicitPrefs=True
    )
    model = als.fit(data_frame)
    # Restrict output to the users actually present in today's data.
    users = data_frame.select(als.getUserCol())
    return model.recommendForUserSubset(users, MAX_RECOMMEND_NUM)


def handler_storage(row_dict):
    """Write one user's ALS recommendations into their Redis queue.

    :param row_dict: one recommendForUserSubset row, i.e.
        {'user_id': ..., 'recommendations': [(article_id, score), ...]}.

    Already-read articles (tracked in redis_read) and zero-score entries are
    skipped; the sorted set is then trimmed to the top
    MAX_RECOMMEND_QUEUE_NUM members by score.
    """
    user_id = row_dict['user_id']
    user_key = REDIS_USER_KEY % user_id
    user_read_key = REDIS_USER_READ_KEY % user_id
    values = []
    data_list = row_dict['recommendations']
    for (article_id, score) in data_list:
        if score == 0.0:
            continue
        # Skip articles the user has already read.
        if redis_read.sismember(user_read_key, article_id):
            continue
        # Interleave (score, member) pairs for the positional zadd below.
        values.append(score)
        values.append(article_id)
    if not values:
        return
    # NOTE(review): positional score/member pairs require redis-py < 3.0;
    # redis-py 3.x needs zadd(name, {member: score}) — confirm pinned version.
    redis.zadd(user_key, *values)
    count = redis.zcard(user_key)
    if count <= MAX_RECOMMEND_QUEUE_NUM:
        return
    # Overflow: zrange(..., desc=True) yields highest scores first, so drop
    # every member beyond the top MAX_RECOMMEND_QUEUE_NUM.
    all_values = redis.zrange(user_key, 0, -1, True)
    if len(all_values) <= MAX_RECOMMEND_QUEUE_NUM:
        return
    rem_values = all_values[MAX_RECOMMEND_QUEUE_NUM:]
    redis.zrem(user_key, *rem_values)


def main():
    """Run the daily pipeline: scan HBase, score + aggregate events, train
    ALS, and fan the recommendations out to Redis, always stopping Spark.
    """
    context = get_spark_context()
    session = get_spark_session(context)
    try:
        rdd = get_spark_rdd(session)
        rdd.cache()
        row_count = rdd.count()
        logger.info('data_count----------------------------%s' % row_count)
        if row_count == 0:
            # Nothing scanned in the time window — nothing to train on.
            return
        frame = to_data_frame(rdd)
        frame.printSchema()
        frame.show(n=10)
        logger.info('data_frame--------------------------------------------------------')
        logger.info(frame.groupBy('action_id').count().collect())
        aggregated = handler_data_frame(frame)
        aggregated.printSchema()
        aggregated.show(n=100)
        logger.info('new_data_frame--------------------------------------------------------')
        recommendations = recommend(aggregated)
        recommendations.printSchema()
        recommendations.show(n=10)
        logger.info('result_data_frame--------------------------------------------------------')
        # Stream rows to the driver instead of collect() to bound memory,
        # and write to Redis concurrently.
        with ThreadPoolExecutor(THREAD_POOL_SIZE) as pool:
            pool.map(handler_storage, recommendations.toLocalIterator())
    finally:
        session.stop()
        context.stop()


# Script entry point: run the full daily recommendation pipeline.
if __name__ == '__main__':
    main()
