# -*- coding: utf-8 -*-


from __future__ import division
import sys
sys.path.append('../')
import ast
import os
import re
import time
from datetime import datetime as dtime
import numpy as np
import pandas as pd
from sklearn.preprocessing import (
    scale,
    minmax_scale,
    normalize
)
from sklearn.utils import shuffle
from db.default import svm_data_output_file
from utils.load_data import load_data
from utils.progressbar import SingleStepProgressbar

# debug settings
# pd.set_option('display.max_rows', None)
# np.set_printoptions(threshold=np.NaN)
# Mapping from Weibo "sunshine credit" level strings to ordinal scores (0-5).
# NOTE(review): __calc_sunshine defines an identical local mapping instead of
# using this one, so this constant appears unused in this file — confirm
# before removing.
sunshine_mapping = dict(暂无信用分=0, 信用极低=1, 信用较低=2, 信用一般=3, 信用较好=4, 信用极好=5)


def __calc_avg_fans_rise_rate(data):
    """Average daily fan growth: total fans divided by account age in days."""

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    fans = data['n_fans']
    age_in_days = data['days']
    return fans / age_in_days


def __calc_avg_tweets_rise_rate(data):
    """Average tweets posted per day of account age."""

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    tweet_total = data['tweets']
    age_in_days = data['days']
    return tweet_total / age_in_days


def __calc_avg_follows_rise_rate(data):
    """Average daily growth of followed accounts over the account's lifetime."""

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    follows = data['n_follows']
    age_in_days = data['days']
    return follows / age_in_days


def __calc_sunshine(data):
    """Convert "sunshine credit" level strings to ordinal scores.

    Arguments:
        data {pd.DataFrame} -- columns ['_id', 'v_sunshine'], where
            'v_sunshine' holds one of the six known level strings.

    Returns:
        pd.DataFrame -- columns ['_id', 'v_sunshine'] with integer scores 0-5.
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    score_mapping = dict(暂无信用分=0, 信用极低=1, 信用较低=2, 信用一般=3, 信用较好=4, 信用极好=5)
    # Vectorized Series.map replaces the former row-by-row DataFrame.append
    # loop, which was O(n^2) and has been removed entirely in pandas 2.0.
    return pd.DataFrame({
        '_id': data['_id'].values,
        'v_sunshine': data['v_sunshine'].map(score_mapping).values,
    })
    

def __calc_same_blog_time_count(data):
    """Count duplicated tweet publish times per user.

    Arguments:
        data {pd.DataFrame} -- columns ['_id', 'o_tweets']; 'o_tweets' is the
            string repr of a list of (text, time) tuples, where the time field
            carries trailing info after a non-breaking space.

    Returns:
        pd.DataFrame -- columns ['_id', 'time_repeats'].
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    rows = []
    for userid, raw_tweets in data.values:
        # literal_eval instead of eval: the column is a serialized literal,
        # and eval would execute arbitrary code hidden in scraped data.
        tweets = ast.literal_eval(raw_tweets)
        # Keep only the date/time part before the non-breaking space.
        tweet_times = [ele[1].split('\xa0')[0].strip() for ele in tweets]
        repeats = len(tweet_times) - len(set(tweet_times))
        rows.append({'_id': userid, 'time_repeats': repeats})
    # Single DataFrame construction replaces the removed-in-pandas-2.0
    # per-row DataFrame.append.
    return pd.DataFrame(rows, columns=['_id', 'time_repeats'])


def __calc_same_string_in_blog(data):
    """Count tweets repeating the same code string at the same publish time.

    A "code" is the first token matching [A-Z]{3}[0-9]{3} in a tweet; each
    matching tweet is keyed as "<code>:=<publish time>" and duplicates of
    that key are counted per user.

    Arguments:
        data {pd.DataFrame} -- columns ['_id', 'o_tweets']; 'o_tweets' is the
            string repr of a list of (text, time) tuples.

    Returns:
        pd.DataFrame -- columns ['_id', 'label_repeats'].
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    strmatch = re.compile(r'[A-Z]{3}[0-9]{3}')
    rows = []
    for userid, raw_tweets in data.values:
        tweets = ast.literal_eval(raw_tweets)  # safer than eval on scraped data
        labels = []
        for tweet, rtime in tweets:
            codes = strmatch.findall(tweet)  # hoisted: was evaluated twice per tweet
            if codes:
                labels.append(codes[0] + ':=' + rtime.split('\xa0')[0].strip())
        repeats = len(labels) - len(set(labels))
        rows.append({'_id': userid, 'label_repeats': repeats})
    # Single DataFrame construction instead of the removed DataFrame.append.
    return pd.DataFrame(rows, columns=['_id', 'label_repeats'])
    

def __calc_follow_fan_ratio(data, epsilon=1e-3, alpha=1):
    """Smoothed follows-to-fans ratio: (n_follows + alpha) / (n_fans + epsilon).

    `alpha` and `epsilon` keep the ratio finite for zero-fan accounts.
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    numerator = data['n_follows'] + alpha
    denominator = data['n_fans'] + epsilon
    return numerator / denominator


def __calc_days(data):    
    """计算注册到当前的时间差值 -- 替代注册日期"""

    now = dtime.fromtimestamp(int(time.time())).date()
    days = list(map(lambda regtime: (now - dtime.fromtimestamp(regtime).date()).days, data['regtime']))
    return pd.DataFrame(days, columns=['days']) 


def __calc_at_ratio(data, scaler=False):
    """Ratio of '@' mentions to tweet count per user.

    Arguments:
        data {pd.DataFrame} -- must contain '_id' and 'o_tweets'; 'o_tweets'
            is the string repr of a list of tweets. The '@' count is taken on
            the raw serialized string, as in the original implementation.
        scaler {bool} -- unused; kept for interface compatibility.

    Returns:
        pd.DataFrame -- columns ['_id', 'at_ratio'].
    """

    rows = []
    for userid, raw_tweets in data[['_id', 'o_tweets']].values:
        n_tweets = len(ast.literal_eval(raw_tweets))  # safer than eval on scraped data
        n_ats = raw_tweets.count('@')  # str.count replaces the '@' regex findall
        # Guard the empty-tweet-list case, which previously raised
        # ZeroDivisionError.
        ratio = n_ats / n_tweets if n_tweets else 0.0
        rows.append({'_id': userid, 'at_ratio': ratio})
    # Single DataFrame construction instead of the removed DataFrame.append.
    return pd.DataFrame(rows, columns=['_id', 'at_ratio'])


def __calc_last_tweet_long(data):
    """Days between a user's newest original tweet and the 2018-03-15 cutoff.

    Weibo renders tweet times in several formats; each branch below handles
    one of them:
      - '2017年01月02日 10:30'   explicit year
      - '今天 10:30' / 'N分钟前'  today / minutes ago -> 0 days
      - '2018-01-02 10:30:00'   dash-separated timestamp
      - '01月02日 10:30'         year omitted -> assumed 2018

    Arguments:
        data {pd.DataFrame} -- must contain '_id' and 'o_tweets'; the first
            entry of each serialized tweet list is the newest tweet.

    Returns:
        pd.DataFrame -- columns ['_id', 'last_tweet_long'].
    """

    rows = []
    cutoff = dtime.strptime('2018-3-15', '%Y-%m-%d').date()
    for userid, raw_tweets in data[['_id', 'o_tweets']].values:
        tweets = ast.literal_eval(raw_tweets)  # safer than eval on scraped data
        datestr = tweets[0][1].split('\xa0')[0]
        if '年' in datestr:
            last_tweet_time = dtime.strptime(datestr, '%Y年%m月%d日 %H:%M').date()
        elif '今天' in datestr or '分钟前' in datestr:
            last_tweet_time = 0
        elif '-' in datestr:
            last_tweet_time = dtime.strptime(datestr, '%Y-%m-%d %H:%M:%S').date()
        else:
            last_tweet_time = dtime.strptime('2018年'+datestr, '%Y年%m月%d日 %H:%M').date()
        # 0 means "posted today"; otherwise the day gap to the cutoff date.
        diff_data = ((cutoff - last_tweet_time).days) if last_tweet_time else last_tweet_time
        rows.append({'_id': userid, 'last_tweet_long': diff_data})
    # Single DataFrame construction instead of the removed DataFrame.append.
    return pd.DataFrame(rows, columns=['_id', 'last_tweet_long'])


def __do(sources):
    """Build the SVM feature table from the raw scraped user data.

    Arguments:
        sources {pd.DataFrame} -- raw user records; must contain at least
            '_id', 'n_fans', 'tweets', 'n_follows', 'regtime', 'o_tweets'
            and 'v_sunshine' columns.

    Returns:
        pd.DataFrame -- one row per '_id' with the derived feature columns
        merged in.

    NOTE(review): mutates `sources` in place (the inplace drops below), so
    the caller's DataFrame loses the 'regtime', 'o_tweets' and 'v_sunshine'
    columns as a side effect -- confirm callers do not rely on them after.
    """

    # 6-step console progress bar; purely cosmetic.
    p = SingleStepProgressbar(100, 6, 'n_Ding~')
    p.start()
    # 'days' must be computed before the rate features, which divide by it.
    ones = sources[['_id', 'n_fans', 'tweets', 'n_follows']]
    days = __calc_days(sources[['_id', 'regtime']])
    ones = pd.concat([ones, days], axis=1)
    p.forward()

    # Per-day growth rates and the smoothed follow/fan ratio.
    fans_rate = __calc_avg_fans_rise_rate(ones)
    follows_rate = __calc_avg_follows_rise_rate(ones)
    tweets_rate = __calc_avg_tweets_rise_rate(ones)
    follow_square_fan_ratio = __calc_follow_fan_ratio(ones)
    p.forward()

    # Features derived from the serialized tweet list and credit level.
    sunshine = __calc_sunshine(sources[['_id', 'v_sunshine']])
    labels_count = __calc_same_string_in_blog(sources[['_id', 'o_tweets']])
    same_time_count = __calc_same_blog_time_count(sources[['_id', 'o_tweets']].copy())
    at_ratio = __calc_at_ratio(sources[['_id', 'o_tweets']])
    p.forward()
    # last_tweet_long = __calc_last_tweet_long(sources[['_id', 'o_tweets']])

    # Raw columns are replaced by the derived features below.
    sources.drop('regtime', axis=1, inplace=True)
    sources.drop('o_tweets', axis=1, inplace=True)
    sources.drop('v_sunshine', axis=1, inplace=True)
    p.forward()
    # Positional (index-aligned) features appended as new columns.
    sources.insert(loc=sources.shape[1], column='fan_increate_rate', value=fans_rate)
    sources.insert(loc=sources.shape[1], column='follow_increase_rate', value=follows_rate)
    sources.insert(loc=sources.shape[1], column='tweet_increase_rate', value=tweets_rate)
    sources.insert(loc=sources.shape[1], column='follow_fan_ratio', value=follow_square_fan_ratio)
    p.forward()
    # Id-keyed features merged on the shared '_id' column (inner join).
    sources = pd.merge(sources, sunshine, how='inner')
    sources = pd.merge(sources, labels_count, how='inner')
    sources = pd.merge(sources, same_time_count, how='inner')
    sources = pd.merge(sources, at_ratio, how='inner')
    p.forward()
    # sources = pd.merge(sources, last_tweet_long, how='inner')
    return sources


def prepare_svm_data(source):
    """Load the raw data file at `source` and build the SVM feature table.

    Arguments:
        source {str} -- path of the raw (meta) data file.

    Returns:
        pd.DataFrame -- the processed feature table from __do.

    Raises:
        FileNotFoundError -- `source` does not exist or is not a regular file.
        ValueError -- the file lacks the 'o_tweets' column (not a meta file).
    """

    # Real exceptions instead of asserts: asserts are stripped under
    # `python -O`, which would silently skip all input validation here.
    if not os.path.exists(source):
        raise FileNotFoundError('File not found at %s' % source)
    if not os.path.isfile(source):
        raise FileNotFoundError('%s is not a valid file' % source)
    sources = load_data(source)
    if 'o_tweets' not in sources.columns:
        raise ValueError('%s is not meta data file' % source)
    return __do(sources)


def mix_train_data_and_output(metasource, cluster_rsl_file, ratio):
    """Build the labelled SVM training set and write it to disk.

    Positive samples are the users listed in the clustering result file; a
    random negative sample of size round(n_pos / ratio) is drawn from the
    remaining rows. The shuffled result is written to svm_data_output_file.

    Arguments:
        metasource {str} -- path of the raw data set.
        cluster_rsl_file {str} -- path of the clustering (positive) results.
        ratio {float} -- positive/negative ratio used to size the negatives.
    """

    svm_data = prepare_svm_data(metasource)     # raw data -> SVM feature table
    labels = pd.read_csv(cluster_rsl_file, usecols=['_id'])  # positive sample ids
    p_n = labels.shape[0]   # number of positive samples
    n_n = round(p_n / ratio)  # number of negatives to draw

    isin = svm_data['_id'].astype(labels['_id'].dtype).isin(labels['_id'])
    # `~` instead of unary `-`: negating a boolean Series with `-` is
    # rejected by modern numpy/pandas. `.copy()` so the label assignments
    # below write to real frames, not views (SettingWithCopy).
    n_meta_data = svm_data[~isin]               # all rows that are not positives
    n_data = n_meta_data.sample(n=n_n).copy()   # final negative sample
    p_data = svm_data[isin].copy()              # positive sample rows
    n_data['label'] = 0     # negatives get label 0
    p_data['label'] = 1     # positives get label 1

    raw_data = pd.concat([n_data, p_data], axis=0, ignore_index=True)
    rsl = shuffle(raw_data)
    rsl.to_csv(svm_data_output_file, index=None)
    print('Svm 训练数据处理完毕, 数据文件输出到 %s' % svm_data_output_file)
    # NOTE: the original called exit() here, killing the interpreter of any
    # caller; a library function should simply return.


def label_navy_data(metasource, navydata, ratio=1):
    """Label raw data using a navy (spammer) cluster sample file.

    Rows whose '_id' appears in the navy data set get label 1; a random
    sample of round(n_navy * ratio) of the remaining rows gets label 0.

    Arguments:
        metasource {str} -- path of the raw data set.
        navydata {str} -- path of the navy (spammer) data set.
        ratio {float} -- negatives drawn per navy sample (default 1:1).

    Returns:
        pd.DataFrame -- shuffled labelled samples.
    """

    raw_data = prepare_svm_data(metasource)
    navy_data = pd.read_csv(navydata, usecols=['_id'])
    n_p = round(navy_data.shape[0] * ratio)
    isin = raw_data['_id'].astype(navy_data['_id'].dtype).isin(navy_data['_id'])
    # `~` instead of unary `-` (rejected on boolean Series by modern numpy);
    # `.copy()` so the label assignments write to real frames, not views.
    positives = raw_data[isin].copy()
    positives['label'] = 1
    negatives = raw_data[~isin].sample(n=n_p).copy()
    negatives['label'] = 0
    rsl = pd.concat([positives, negatives], axis=0, ignore_index=True)
    return shuffle(rsl)



# ****************************************** 处理单个用户数据
def calc_one_data(data):
    """Build the SVM feature row(s) for a single user's raw data.

    Same pipeline as __do but without the progress bar, intended for
    one-off scoring of a single scraped user record.

    Arguments:
        data {pd.DataFrame} -- raw record(s); must contain at least '_id',
            'tweets', 'n_follows', 'n_fans', 'regtime', 'o_tweets' and
            'v_sunshine' columns.

    Returns:
        pd.DataFrame -- the record(s) with derived feature columns merged in.

    NOTE(review): mutates `data` in place (the inplace drops below), so the
    caller's DataFrame loses three raw columns as a side effect.
    """
    assert isinstance(data, pd.DataFrame)
    
    one = data[['_id', 'tweets', 'n_follows', 'n_fans']]

    # 'days' must exist before the per-day rate features below.
    day = __calc_days(data[['_id', 'regtime']])
    one = pd.concat([one, day], axis=1)

    fan_rate = __calc_avg_fans_rise_rate(one)
    follow_rate = __calc_avg_follows_rise_rate(one)
    tweet_rate = __calc_avg_tweets_rise_rate(one)
    follow_square_fan_ratio = __calc_follow_fan_ratio(one)

    # Features derived from the serialized tweet list and credit level.
    sunshine = __calc_sunshine(data[['_id', 'v_sunshine']])
    labels_count = __calc_same_string_in_blog(data[['_id', 'o_tweets']])
    same_time_count = __calc_same_blog_time_count(data[['_id', 'o_tweets']])
    at_ratio = __calc_at_ratio(data[['_id', 'o_tweets']])

    # Raw columns are replaced by the derived features below.
    data.drop('regtime', axis=1, inplace=True)
    data.drop('o_tweets', axis=1, inplace=True)
    data.drop('v_sunshine', axis=1, inplace=True)

    # Positional (index-aligned) features appended as new columns.
    data.insert(loc=data.shape[1], column='fan_increate_rate', value=fan_rate)
    data.insert(loc=data.shape[1], column='follow_increase_rate', value=follow_rate)
    data.insert(loc=data.shape[1], column='tweet_increase_rate', value=tweet_rate)
    data.insert(loc=data.shape[1], column='follow_fan_ratio', value=follow_square_fan_ratio)

    # Id-keyed features merged on the shared '_id' column (inner join).
    data = pd.merge(data, sunshine, how='inner')
    data = pd.merge(data, labels_count, how='inner')
    data = pd.merge(data, same_time_count, how='inner')
    data = pd.merge(data, at_ratio, how='inner')

    return data

