# -*- coding: utf-8 -*-


from __future__ import division
import sys
sys.path.append('../')
from datetime import datetime as dtime
import pandas as pd
import numpy as np
import time
import re
import os
from sklearn.preprocessing import (
    scale,
    minmax_scale,
    normalize
)
from db.default import default_data_output_file

# debug settings
pd.set_option('display.max_rows', None)
# Bug fix: np.NaN was removed in NumPy 2.0, and set_printoptions requires an
# integer threshold anyway (np.nan raises ValueError on modern NumPy).
# sys.maxsize is the documented way to disable array truncation.
np.set_printoptions(threshold=sys.maxsize)


def __calc_avg_fans_rise_rate(data, scaler=True):
    """Average daily fan growth (fans per account-age day).

    Returns the z-scaled rate when `scaler` is true, the raw rate otherwise.
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    rate = data['n_fans'] / data['days']
    return scale(rate) if scaler else rate

        

def __calc_avg_tweets_rise_rate(data, scaler=True):
    """Average daily tweet output (tweets per account-age day).

    Returns the z-scaled rate when `scaler` is true, the raw rate otherwise.
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    per_day = data['tweets'] / data['days']
    if not scaler:
        return per_day
    return scale(per_day)
    


def __calc_avg_follows_rise_rate(data, scaler=True):
    """Average daily growth of followed accounts (follows per account-age day).

    Returns the min-max-scaled rate when `scaler` is true, the raw rate otherwise.
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    daily = data['n_follows'] / data['days']
    if not scaler:
        return daily
    return minmax_scale(daily)


def __calc_sunshine(data, scaler=True):
    """Map the "sunshine credit" text labels to ordinal scores 0-5.

    Labels not in the known set are left untouched; the frame is mutated
    in place and returned.  (`scaler` is currently unused.)
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    for score, key in enumerate(['暂无信用分', '信用极低', '信用较低', '信用一般', '信用较好', '信用极好']):
        # Bug fix: the original chained-indexing write
        # (data['v_sunshine'][mask] = score) is not guaranteed to modify the
        # original frame (SettingWithCopyWarning; an error under pandas
        # copy-on-write).  .loc assigns through to the frame itself.
        data.loc[data['v_sunshine'] == key, 'v_sunshine'] = score
    return data
    

def __calc_same_blog_time_count(data):
    """Count duplicated publish timestamps among each user's tweets.

    Expects a two-column frame (_id, serialized tweet list).  Returns a
    DataFrame with columns ['_id', 'time_repeats'].
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    rows = []
    for user_and_tweets in data.values:
        userid = user_and_tweets[0]
        # NOTE(review): eval() on scraped tweet content is a code-execution
        # risk; consider ast.literal_eval if the stored data is plain Python
        # literals -- confirm the stored format before switching.
        tweets = eval(user_and_tweets[1])
        # each tweet is (text, "time\xa0source"); keep only the time part
        tweet_times = [ele[1].split('\xa0')[0].strip() for ele in tweets]
        rows.append({'_id': userid,
                     'time_repeats': len(tweet_times) - len(set(tweet_times))})
    # Build the frame once: per-row DataFrame.append was removed in
    # pandas 2.0 and was O(n^2) anyway.
    return pd.DataFrame(rows, columns=['_id', 'time_repeats'])


def __calc_same_string_in_blog(data):
    """Count tweets that repeat the same code string at the same time.

    A label is "<code>:=<time>" where <code> matches [A-Z]{3}[0-9]{3}.
    Expects a two-column frame (_id, serialized tweet list).  Returns a
    DataFrame with columns ['_id', 'label_repeats'].
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    strmatch = re.compile(r'[A-Z]{3}[0-9]{3}')
    rows = []
    for user_and_tweets in data.values:
        userid = user_and_tweets[0]
        # NOTE(review): eval() on scraped tweet content is a code-execution
        # risk; consider ast.literal_eval -- confirm the stored format first.
        tweets = eval(user_and_tweets[1])
        labels = []
        for tweet, rtime in tweets:
            codes = strmatch.findall(tweet)  # scan each tweet only once
            if codes:
                labels.append(codes[0] + ':=' + rtime.split('\xa0')[0].strip())
        rows.append({'_id': userid,
                     'label_repeats': len(labels) - len(set(labels))})
    # Build the frame once: per-row DataFrame.append was removed in
    # pandas 2.0 and was O(n^2) anyway.
    return pd.DataFrame(rows, columns=['_id', 'label_repeats'])
    

def __calc_follow_fan_ratio(data, epsilon=1e-3, alpha=1, scaler=True):
    """Smoothed follows-to-fans ratio.

    `alpha` pads the numerator and `epsilon` the denominator so accounts
    with zero fans do not divide by zero.  Returns the z-scaled ratio when
    `scaler` is true, the raw ratio otherwise.
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    ratio = (data['n_follows'] + alpha) / (data['n_fans'] + epsilon)
    return scale(ratio) if scaler else ratio


def __calc_fans_feature(data, scaler=True):
    """Fan-count distribution feature -- replaces the raw fan count.

    Bug fix: `scaler` was previously ignored and the column was always
    min-max scaled.  scaler=False now returns the mean-centered raw counts,
    mirroring the sibling __calc_follows_feature/__calc_tweets_feature
    helpers.  The default path is unchanged.
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    if scaler:
        return minmax_scale(data['n_fans'])
    else:
        return data['n_fans'] - np.mean(data['n_fans'])


def __calc_follows_feature(data, scaler=True):
    """Follow-count distribution feature -- replaces the raw follow count.

    Returns the z-scaled column when `scaler` is true, the mean-centered
    raw counts otherwise.
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    if not scaler:
        return data['n_follows'] - np.mean(data['n_follows'])
    return scale(data['n_follows'])


def __calc_tweets_feature(data, scaler=True):
    """Tweet-count distribution feature -- replaces the raw tweet count.

    Returns the min-max-scaled column when `scaler` is true, the
    mean-centered raw counts otherwise.
    """

    assert isinstance(data, pd.DataFrame), '"Data" must be a pandas.DataFrame object'
    if not scaler:
        return data['tweets'] - np.mean(data['tweets'])
    return minmax_scale(data['tweets'])


def __calc_days(data):
    """Days elapsed since registration -- replaces the raw regtime column.

    `data['regtime']` holds Unix timestamps; the result is a single-column
    DataFrame named 'days'.
    """

    today = dtime.fromtimestamp(int(time.time())).date()
    days = [(today - dtime.fromtimestamp(ts).date()).days for ts in data['regtime']]
    return pd.DataFrame(days, columns=['days'])


def __calc_at_ratio(data, scaler=False):
    """Number of '@' mentions per tweet for each user.

    Returns a DataFrame with columns ['_id', 'at_ratio']; the ratio is
    min-max scaled in place when `scaler` is true.
    """

    rows = []
    at_find = re.compile('@')
    for userid, tweets in data[['_id', 'o_tweets']].values:
        # NOTE(review): eval() on scraped tweet content is a code-execution
        # risk; consider ast.literal_eval -- confirm the stored format first.
        at_ratio = len(at_find.findall(tweets)) / len(eval(tweets))
        rows.append({'_id': userid, 'at_ratio': at_ratio})
    # Build the frame once: per-row DataFrame.append was removed in
    # pandas 2.0 and was O(n^2) anyway.
    rsl = pd.DataFrame(rows, columns=['_id', 'at_ratio'])
    if scaler:
        # Bug fix: this branch used to `pass` and fall off the end,
        # returning None.  Scale in place like __calc_last_tweet_long does.
        rsl['at_ratio'] = minmax_scale(rsl['at_ratio'].values)
    return rsl


def __calc_last_tweet_long(data, scaler=True):
    """Days from each user's newest original tweet to the 2018-03-15 cutoff.

    Handles the several timestamp formats the scraped data contains;
    "today" / "minutes ago" style stamps count as a zero-day gap.  Returns
    a DataFrame with columns ['_id', 'last_tweet_long'], min-max scaled in
    place when `scaler` is true.
    """

    rows = []
    now = dtime.strptime('2018-3-15', '%Y-%m-%d').date()
    for userid, tweets in data[['_id', 'o_tweets']].values:
        # NOTE(review): eval() on scraped tweet content is a code-execution
        # risk; consider ast.literal_eval -- confirm the stored format first.
        tweets = eval(tweets)
        # first entry is the newest tweet; "time\xa0source" -> time part
        datestr = tweets[0][1].split('\xa0')[0]
        if '年' in datestr:
            last_tweet_time = dtime.strptime(datestr, '%Y年%m月%d日 %H:%M').date()
        elif '今天' in datestr or '分钟前' in datestr:
            last_tweet_time = 0  # posted today: zero-day gap
        elif '-' in datestr:
            last_tweet_time = dtime.strptime(datestr, '%Y-%m-%d %H:%M:%S').date()
        else:
            # bare "m月d日 HH:MM" stamps implicitly belong to the current (2018) year
            last_tweet_time = dtime.strptime('2018年'+datestr, '%Y年%m月%d日 %H:%M').date()
        diff_data = ((now - last_tweet_time).days) if last_tweet_time else last_tweet_time
        rows.append({'_id': userid, 'last_tweet_long': diff_data})

    # Build the frame once: per-row DataFrame.append was removed in
    # pandas 2.0 and was O(n^2) anyway.
    rsl = pd.DataFrame(rows, columns=['_id', 'last_tweet_long'])
    if scaler:
        rsl['last_tweet_long'] = minmax_scale(rsl['last_tweet_long'].values)
    return rsl


def __do(sources):
    """Assemble the per-user feature frame from the raw metadata.

    Arguments:
        sources {pd.DataFrame} -- raw metadata loaded from the source CSV;
            mutated in place (the regtime / o_tweets / v_sunshine columns
            are dropped and the derived feature columns are appended)

    Returns:
        pd.DataFrame -- one row per user with the derived feature columns
    """

    # base counters plus the derived account-age ('days') column,
    # which the per-day rate features below divide by
    ones = sources[['_id', 'n_fans', 'tweets', 'n_follows']]
    days = __calc_days(sources[['_id', 'regtime']])
    ones = pd.concat([ones, days], axis=1)
    fans_rate = __calc_avg_fans_rise_rate(ones)
    follows_rate = __calc_avg_follows_rise_rate(ones)
    tweets_rate = __calc_avg_tweets_rise_rate(ones)

    follow_square_fan_ratio = __calc_follow_fan_ratio(ones)
    sunshine = __calc_sunshine(sources[['_id', 'v_sunshine']])
    labels_count = __calc_same_string_in_blog(sources[['_id', 'o_tweets']])
    # .copy() guards against the helper writing back into `sources`
    # through the column slice
    same_time_count = __calc_same_blog_time_count(sources[['_id', 'o_tweets']].copy())
    at_ratio = __calc_at_ratio(sources[['_id', 'o_tweets']])
    # last_tweet_long = __calc_last_tweet_long(sources[['_id', 'o_tweets']])

    # raw columns are superseded by the derived features computed above
    sources.drop('regtime', axis=1, inplace=True)
    sources.drop('o_tweets', axis=1, inplace=True)
    sources.drop('v_sunshine', axis=1, inplace=True)
    sources.insert(loc=sources.shape[1], column='fan_increate_rate', value=fans_rate)
    sources.insert(loc=sources.shape[1], column='follow_increase_rate', value=follows_rate)
    sources.insert(loc=sources.shape[1], column='tweet_increase_rate', value=tweets_rate)
    sources.insert(loc=sources.shape[1], column='follow_fan_ratio', value=follow_square_fan_ratio)
    # with no explicit key, pd.merge joins on the columns the frames share
    # (here '_id'); inner joins drop users missing from either side
    sources = pd.merge(sources, sunshine, how='inner')
    sources = pd.merge(sources, labels_count, how='inner')
    sources = pd.merge(sources, same_time_count, how='inner')
    sources = pd.merge(sources, at_ratio, how='inner')
    # sources = pd.merge(sources, last_tweet_long, how='inner')
    return sources


def prepare_svm_data(source):
    """Load the raw training CSV at `source` and return the processed frame."""
    assert os.path.exists(source), 'File not found at %s' % source
    assert os.path.isfile(source), '%s is not a valid file' % source
    return __do(pd.read_csv(source))


def prepare_svm_data_output(source, dest=default_data_output_file):
    """Process the raw training data and write the result to a CSV file.

    Arguments:
        source {str} -- path of the raw metadata file to process

    Keyword Arguments:
        dest {str} -- output path for the processed data (default: {default_data_output_file})
    """

    # Bug fix: this previously called get_smv_train_data(), which is not
    # defined anywhere in the module -- the processing entry point is
    # prepare_svm_data().
    rsl = prepare_svm_data(source)
    rsl.to_csv(dest, index=None)
    print('Output into %s' % dest)

