#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2018-08-21 10:35:51
# @Author  : yangchaojun (YYChildren@gmail.com)
# @Link    : https://git.mingchao.com/yangchaojun
# @Version : 

import pymysql
import pandas as pd
import re
from bs4 import BeautifulSoup
from hanziconv import HanziConv
from zhon import hanzi
import string
# All CJK punctuation (zhon) plus ASCII punctuation (string), stored as a set
# for O(1) membership tests in remove_punctuation / get_help_content.
# set() consumes the string directly; the intermediate list comprehension was redundant.
punctuation = set(hanzi.punctuation + string.punctuation)
import html
import pymysql
import jieba
from tqdm import tqdm
tqdm.pandas()  # register .progress_apply on pandas Series/DataFrame so long applies show a progress bar

import nltk
# Combined Chinese + English stopword set.
# Open in text mode with an explicit encoding instead of reading bytes and
# decoding by hand — same result, and splitlines() then works on str directly.
with open('resources/corpus/stopwords-zh.txt', encoding='utf-8') as file:
    stopwords = file.read().splitlines()
    stopwords = set(stopwords + nltk.corpus.stopwords.words("english"))
from pyhanlp import HanLP
from mtmodel.utils import store, pd_source
import json

def extract_text(x):
    """Strip HTML markup from *x* and return only the visible text."""
    return BeautifulSoup(x, 'html.parser').text

def clear_text(x):
    """Normalize raw comment text.

    Unescapes HTML entities, converts traditional Chinese characters to
    simplified, and trims surrounding whitespace.
    """
    unescaped = html.unescape(x)
    simplified = HanziConv.toSimplified(unescaped)
    return simplified.strip()

def remove_punctuation(x):
    """Drop every punctuation character from *x* and collapse whitespace runs
    into single spaces."""
    kept = "".join(ch for ch in x if ch not in punctuation)
    return re.sub(r'\s+', ' ', kept)

def get_help_content(x):
    """Replace punctuation with spaces, collapse whitespace runs, and trim
    the result — yields a clean, space-separated text."""
    spaced = "".join(" " if ch in punctuation else ch for ch in x)
    collapsed = re.sub(r'\s+', ' ', spaced)
    return collapsed.strip()

def distinct_count(sentence):
    """Return the number of distinct elements in *sentence*.

    For a string this counts distinct characters; for a token list, distinct
    tokens. set() consumes any iterable directly — the intermediate list
    comprehension was redundant.
    """
    return len(set(sentence))

def tokenize(x):
    """Segment *x* with HanLP, dropping single-space tokens.

    NOTE(review): the original filter was ``i != ' '``, which compares a
    HanLP ``Term`` object against a Python string and therefore never
    matches — space tokens were never removed. Filtering on the term's
    ``.word`` attribute matches the intent; confirm against pyhanlp's
    Term API before relying on exact token counts.
    """
    return [term for term in HanLP.segment(x) if term.word != ' ']

def join(L):
    """Flatten one level of nesting in *L* and join all items with spaces."""
    parts = []
    for group in L:
        parts.extend(group)
    return " ".join(parts)

sql = "SELECT distinct game_id FROM game_source.s_game_comments_taptap_game WHERE source='taptap'"
games = pd_source.from_sql(sql)

# Target table for the per-game summaries (loop-invariant, so defined once).
trg_db = 'game_process'
trg_table = 'c_game_comment_summary'

for game_id in tqdm(games['game_id']):
    # NOTE(review): game_id comes from our own database, but interpolating it
    # into SQL with % is still injection-prone; prefer a parameterized query
    # if pd_source.from_sql supports one.
    sql = """
    SELECT source, game_id, game_name, content FROM game_source.s_game_comments_taptap_game where game_id = %s
    """ % (game_id, )
    df = pd_source.from_sql(sql)

    # Normalize the raw comment text, then derive character/token statistics.
    df['content'] = df['content'].progress_apply(clear_text)
    df['words'] = df['content'].progress_apply(tokenize)
    df['content'] = df['content'].progress_apply(get_help_content)
    df['clen'] = df['content'].progress_apply(len)               # character count
    df['cdlen'] = df['content'].progress_apply(distinct_count)   # distinct characters
    df['crepeat_rate'] = df['clen'] / df['cdlen']                # char repetition ratio
    df['wlen'] = df['words'].progress_apply(len)                 # token count
    df['wdlen'] = df['words'].progress_apply(distinct_count)     # distinct tokens
    df['wrepeat_rate'] = df['wlen'] / df['wdlen']                # token repetition ratio

    # Keep only long, non-repetitive comments. .copy() materializes the slice
    # so the 'summaries' assignment below is not made on a view
    # (avoids pandas SettingWithCopyWarning / silently lost writes).
    df_good = df[(df['cdlen'] >= 150) & (df['wrepeat_rate'] < 2)].copy()

    if df_good.empty:
        continue

    # One key sentence per comment, then aggregate per game and extract the
    # top-20 summary sentences from the combined text.
    df_good['summaries'] = df_good['content'].progress_apply(
        lambda x: HanLP.extractSummary(x, 1, r'[ ]'))
    df_new = (df_good[['source', 'game_id', 'game_name', 'summaries']]
              .groupby(['source', 'game_id', 'game_name'])
              .agg(join)
              .reset_index())
    df_new['summaries'] = df_new['summaries'].progress_apply(
        lambda x: list(HanLP.extractSummary(x, 20, r'[ ]')))

    # Persist one row per (source, game_id, game_name). iterrows() is robust
    # regardless of the frame's index, unlike indexing .iloc with .index labels.
    for _, row in tqdm(df_new.iterrows(), total=len(df_new)):
        kv = dict(
            source=str(row['source']),
            game_id=str(row['game_id']),
            game_name=str(row['game_name']),
            summaries=json.dumps(row['summaries'], ensure_ascii=False),
        )
        store.to_mysql(kv, trg_db, trg_table)
