# encoding=utf-8
import pymysql
import jieba.analyse
from news_spiders.news_spiders import settings
from datetime import datetime, timedelta
import os
import time
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
import re
import sys
# sys.path.append(os.path.dirname(__file__))
sys.path.append("..")


# Make the relative resource paths below resolve regardless of the caller's CWD.
os.chdir(os.path.dirname(__file__))
# Configure jieba with a custom user dictionary and a Chinese stop-word list
# before any keyword extraction happens.
jieba.load_userdict('./userdict.txt')
jieba.analyse.set_stop_words('./stopwords_cn.txt')


def load_text(key):
    """Load concatenated article text and publish timestamps for *key*.

    Queries the per-keyword Sina and NetEase news tables
    ('<key>_sinanews' and '<key>_neteasenews', spaces in *key* replaced
    by underscores) and merges their (content, date) rows.

    Args:
        key: search keyword used to derive the table names.

    Returns:
        (content, time_list): every article body joined into one string,
        and the matching publish times as Unix timestamps (floats).
    """
    contents = []
    time_list = []
    db = pymysql.connect(host=settings.MYSQL_HOST, user=settings.MYSQL_USER,
                         passwd=settings.MYSQL_PASSWD, db=settings.MYSQL_DBNAME, charset='utf8')
    try:
        cursor = db.cursor()
        table_base = key.replace(" ", "_")
        # Same query against both source tables; one failing table (e.g.
        # the keyword was never crawled on that site) must not abort the other.
        for suffix in ('_sinanews', '_neteasenews'):
            try:
                # NOTE(review): table name built by string interpolation --
                # safe only while *key* comes from trusted code, not user input.
                sql = "SELECT content,date FROM `%s`" % (table_base + suffix)
                cursor.execute(sql)
                for content, date in cursor.fetchall():
                    contents.append(content)
                    time_list.append(time.mktime(date.timetuple()))
            except Exception:
                print('[SQL 查询出错！]')
    finally:
        # Bug fix: the original never closed the connection (leak).
        db.close()
    # join() instead of repeated += avoids quadratic string building.
    return ''.join(contents), time_list


# Denoise article timestamps with MeanShift clustering.
def date_analyse(dateList):
    """Cluster publish timestamps and derive a display date range.

    Timestamps are clustered with MeanShift; clusters holding no more
    than 1/15 of the samples are treated as noise and discarded.  The
    surviving timestamps, padded by +/-10 days, give the range, with the
    upper bound clamped to "now".

    Args:
        dateList: Unix timestamps (seconds) of the articles.

    Returns:
        [min_date_str, max_date_str] as 'YYYYMMDD' strings.
    """
    data = np.asarray(dateList, dtype=float)
    n_samples = len(data)
    print(n_samples)

    # Bandwidth is estimated automatically from the data.
    X = data.reshape(-1, 1)
    bandwidth = estimate_bandwidth(X, quantile=0.8, n_samples=500)
    print(bandwidth)
    ms = MeanShift(bandwidth=bandwidth, bin_seeding=False)
    ms.fit(X)
    labels = ms.labels_

    n_clusters_ = len(np.unique(labels))
    print("Number of clusters : %d" % n_clusters_)

    # Keep only timestamps belonging to sufficiently large clusters.
    kept = []
    for k in range(n_clusters_):
        members = data[labels == k]
        if len(members) > n_samples / 15:
            kept.extend(members)
        print('label %d: %d' % (k, len(members)))

    # Robustness fix: if every cluster was filtered out, fall back to the
    # full sample instead of crashing on max()/min() of an empty list.
    if not kept:
        kept = list(data)

    print(len(kept))
    delta = timedelta(days=10)
    max_date = datetime.fromtimestamp(max(kept)) + delta
    if max_date > datetime.now():
        max_date = datetime.now()
    min_date = datetime.fromtimestamp(min(kept)) - delta
    max_date_str = max_date.strftime("%Y%m%d")
    min_date_str = min_date.strftime("%Y%m%d")
    print(min_date_str + ' - ' + max_date_str)
    return [min_date_str, max_date_str]


# Extract hot keywords (TF-IDF) and the covered date range.
def get_keywords(keys, key=''):
    """Extract up to 40 weighted keywords and a date range for topics.

    Args:
        keys: iterable of topic keywords whose crawled news is analysed.
        key: optional single keyword; when non-empty it overrides *keys*.

    Returns:
        (hot_words, words_value, daterange): the keywords, their TF-IDF
        weights, and ['YYYYMMDD', 'YYYYMMDD'] (empty list when fewer
        than 3 dated articles were found).
    """
    sentence = ''
    dateList = []
    daterange = []
    hot_words = []
    words_value = []

    if key != '':
        keys = [key]
    print(keys)
    for k in keys:
        try:
            s, d = load_text(k)
            sentence += s
            dateList += d
            print(dateList)
        except Exception:
            print('load_text error')

    # Strip digit runs before keyword extraction.
    # Bug fix: the original pattern r'[0-9]|/d+' contained a typo ('/d'
    # instead of '\d') and therefore also deleted literal "/d" sequences.
    sentence = re.sub(r'\d+', '', sentence)
    for word, weight in jieba.analyse.extract_tags(sentence, topK=40, withWeight=True):
        print('%s %s' % (word, weight))
        hot_words.append(word)
        words_value.append(weight)

    if len(dateList) < 3:
        # Too few dated articles to cluster meaningfully.
        return hot_words, words_value, daterange
    daterange = date_analyse(dateList)
    return hot_words, words_value, daterange

# Build the JSON payload for the hot-word graph.
def get_topwords(keys, key=''):
    """Build an ECharts-style graph dict of hot words.

    Args:
        keys: iterable of topic keywords.
        key: optional single keyword overriding *keys*.

    Returns:
        dict with 'nodes' (one per keyword, sized by TF-IDF weight),
        'links' (always empty here), 'categories' (one per group of 10
        words by rank) and 'daterange'.
    """
    response = {"nodes": [], "links": [], "categories": []}

    hot_words, words_value, daterange = get_keywords(keys, key)
    response['daterange'] = daterange
    size = len(hot_words)
    for i in range(size):
        node = {"id": str(i),
                "name": hot_words[i],
                "symbolSize": words_value[i] * 100 + 40,
                "value": words_value[i] * 100 + 40,
                "category": i // 10}
        response['nodes'].append(node)
    # Bug fix: use ceiling division so the last partial group of words
    # still gets a category entry; floor division (size // 10) dropped it
    # whenever size was not a multiple of 10, leaving nodes pointing at a
    # nonexistent category.
    for i in range((size + 9) // 10):
        response['categories'].append({'name': str(i)})
    return response


if __name__ == "__main__":
    # Bug fix: get_keywords expects a list of keywords; passing a bare
    # string iterated it character by character, issuing one table lookup
    # per Chinese character.
    print(get_keywords(['新疆棉花']))
