#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import sys

reload(sys)
sys.setdefaultencoding('utf8')
import requests
import codecs
import csv

"""
    key: keyword_user_active_length_v2
    数据内容:新库的ik分词器，新的提取关键词的模型,每个词的权重为用户主动搜索的包含该关键词的文档长度的平均值，
    原理是:某个关键词经常出现在短的query中，说明它比较重要。此外有增加的频率信息进行微调（频率小于2的不在考虑范围内)

    计算包含token的query的平均长度
"""

# Accumulators filled below and consumed by the output stage:
#   tokens_tf           token -> number of queries containing it
#   tokens_total_length token -> summed length (in tokens) of those queries
tokens_tf = {}
tokens_total_length = {}

with codecs.open('../data/test.csv', 'r', encoding='utf8') as input_file:
    count = 0
    for line in input_file:
        count += 1
        if count == 1:
            # Skip the CSV header row.
            continue

        if count % 1000 == 0:
            print(count)

        # NOTE(review): naive comma-split breaks on quoted fields that contain
        # commas; only the first column is used, so this holds as long as the
        # query itself never contains a comma — confirm against the data.
        item = line.strip('\n').split(',')
        query = item[0].strip('"').strip()
        try:
            response = requests.get("http://10.24.52.220:9200/_analyze?pretty",
                                    data=json.dumps({"analyzer": "ik_smart", "text": query}))
            # Parse the body once (the old code called response.json() twice,
            # deserializing the same payload two times per request).
            body = response.json()
            tokens = body.get('tokens')
            if not tokens:  # covers both "key missing" and "empty list"
                continue

            # Query length is measured in tokens; hoist it out of the loop.
            query_length = len(tokens)
            for token_item in tokens:
                token = token_item['token']
                # Both dicts are updated in lockstep, so one membership
                # test suffices (the old code checked both).
                if token in tokens_tf:
                    tokens_tf[token] += 1
                    tokens_total_length[token] += query_length
                else:
                    tokens_tf[token] = 1
                    tokens_total_length[token] = query_length
        except Exception:
            # Best-effort: log the failing query and continue, but no longer
            # swallow SystemExit/KeyboardInterrupt like the old bare `except:`.
            print(query)

print('output' + '.' * 20)

# Emit one CSV row per token: how many queries contained it, the summed
# query lengths, and the resulting average query length (2 decimals).
with codecs.open('../data/token_average_length.csv', 'w', encoding='utf-8') as output_file:
    file_header = ['token', 'token_tf', 'token_total_count', 'query_average_length']
    dict_writer = csv.DictWriter(output_file, file_header)
    dict_writer.writeheader()
    count = 0
    for token, tf in tokens_tf.items():
        count += 1
        if count % 1000 == 0:
            print(count)

        # Use the already-unpacked `tf` and a single lookup into
        # tokens_total_length (the old code re-indexed both dicts per row).
        total_length = tokens_total_length[token]
        dict_writer.writerow(
            {'token': token,
             'token_tf': tf,
             'token_total_count': total_length,
             # * 1.0 forces float division under Python 2.
             'query_average_length': round(total_length * 1.0 / tf, 2)})
