# -*- coding: utf-8 -*-
import codecs
import sys

from elasticsearch import Elasticsearch

# Python 2-only compatibility shim: reload(sys) re-exposes
# sys.setdefaultencoding (hidden by site.py at startup) so the process default
# encoding can be forced to UTF-8, avoiding implicit-conversion
# UnicodeDecodeError on the mixed str/unicode operations below.
# NOTE(review): this fails on Python 3 (no reload builtin, no setdefaultencoding).
reload(sys)
sys.setdefaultencoding('utf8')

# (Translation of the note below) Each image's tag info in the index is one
# document; the query is tokenized into tokens, and for each token we count
# how many documents it appears in (i.e. its df).
"""
    库中每一张图片tag信息作为一个文档, query分词生成tokens, 统计每个token在多少文档中出现过(即df) 
"""


def check_contain_chinese(query):
    """Return True if *query* contains at least one CJK Unified Ideograph.

    Only the basic CJK block U+4E00..U+9FFF is tested; extension blocks
    (e.g. U+3400..U+4DBF) are not covered. Empty input returns False.
    """
    # any() short-circuits on the first CJK character, same as the
    # original explicit loop with early return.
    return any(u'\u4e00' <= ch <= u'\u9fff' for ch in query)


# Elasticsearch client: single hard-coded node, TLS verification off, all
# node sniffing disabled (static single-host cluster view).
es = Elasticsearch([{'host': '10.24.52.220', 'port': 9200}, ], verify_certs=False, sniff_on_start=False,
                   sniff_on_connection_fail=False, sniffer_timeout=0)

# For each token read from the token-average-length CSV, count how many
# indexed documents (image tag records) match it — its document frequency
# (df) — and write "token#search_times#df" lines to token_df.csv.
# Input rows are "<token>,<search_times>[,...]".
with codecs.open('../data/token_average_length.csv', 'r', encoding='utf8') as input_file, \
        codecs.open('token_df.csv', 'w', encoding='utf8') as output_file:
    for line in input_file:
        item = line.strip('\n').split(',')
        # Fix: skip malformed rows (e.g. a trailing blank line) that lack the
        # search_times field — previously item[1] raised IndexError.
        if len(item) < 2:
            continue
        token = item[0]

        # Only tokens containing at least one CJK character are of interest.
        if not check_contain_chinese(token):
            continue

        search_times = item[1]
        # size=1 and empty "fields": only hits.total (the df) is needed,
        # not the matched documents themselves.
        query = {
            "fields": [],
            "size": 1,
            "query": {
                "bool": {
                    "filter": [{
                        "term": {
                            "status": 0
                        }
                    }, {
                        "multi_match": {
                            "query": token,
                            "fields": [
                                "keyword",
                                "title",
                                "remark",
                                "keyword2"
                            ],
                            "type": "best_fields",
                            "operator": "AND",
                            "analyzer": "ik_smart"
                        }
                    }, {
                        "term": {
                            "enable_toutiaohao": 1
                        }
                    }]
                }
            }
        }
        response = es.search(index="buda", doc_type="all", body=query)
        # NOTE(review): assumes Elasticsearch < 7, where hits.total is a plain
        # int (in ES 7+ it becomes {"value": ..., "relation": ...}) — confirm.
        output_file.write(token + "#" + search_times + "#" + str(response['hits']['total']) + "\n")
