#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import threading
from elasticsearch import Elasticsearch
import re
import click
import logging
import time

logging.getLogger().setLevel(logging.INFO)
# (query_body, keyword, is_regexp) tuples, populated from the config file by readfile().
queries = []
# Serializes the check-then-insert against groupdb across worker threads.
mutex = threading.Lock()


@click.command()
@click.option('--config', help='configuration file of keywords', required=True)
@click.option('--host', help='elasticsearch host address', default='http://127.0.0.1:9200/', required=True)
def main(config, host):
    """Load keyword queries from the config file, then run one grouping pass.

    config -- path to the keyword configuration file (one keyword per line)
    host   -- Elasticsearch base URL
    """
    readfile(config)
    run_once(host)


def run_once(host):
    """Spawn one worker thread per configured keyword query and wait for all of them.

    host -- Elasticsearch base URL, passed through to process_group_data.
    """
    global queries
    threads = []
    # One thread per keyword.
    for query_body, keyword, is_regexp in queries:
        t = threading.Thread(target=process_group_data,
                             args=(host, query_body, keyword, is_regexp))
        # Thread.setDaemon() is deprecated (removed path since Python 3.10);
        # assigning the attribute is the supported spelling.
        t.daemon = True
        t.start()
        threads.append(t)
    for t in threads:
        t.join()


def readfile(config):
    """Parse the keyword config file and append (query, keyword, is_regexp) tuples to `queries`.

    Each non-empty line is either a bare keyword (treated as match_phrase) or
    '<type>:<keyword>' where <type> is one of match_phrase, regexp, match.
    Lines containing more than one ':' are silently ignored (original behavior).
    """
    global queries
    # file() and str.decode() are Python 2 only; open() with an explicit
    # encoding is the portable equivalent, and `with` guarantees the close.
    with open(config, encoding='utf-8') as f:
        for raw in f:
            line = raw.strip('\n')
            parts = line.split(':')
            if len(parts) == 1:
                keyword = parts[0]
                queries.append(({'query': {'match_phrase': {'_all': keyword}}},
                                keyword, False))
            elif len(parts) == 2:
                qtype, keyword = parts
                if qtype == 'match_phrase':
                    queries.append(({'query': {'match_phrase': {'_all': keyword}}},
                                    keyword, False))
                elif qtype == 'regexp':
                    # Only regexp keywords set is_regexp=True.
                    queries.append(({'query': {'regexp': {'_all': keyword}}},
                                    keyword, True))
                elif qtype == 'match':
                    queries.append(({'query': {'match': {'_all': keyword}}},
                                    keyword, False))


def select_by_keyword(es, query='', fields=None, offset=0, limit=0):
    """Yield the `_source` document of each hit for `query` in the resultdb index.

    es     -- Elasticsearch client
    query  -- full search body (dict)
    fields -- optional list of source fields to fetch
    offset -- pagination start (from_)
    limit  -- page size
    """
    response = es.search(index="resultdb", doc_type="result",
                         body=query, _source_include=fields or [],
                         from_=offset, size=limit)
    hits = response.get('hits', {}).get('hits', [])
    for hit in hits:
        yield hit['_source']


def get(es, group_keyword, resultid, fields=None):
    """Fetch one groupdb document by its '<keyword>:<resultid>' id.

    Returns the document's _source dict, or None when it does not exist
    (404 responses are ignored by the client).
    """
    doc_id = '%s:%s' % (group_keyword, resultid)
    response = es.get(index="groupdb", doc_type="group", id=doc_id,
                      _source_include=fields or [], ignore=404)
    return response.get('_source', None)


def insert_group(es, group_keyword, resultid, title, url, project, date):
    """Index one grouped record into groupdb, keyed by '<keyword>:<resultid>'.

    refresh=True makes the document immediately visible to subsequent reads,
    which the duplicate check in the workers relies on.
    """
    document = {
        'group_keyword': group_keyword,
        'resultid': resultid,
        'title': title,
        'url': url,
        'project': project,
        'date': date,
    }
    doc_id = '%s:%s' % (group_keyword, resultid)
    return es.index(index="groupdb", doc_type="group", body=document,
                    id=doc_id, refresh=True)


def process_group_data(host, query, group_keyword, is_regexp):
    """Worker thread body: scan resultdb for `query` matches and index grouped
    records into groupdb.

    host          -- Elasticsearch base URL
    query         -- search body produced by readfile()
    group_keyword -- the raw keyword (a regex pattern when is_regexp is True)
    is_regexp     -- when True, every distinct regex match found in the raw
                     result text becomes its own group key
    """
    time_format = '%Y-%m-%d'
    today = time.strftime(time_format, time.localtime())
    offset = 0
    limit = 20
    count = 0
    # One client serves both indices; the elasticsearch client is thread-safe,
    # so there is no need for the two identical clients the old code created.
    es = Elasticsearch(hosts=host)

    es.indices.create(index='groupdb', ignore=400)
    if not es.indices.get_mapping(index='groupdb', doc_type='group'):
        es.indices.put_mapping(index='groupdb', doc_type='group', body={
            "_all": {"enabled": True},
            "properties": {
                "group_keyword": {"type": "string", "index": "analyzed"},
                "resultid": {"type": "string", "index": "analyzed"},
            }
        })

    logging.info("thread about keyword %s", group_keyword)
    while True:
        # Offset-based pagination is safe here because inserts go to a
        # different index (groupdb) than the one being scanned (resultdb).
        results = list(select_by_keyword(es, query=query, offset=offset, limit=limit))
        nmatched = len(results)
        logging.info("loop: keyword %s get %d results", group_keyword, nmatched)
        if nmatched == 0:
            break
        offset += nmatched
        for result in results:
            if not result:
                continue
            project = result['project']
            taskid = result['taskid']
            url = result['url']
            title = result.get('result', {}).get('title', '')
            date = result.get('result', {}).get('date', today)
            resultid = '%s:%s' % (project, taskid)
            if is_regexp:
                # Dedupe matches within a single result; a set gives O(1)
                # membership instead of the old list scan.
                seen = set()
                for match_keyword in re.findall(group_keyword, str(result), re.I):
                    if match_keyword in seen:
                        continue
                    # CVE ids must contain a dash (e.g. cve-2017-1234);
                    # bare 'cve' fragments are noise.
                    if group_keyword.startswith('cve') and '-' not in match_keyword:
                        continue
                    count += 1
                    seen.add(match_keyword)
                    _store_group(es, match_keyword, resultid, title, url, project, date)
            else:
                count += 1
                _store_group(es, group_keyword, resultid, title, url, project, date)
    logging.info("keyword %s match total %d results", group_keyword, count)


def _store_group(es, keyword, resultid, title, url, project, date):
    """Insert a group record unless one already exists (check + insert under mutex)."""
    try:
        existing = get(es, keyword, resultid)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any lookup failure is treated as "not present".
        existing = None
    with mutex:
        if not existing:
            insert_group(es, keyword, resultid, title, url, project, date)

# Script entry point: click parses --config/--host from the command line.
if __name__ == '__main__':
    main()
