#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Write the host of each regular URL to host_from_regular.txt
# Write each short-link URL to ShortUrl.txt
# Author:fwh

from elasticsearch import Elasticsearch
import elasticsearch
import re
from urllib.parse import urlparse #python3 only
import datetime

#project_name=input("请输入所要查询的项目：")
def count_es(es):
    """Return how many 'result' documents the 'xuanwulab' project has in resultdb.

    Falls back to 0 when the response carries no 'count' field.
    """
    query = {'query': {'bool': {'must': {'term': {'project': 'xuanwulab'}}}}}
    response = es.count(index="resultdb", doc_type="result", body=query)
    return response.get('count', 0)

# Sort in descending order so newly added data (more recent dates) comes first
def select(es, fields=None, offset=0, limit=0):
    """Yield the '_source' of each 'xuanwulab' hit, newest first.

    Results are ordered by 'result.date' descending; *offset*/*limit* map to
    Elasticsearch 'from_'/'size' paging, *fields* restricts the returned
    source fields (all fields when omitted).
    """
    body = {
        'query': {'bool': {'must': {'term': {'project': 'xuanwulab'}}}},
        'sort': {'result.date': {"order": "desc"}},
    }
    response = es.search(index="resultdb", doc_type="result",
                         body=body,
                         _source_include=fields or [],
                         from_=offset, size=limit)
    hits = response.get('hits', {}).get('hits', [])
    for hit in hits:
        yield hit['_source']

if __name__ == '__main__':
    es = Elasticsearch(hosts="http://192.168.10.1:9200")
    try:
        count = count_es(es)
        size = 100
        (nround, nlast) = divmod(count, size)
        print('Task divided into %d rounds (%d items each round), with a tail of %d' % (nround, size, nlast))

        round_counter = 0
        host_list = []
        url_list = []
        while round_counter < nround:
            # if round_counter == 2:
            #     break
            try:
                result = list(select(es, offset= round_counter*size, limit=size))
            except elasticsearch.exceptions.ConnectionTimeout:
                print('elasticsearch.exceptions.ConnectionTimeout')
            except elasticsearch.exceptions.ConnectionError:
                print('elasticsearch.exceptions.ConnectionError')
            except elasticsearch.exceptions.TransportError:
                print('elasticsearch.exceptions.TransportError,maybe need to adjust max_result_window value')

            for item in result:
                content = item['result']['content']
                #regexpression refer to http://www.myexception.cn/HTML-CSS/639814.html
                url = re.findall(r''' <a(\s*)(.*?)(\s*)href(\s*)=(\s*)([\"\s]*)([^\"\']+?)([\"\s]+)(.*?)> ''', content,	 re.S | re.I)
                for u in url:
                    try:
                        #https://www.cnblogs.com/itlqs/p/6055365.html
                        hostname = urlparse(u[6]).netloc
                        if hostname in ['t.co','bit.ly','goo.gl','ow.ly','buff.ly','cmcc.in','url.cn']:
                            url_list.append(u[6])
                        else:
                            host_list.append(hostname)
                    except Exception as e:
                        print(e)
            round_counter += 1

        file_url = open('E:\Hosts\\xuanwulab\ShortUrl.txt', 'w')
        for u in url_list:
            file_url.write(u + '\n')
        file_url.close()

        file_host=open('E:\Hosts\\xuanwulab\host_from_regular.txt','w')
        for host in host_list:
            file_host.write(host+'\n')
        file_host.close()

    except Exception as e:
        print(e)

