#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import datetime
import re
import socket
import urllib
from collections import Counter
from urllib import request, error
from urllib.parse import urlparse  # python3 only

import elasticsearch
from elasticsearch import Elasticsearch


# Project name used to filter every Elasticsearch query below.
# (The prompt is Chinese for "Please enter the project to query:".)
project_name=input("请输入所要查询的项目：")
def count_es(es, project=None):
    """Count the documents of one project in the ``resultdb`` index.

    Args:
        es: an ``Elasticsearch`` client instance.
        project: project name to filter on; defaults to the module-level
            ``project_name`` read from stdin (backward compatible with the
            old zero-argument-style call ``count_es(es)``).

    Returns:
        int: number of matching ``result`` documents, 0 if the response
        carries no ``count`` field.
    """
    if project is None:
        project = project_name
    query = {'query': {'bool': {'must': {'term': {'project': project}}}}}
    return es.count(index="resultdb", doc_type="result",
                    body=query).get('count', 0)

# Sort descending on date so newly added records come first.
def select(es, fields=None, offset=0, limit=0, project=None):
    """Yield the ``_source`` of each ``result`` document of a project.

    Hits are sorted descending on ``result.date`` so the newest documents
    are yielded before older ones.

    Args:
        es: an ``Elasticsearch`` client instance.
        fields: optional list of ``_source`` fields to include.
        offset: pagination offset, passed through as ``from_``.
        limit: page size, passed through as ``size``.
        project: project name to filter on; defaults to the module-level
            ``project_name`` read from stdin.

    Yields:
        dict: the ``_source`` payload of each matching hit.
    """
    if project is None:
        project = project_name
    body = {
        'query': {'bool': {'must': {'term': {'project': project}}}},
        'sort': {'result.date': {"order": "desc"}},
    }
    response = es.search(index="resultdb", doc_type="result",
                         body=body,
                         _source_include=fields or [],
                         from_=offset, size=limit)
    for record in response.get('hits', {}).get('hits', []):
        yield record['_source']

# Expand a shortened URL into the real address it redirects to.
def GetUrl(url, timeout=1):
    """Follow redirects of *url* and return the final resolved URL.

    Errors are returned rather than raised: a formatted message for
    timeouts, otherwise the ``reason`` of the ``URLError`` (which may not
    be a string — callers must not assume the result is a URL).

    Args:
        url: the (possibly shortened) URL to resolve.
        timeout: socket timeout in seconds (default 1, as before).

    Returns:
        The final URL string on success; an error message or the
        ``URLError.reason`` object on failure.
    """
    try:
        # Context manager closes the HTTP response instead of leaking it.
        with request.urlopen(url, timeout=timeout) as fp:
            return fp.geturl()
    except error.URLError as e:
        if isinstance(e.reason, socket.timeout):
            return ("There was an error: %r" % e)
        else:
            return e.reason
    except socket.timeout as e:
        # urlopen can raise socket.timeout directly, not wrapped in URLError.
        return ("There was an error: %r" % e)

if __name__ == '__main__':
    es = Elasticsearch(hosts="http://192.168.10.1:9200")
    try:
        count = count_es(es)
        size = 100
        (nround, nlast) = divmod(count, size)
        print('Task divided into %d rounds (%d items each round), with a tail of %d' % (nround, size, nlast))

        round_counter = 0
        lists=[]
        while round_counter < nround:
            if round_counter == 3:
                break
            try:
                result = list(select(es, offset= round_counter*size, limit=size))
            except elasticsearch.exceptions.ConnectionTimeout:
                print('elasticsearch.exceptions.ConnectionTimeout')
            except elasticsearch.exceptions.ConnectionError:
                print('elasticsearch.exceptions.ConnectionError')
            except elasticsearch.exceptions.TransportError:
                print('elasticsearch.exceptions.TransportError,maybe need to adjust max_result_window value')

            for item in result:
                #print(item['result']['title'])
                content = item['result']['content']
                #regexpression refer to http://www.myexception.cn/HTML-CSS/639814.html
                url = re.findall(r''' <a(\s*)(.*?)(\s*)href(\s*)=(\s*)([\"\s]*)([^\"\']+?)([\"\s]+)(.*?)> ''', content,	 re.S | re.I)
                for u in url:
                    #print(u[6])
                    try:
                        #https://www.cnblogs.com/itlqs/p/6055365.html
                        #print(urlparse(u[6]).netloc)
                        hostname = urlparse(u[6]).netloc
                        if hostname in ['t.co','bit.ly','goo.gl','ow.ly','buff.ly','cmcc.in','url.cn']:
                            realurl = GetUrl(u[6])
                            if realurl.startswith('http'):
                                lists.append(urlparse(realurl).netloc)
                            else:
                                print(realurl)
                                #pass
                        else:
                            lists.append(hostname)
                    except:
                        print('URL parse exception')
            round_counter += 1

        #统计不同host的个数并降序排列，最终输出到txt文件
        hosts={}
        for x in lists:
            if x in hosts:
                hosts[x]+=1
            else:
                hosts[x]=1
        #host按照出现频率降序排列
        sorted_hosts=sorted(hosts.items(),key=lambda item:item[1],reverse=True)

        now=datetime.datetime.now()
        file=open('E:\Hosts\host_正则_'+project_name+'_'+now.strftime('%Y%m%d%H%M')+'.txt','w')
        file.write("不同host总数为："+str(len(sorted_hosts))+'\n')
        for i in range(len(sorted_hosts)):
            file.write(str(sorted_hosts[i][0])+": "+str(sorted_hosts[i][-1])+'\n')
        file.close()
    except:
        print('Error in getting count')

