#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Write the hosts of regular URLs to host_from_regular.txt
# Write shortened URLs to ShortUrl.txt

from elasticsearch import Elasticsearch
import elasticsearch
import re
from urllib.parse import urlparse #python3 only
import datetime

# date_start = input("请输入所要查询的起始时间(XXXX-XX-XX)：")
# date_end = input("请输入所要查询的结束时间(XXXX-XX-XX)：")

def count_es(es):
    """Return the total number of documents in the 'xuanwulab' project.

    Counts documents in index "resultdb" (doc_type "result") whose
    'project' term equals 'xuanwulab'.

    :param es: an Elasticsearch client instance.
    :return: the document count, or 0 when the response has no 'count'.
    """
    query = {
        'query': {
            'bool': {
                'must': {'term': {'project': 'xuanwulab'}}
            }
        }
    }
    response = es.count(index="resultdb", doc_type="result", body=query)
    return response.get('count', 0)

# Sort by result.date in descending order so that newer records come first.
def select(es, fields=None, offset=0, limit=0):
    """Yield the '_source' of documents in the 'xuanwulab' project.

    Searches index "resultdb" (doc_type "result"), sorted by
    'result.date' descending so the newest data is returned first.

    :param es: an Elasticsearch client instance.
    :param fields: optional list of fields to include in '_source'.
    :param offset: paging offset (passed as from_).
    :param limit: page size (passed as size).
    :yield: each hit's '_source' dict.
    """
    body = {
        'query': {
            'bool': {
                'must': {'term': {'project': 'xuanwulab'}}
            }
        },
        'sort': {
            'result.date': {'order': 'desc'}
        }
    }
    hits = es.search(index="resultdb",
                     doc_type="result",
                     body=body,
                     _source_include=fields or [],
                     from_=offset,
                     size=limit).get('hits', {}).get('hits', [])
    for record in hits:
        yield record['_source']


if __name__ == '__main__':
    es = Elasticsearch(hosts="http://192.168.10.1:9200")
    try:
        count = count_es(es)
        size = 100
        (nround, nlast) = divmod(count, size)
        print('Task divided into %d rounds (%d items each round), with a tail of %d' % (nround, size, nlast))

        # Regex for href values inside <a ...> tags; compiled once, outside the loop.
        # Refer to http://www.myexception.cn/HTML-CSS/639814.html
        href_re = re.compile(
            r''' <a(\s*)(.*?)(\s*)href(\s*)=(\s*)([\"\s]*)([^\"\']+?)([\"\s]+)(.*?)> ''',
            re.S | re.I)
        # Known URL-shortener hosts; set for O(1) membership tests.
        short_hosts = {'t.co', 'bit.ly', 'goo.gl', 'ow.ly', 'buff.ly', 'cmcc.in', 'url.cn'}

        url_list1 = []  # shortened URLs (host is a known URL shortener)
        url_list2 = []  # regular URLs whose host is twitter.com

        # Paging plan: nround full batches, plus the tail batch of nlast records
        # (the tail was previously never fetched — up to size-1 records dropped).
        batches = [(r * size, size) for r in range(nround)]
        if nlast:
            batches.append((nround * size, nlast))

        for offset, batch_size in batches:
            # Reset each round so a failed fetch can't reuse the previous
            # batch (or raise NameError on the very first round).
            result = []
            try:
                result = list(select(es, offset=offset, limit=batch_size))
            except elasticsearch.exceptions.ConnectionTimeout:
                print('elasticsearch.exceptions.ConnectionTimeout')
            except elasticsearch.exceptions.ConnectionError:
                print('elasticsearch.exceptions.ConnectionError')
            except elasticsearch.exceptions.TransportError:
                print('elasticsearch.exceptions.TransportError,maybe need to adjust max_result_window value')

            for item in result:
                content = item['result']['content']
                for match in href_re.findall(content):
                    try:
                        # Group 7 (index 6) is the bare href value.
                        # https://www.cnblogs.com/itlqs/p/6055365.html
                        url = match[6]
                        hostname = urlparse(url).netloc
                        if hostname in short_hosts:
                            url_list1.append(url)
                        elif hostname == 'twitter.com':
                            url_list2.append(url)
                    except Exception as e:
                        print(e)

        # Raw strings keep the Windows paths byte-identical while avoiding
        # invalid escape sequences; 'with' guarantees the files are closed.
        with open(r'E:\Hosts\xuanwulab\twitter\ShortUrl.txt', 'w') as short_file:
            for u in url_list1:
                short_file.write(u + '\n')

        with open(r'E:\Hosts\xuanwulab\twitter\url_from_regular.txt', 'w') as regular_file:
            for u in url_list2:
                regular_file.write(u + '\n')

    except Exception as e:
        # Top-level boundary: report and exit instead of crashing with a trace.
        print(e)




