# -*- coding: utf-8 -*-

# python2


# NOTE: under Python 2 you frequently hit encoding errors such as
# "UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-5: ordinal not in range(128)".
# One workaround is switching Python 2's default encoding from ASCII to utf-8, but that is not a
# silver bullet and may make some code behave strangely.
import json
import time

import elasticsearch.helpers
from elasticsearch import Elasticsearch
from six.moves.urllib.parse import urlparse, parse_qs

from . import utils


class ResultDB:
    """Elasticsearch-backed store for crawl results.

    All documents live in one index under the doc type ``result`` and are
    addressed by the id ``"<project>:<taskid>"``.

    NOTE(review): the ``doc_type`` / ``_source_include`` / ``_all`` /
    ``"string"``-type mapping keywords target a pre-6.x Elasticsearch and a
    matching elasticsearch-py client — confirm the installed versions before
    upgrading either side.
    """
    # doc type used for every document read/written by this class
    __type__ = 'result'

    def __init__(self, hosts, index='pyspider'):
        """Connect to *hosts* and make sure *index* exists with the mapping below.

        :param hosts: host list passed straight to ``Elasticsearch(hosts=...)``
        :param index: name of the index holding all result documents
        """
        self.index = index
        self.es = Elasticsearch(hosts=hosts)

        # ignore=400: the index may already exist (resource_already_exists)
        self.es.indices.create(index=self.index, ignore=400)
        # noinspection PyBroadException
        try:
            # Install the mapping only when none is present yet. Any failure
            # (e.g. an incompatible ES version) is deliberately swallowed —
            # the store still works with ES's dynamic mapping.
            if not self.es.indices.get_mapping(index=self.index, doc_type=self.__type__):
                self.es.indices.put_mapping(index=self.index, doc_type=self.__type__, body={
                    "_all": {"enabled": True},
                    "properties": {
                        "taskid": {"enabled": False},
                        # not_analyzed so term queries / aggregations match
                        # the exact project name
                        "project": {"type": "string", "index": "not_analyzed"},
                        "url": {"enabled": False},
                    }
                })
        except Exception:
            pass

    @property
    def projects(self):
        """Return the list of distinct project names present in the index."""
        ret = self.es.search(index=self.index, doc_type=self.__type__,
                             body={"aggs": {"projects": {
                                 "terms": {"field": "project"}
                             }}}, _source=False)
        return [each['key'] for each in ret['aggregations']['projects'].get('buckets', [])]

    def save(self, project, taskid, url, result):
        """Insert or overwrite the result document for (*project*, *taskid*)."""
        obj = {
            'taskid': taskid,
            'project': project,
            'url': url,
            'result': result,
            'updatetime': time.time(),
        }
        # refresh=True makes the document searchable immediately; the default
        # (False) could make a search issued right after this call miss it.
        return self.es.index(refresh=True, index=self.index, doc_type=self.__type__,
                             body=obj, id='%s:%s' % (project, taskid))

    def select(self, project, fields=None, offset=0, limit=0):
        """Yield the ``_source`` of each document belonging to *project*.

        With ``limit`` falsy, every matching document is streamed via the
        scan helper; otherwise a single search of at most *limit* hits is
        issued. An empty/None *fields* returns the full source.
        """
        offset = offset or 0
        limit = limit or 0
        if not limit:
            for record in elasticsearch.helpers.scan(self.es, index=self.index, doc_type=self.__type__,
                                                     query={'query': {'term': {'project': project}}},
                                                     _source_include=fields or [], from_=offset,
                                                     sort="updatetime:desc"):
                yield record['_source']
        else:
            for record in self.es.search(index=self.index, doc_type=self.__type__,
                                         body={'query': {'term': {'project': project}}},
                                         _source_include=fields or [], from_=offset, size=limit,
                                         sort="updatetime:desc"
                                         ).get('hits', {}).get('hits', []):
                yield record['_source']

    def select_by_query(self, query, fields=None, offset=0, limit=0):
        """Yield the ``result`` sub-document of each hit for an arbitrary *query*.

        *query* is a full ES query body. When *fields* is not given, only the
        ``result.*`` fields listed below are fetched. Same scan-vs-search
        split on *limit* as :meth:`select`.
        """
        offset = offset or 0
        limit = limit or 0
        fields_set = ['result.title', 'result.author', 'result.editor', 'result.content', 'result.section',
                      'result.date', 'result.tags']
        if not limit:
            for record in elasticsearch.helpers.scan(self.es, index=self.index, doc_type=self.__type__,
                                                     query=query,
                                                     _source_include=fields or fields_set, from_=offset,
                                                     sort="updatetime:desc"):
                yield record['_source']['result']
        else:
            for record in self.es.search(index=self.index, doc_type=self.__type__,
                                         body=query,
                                         _source_include=fields or fields_set, from_=offset, size=limit,
                                         sort="updatetime:desc"
                                         ).get('hits', {}).get('hits', []):
                yield record['_source']['result']

    def count(self, project):
        """Return the number of documents stored for *project*."""
        return self.es.count(index=self.index, doc_type=self.__type__,
                             body={'query': {'term': {'project': project}}}
                             ).get('count', 0)

    def get(self, project, taskid, fields=None):
        """Return the ``_source`` for (*project*, *taskid*), or None if absent.

        ignore=404 keeps a missing document from raising; ``_source`` is then
        simply not present in the response.
        """
        ret = self.es.get(index=self.index, doc_type=self.__type__, id="%s:%s" % (project, taskid),
                          _source_include=fields or [], ignore=404)
        return ret.get('_source', None)

    def drop(self, project):
        """Delete every document belonging to *project*, one by one."""
        # Refresh first so documents indexed just before the drop are visible
        # to the scan below.
        self.refresh()
        for record in elasticsearch.helpers.scan(self.es, index=self.index, doc_type=self.__type__,
                                                 query={'query': {'term': {'project': project}}},
                                                 _source=False):
            self.es.delete(index=self.index, doc_type=self.__type__, id=record['_id'])

    def refresh(self):
        """
        Explicitly refresh one or more index, making all operations
        performed since the last refresh available for search.
        """
        self.es.indices.refresh(index=self.index)


def get_time(start, end):
    """Return every date from *start* to *end* (inclusive) as 'YYYYMMDD' strings.

    :param start: (year, month, day) tuple accepted by ``datetime.date``
    :param end:   (year, month, day) tuple accepted by ``datetime.date``
    :return: list of zero-padded 'YYYYMMDD' strings, one per day;
             empty when *end* precedes *start*
    """
    import datetime
    start_date = datetime.date(*start)
    end_date = datetime.date(*end)
    one_day = datetime.timedelta(days=1)
    result = []
    curr_date = start_date
    # Use <= rather than != so the loop terminates even when end < start
    # (the original `while curr_date != end_date` spun forever in that case).
    while curr_date <= end_date:
        result.append("%04d%02d%02d" % (curr_date.year, curr_date.month, curr_date.day))
        curr_date += one_day
    return result


def get_client(url):
    """Build a :class:`ResultDB` from a connection URL.

    Expected form: ``elasticsearch+resultdb://host:port/?index=name``.

    :param url: connection URL; the scheme must be ``<engine>+<dbtype>``
    :return: a :class:`ResultDB` connected to the URL's netloc
    :raises Exception: if the scheme contains no ``+`` separator
    :raises LookupError: if the database type is not taskdb/projectdb/resultdb
    """
    parsed = urlparse(url)
    scheme = parsed.scheme.split('+')
    if len(scheme) == 1:
        raise Exception('wrong scheme format: %s' % parsed.scheme)
    dbtype = scheme[-1]

    if dbtype not in ('taskdb', 'projectdb', 'resultdb'):
        # %-format explicitly: the original passed dbtype as a second
        # LookupError argument, leaving the %s placeholder uninterpolated.
        raise LookupError('unknown database type: %s, '
                          'type should be one of ["taskdb", "projectdb", "resultdb"]' % dbtype)

    # Some callers embed the query string in the path ("/?index=..."), in
    # which case urlparse leaves it inside ``path`` instead of ``query``.
    if parsed.path.startswith('/?'):
        params = parse_qs(parsed.path[2:])
    else:
        params = parse_qs(parsed.query)
    if params.get('index'):
        index = params['index'][0]
    else:
        index = 'pyspider'

    return ResultDB([parsed.netloc], index=index)


def get_editor_and_author(client, query, project=None, limit=0):
    """Tally documents by author and by editor.

    Pulls documents either from ``client.select(project, ...)`` (when
    *project* is given) or from ``client.select_by_query(query, ...)``,
    then groups them by their ``author`` and ``editor`` fields. Dict/list
    field values are normalized to a JSON string key.

    :param client: a ResultDB-like object with ``select`` / ``select_by_query``
    :param query: ES query body used when *project* is not given
    :param project: optional project name to select from directly
    :param limit: max documents to fetch; 0 means all
    :return: (all_info, all_doc_handled, all_len) where all_info maps each
             author/editor key to its document count, all_doc_handled maps it
             to the documents themselves, and all_len holds overall counters.
             The '无来源' ("no source") entries cover documents that have
             neither an author nor an editor.
    """
    if project:
        all_doc = [_['result'] for _ in client.select(project, limit=limit)]
    else:
        all_doc = client.select_by_query(query, limit=limit)

    all_len = {
        'all': 0,
        'editor': 0,
        'author': 0
    }
    all_info = {
        'author': {},
        'editor': {},
        '无来源': 0
    }
    all_doc_handled = {
        'author': {},
        'editor': {},
        '无来源': [],
    }

    utils.sys_print('\n')
    for doc in all_doc:
        all_len['all'] += 1
        if all_len['all'] % 100 == 0:
            # progress message: "processed: <n>"
            utils.sys_print('\r已处理：%d\r' % all_len['all'])

        author = doc.get(u'author')
        editor = doc.get(u'editor')

        if author:
            key = json.dumps(author) if isinstance(author, (dict, list)) else author
            all_len['author'] += 1
            all_info['author'][key] = all_info['author'].get(key, 0) + 1
            all_doc_handled['author'].setdefault(key, []).append(doc)

        if editor:
            key = json.dumps(editor) if isinstance(editor, (dict, list)) else editor
            all_len['editor'] += 1
            all_info['editor'][key] = all_info['editor'].get(key, 0) + 1
            all_doc_handled['editor'].setdefault(key, []).append(doc)

        # Bug fix: the original `elif not (author and editor)` hung off the
        # editor branch, so ANY doc without an editor — even one with an
        # author — was counted as '无来源' ("no source"). Only docs lacking
        # both belong here.
        if not author and not editor:
            all_info['无来源'] += 1
            all_doc_handled['无来源'].append(doc)

    # summary message: "total articles in this project's store: <counters>"
    utils.sys_print('数据库该项目库总共文章条数：%s \n'% json.dumps(all_len))
    return all_info, all_doc_handled, all_len

    # print '前1：', [_ for _ in client.select("secretchina_kanguanchang", limit=1)]

    # print get_time((2014, 7, 28), (2014, 8, 3))

# # 将查询出的文档添加到Elasticsearch中
# for _doc in user_docs:
#     try:
#         # 将refresh设为true，使得添加的文档可以立即搜索到；
#         # 默认为false，可能会导致下面的search没有结果
#         _es.index(index='blog_index', doc_type='user', refresh=True, body=_doc)
#         processed += 1
#         print('Processed: ' + str(processed), flush=True)
#     except:
#         traceback.print_exc()
#
# # 查询所有记录结果
# print('Search all...', flush=True)
# _query_all = {
#     'query': {
#         'match_all': {}
#     }
# }
# _searched = _es.search(index='blog_index', doc_type='user', body=_query_all)
# print(_searched, flush=True)
#
# # 输出查询到的结果
# for hit in _searched['hits']['hits']:
#     print(hit['_source'], flush=True)
#
# # 查询姓名中包含jerry的记录
# print('Search name contains jerry.', flush=True)
# _query_name_contains = {
#     'query': {
#         'match': {
#             'name': 'jerry'
#         }
#     }
# }
# _searched = _es.search(index='blog_index', doc_type='user', body=_query_name_contains)
# print(_searched, flush=True)
