from elasticsearch import  Elasticsearch
from elasticsearch.helpers import bulk
from elasticsearch_dsl import Search,Keyword, Integer,Date, connections,DocType, Index, A, Q, Text
from echart.fanwen_api import foshan_news_api
from vis_sys.settings import FW_LOG_INDEX, FW_LOG_TYPE, ELASTIC_HOST, FW_NEW_INDEX , FW_NEW_TYPE,ARTICLE_LIST, NANHAI,\
    ARTICLE_INDEX, ARTICLE_TYPE, MIN_SCORE,EMOTION_LINE, FW_EMOTION_TYPE, FW_EMOTION_INDEX, FOSHAN_CITY_CODE, ZONES, \
    SANSHUI, GUANGZHOU_CITY_CODE, HOTWORD_INDEX, HOTWORD_TYPE, APPCLASS_INDEX, APPCLASS_TYPE
from datetime import datetime, timedelta
import json
connections.connections.create_connection(hosts=[ELASTIC_HOST])

import time

def init_log_index():
    """
    Create the ``article_type`` index (1 shard, no replicas) and register
    the ArticletypeStatic document type on it.
    """
    article_index = Index('article_type')
    article_index.settings(
        number_of_shards=1,
        number_of_replicas=0,
    )
    article_index.doc_type(ArticletypeStatic)
    article_index.create()

class ImportLog(DocType):
    """
    ORM mapping for the Fanwen import log: one document per import run,
    recording the id range that was pulled and how many items were written.
    """
    application_id = Keyword()  # upstream application identifier
    begin_id = Keyword()        # first article id of the imported range
    end_id = Keyword()          # last article id of the imported range
    update_count = Integer()    # number of documents written in this run
    created = Date()            # timestamp of the run (auto-filled on save)

    class Meta:
        # Required: binds this DocType to its index/type.
        index = FW_LOG_INDEX
        doc_type = FW_LOG_TYPE

    @classmethod
    def find_last_id(cls):
        """Return the ``end_id`` of the most recent import log entry.

        Fixed: the method takes ``cls`` but was missing the ``@classmethod``
        decorator, so calling ``ImportLog.find_last_id()`` raised TypeError.
        """
        # Sort newest-first and fetch a single hit.
        s = cls.search().sort("-created")[0]
        res = s.execute()
        return res[0].end_id

    def save(self, **kwargs):
        # If no creation date was supplied, stamp with "now".
        if self.created is None:
            self.created = datetime.now()
        return super(ImportLog, self).save(**kwargs)

class EmotionLog(DocType):
    """
    Daily per-zone positive/negative article statistics pulled from Fanwen.
    """
    zone_id = Keyword()
    zone = Keyword()
    total = Integer()
    readcount = Integer()
    agreecount = Integer()
    forwardcount = Integer()
    commentcount = Integer()
    rearticlecount = Integer()
    remediacount = Integer()
    collectcount = Integer()
    emotion = Keyword()  # emotion class code, e.g. '1'
    created = Date()

    class Meta:
        # Required: binds this DocType to its index/type.
        index = FW_EMOTION_INDEX
        doc_type = FW_EMOTION_TYPE

    @classmethod
    def emotion_total(cls, begin_date, end_date, emotion='1', zone_id=None):
        """
        Aggregate article totals per zone between *begin_date* and *end_date*
        (both 'YYYY-MM-DD' strings).

        Fixed: takes ``cls`` but was missing the ``@classmethod`` decorator,
        so class-level calls raised TypeError.

        :param emotion: emotion code to filter on
        :param zone_id: restrict to a single zone; otherwise all zones
        :return: {'total': {zone: sum}, 'data': {date: [hit_dict, ...]}}
        """
        s = cls.search()
        days = datetime.strptime(end_date, '%Y-%m-%d') - datetime.strptime(begin_date, '%Y-%m-%d')
        if zone_id:
            s = s.filter("term", zone_id=zone_id)
            days = days.days
        else:
            # One document per zone per day; x5 presumably because five zones
            # are tracked — TODO confirm against the importer.
            days = days.days * 5
        s = s.filter("term", emotion=emotion)
        s = s.query('range', created={
            "gte": begin_date,
            "lte": end_date
        })
        a = A("terms", field="zone", size=12)  # number of buckets to return
        a.metric('count_per_zone', 'sum', field='total')
        s.aggs.bucket("group_by_zone", a)
        s = s.sort("created")
        response = s[:days].execute()
        result = {'total': {}, 'data': {}}
        for hit in response.hits:
            # Group raw hits by their creation day.
            date_str = datetime.strftime(hit.created, "%Y-%m-%d")
            result['data'].setdefault(date_str, []).append(hit.to_dict())

        for item in response.aggregations.group_by_zone.buckets:
            result['total'][item.key] = item.count_per_zone.value
        return result

    def save(self, **kwargs):
        # If no creation date was supplied, stamp with "now".
        if self.created is None:
            self.created = datetime.now()
        return super(EmotionLog, self).save(**kwargs)

class ArticletypeStatic(DocType):
    """
    Daily statistics of article counts per source type (articletype),
    one document per (category, articletype, day).
    """
    category_id = Keyword()
    articletype = Keyword()
    count = Integer()
    created = Date()

    class Meta:
        # Required: binds this DocType to its index/type.
        index = ARTICLE_INDEX
        doc_type = ARTICLE_TYPE

    def save(self, **kwargs):
        # If no creation date was supplied, stamp with "now".
        if self.created is None:
            self.created = datetime.now()
        return super(ArticletypeStatic, self).save(**kwargs)

    @classmethod
    def article_static(cls, category_id, begin_date, end_date):
        """
        Sum article counts per articletype for one category over a date range.

        Fixed: takes ``cls`` but was missing ``@classmethod``.

        :return: {articletype: total_count}
        """
        s = cls.search()
        s = s.filter("term", category_id=category_id)
        s = s.query('range', created={
            "gte": begin_date,
            "lte": end_date
        })
        a = A("terms", field="articletype", size=10)  # number of buckets to return
        a.metric('count_per_articletype', 'sum', field='count')
        s.aggs.bucket("group_by_articletype", a)
        response = s[0].execute()
        result = {}
        for item in response.aggregations.group_by_articletype.buckets:
            result[item.key] = item.count_per_articletype.value

        return result

    @classmethod
    def article_static_per_day(cls, category_id, begin_date, end_date):
        """
        Per-day breakdown of article counts per articletype for one category.

        Fixed: takes ``cls`` but was missing ``@classmethod``.

        :return: [{'date': 'YYYY-MM-DD', 'sources': [{'key': ..., 'value': ...}]}]
        """
        s = cls.search()
        s = s.filter("term", category_id=category_id)
        s = s.query('range', created={
            "gte": begin_date,
            "lte": end_date
        })
        s.aggs.bucket("group_by_time", "date_histogram", field="created", interval="day")
        a = A("terms", field="articletype", size=10)  # number of buckets to return
        a.metric('count_per_articletype', 'sum', field='count')
        s.aggs['group_by_time'].bucket("group_by_articletype", a)
        response = s[0].execute()
        result = []
        if response.hits.total > 0:
            for item in response.aggregations.group_by_time.buckets:
                result.append({
                    # key_as_string looks like '2019-09-05T00:00:00.000Z'.
                    'date': item.key_as_string.split('T')[0],
                    'sources': [
                        {'key': count.key, 'value': count.count_per_articletype.value}
                        for count in item.group_by_articletype.buckets
                    ],
                })
        return result

class News(object):
    """
    Elasticsearch client wrapper for the Fanwen news index: importing new
    articles from the Fanwen API, searching, and index maintenance.
    """

    # Newest article id seen so far; refreshed by find_last_id().
    last_id = '0'

    def __init__(self, host, index_name, type_name):
        self.host = host
        self.index_name = index_name
        self.type_name = type_name
        self.es = Elasticsearch([self.host])
        self.find_last_id()

    def recreate_index(self, new_index, new_type, settings, property):
        """
        Create *new_index* whose mapping is the old mapping merged with
        *property*: an empty-string placeholder value inherits the old field
        type, and fields absent from *property* become unindexed keywords.

        ``property`` shadows the builtin but keeps its name for caller
        compatibility (it is passed by keyword).

        :return: True when the index was created, else False
        """
        created = False
        res = self.get_mapping()
        for key, val in res[self.index_name]['mappings'][self.type_name]['properties'].items():
            new_val = property['properties'].get(key)
            # Fixed: the original tested `if new_val:` before `new_val == ""`,
            # but "" is falsy, so the inherit-old-type branch was dead code and
            # placeholder fields were silently stored as unindexed keywords.
            if new_val is None:
                # Field not declared at all: store but do not index it.
                property['properties'][key] = {'type': "keyword", "index": False}
            elif new_val == "":
                # Placeholder: copy the type from the existing mapping.
                property['properties'][key] = val

        settings['mappings'][new_type] = {'properties': property['properties']}

        try:
            if not self.es.indices.exists(new_index):
                res = self.es.indices.create(index=new_index, ignore=400, body=settings)
                if res.get('error'):
                    print(res)
                else:
                    created = True
        except Exception as e:
            print(e)

        return created

    def get_mapping(self):
        """Return the mapping of the wrapped index/type, or {'error': e} on failure."""
        try:
            return self.es.indices.get_mapping(index=self.index_name, doc_type=self.type_name)
        except Exception as e:
            return {'error': e}

    def search(self):
        """Return a fresh Search bound to this client's index/type."""
        return Search(using=self.es, index=self.index_name, doc_type=self.type_name)

    def get_article(self, id):
        """Fetch one document by id; returns None when it does not exist."""
        res = None
        try:
            res = self.es.get(index=self.index_name, doc_type=self.type_name, id=id)
        except Exception:
            # Best-effort lookup: missing ids (or transport errors) yield None.
            # Narrowed from a bare `except:` which also swallowed SystemExit etc.
            pass
        return res

    def import_data(self, begin_id=None):
        """
        Pull one batch of articles from the Fanwen API starting after
        *begin_id* (default: the newest id already indexed), bulk-insert
        them, and record the run in an ImportLog document.
        """
        if begin_id:
            tmp_begin_id = begin_id
        else:
            # Resume from the newest id already in the index.
            tmp_begin_id = self.last_id

        # Record this import run.
        fw_log = ImportLog(
            meta={'id': tmp_begin_id},
            application_id="367",
            # Fixed: the mapped field is begin_id; the original passed the
            # unmapped name start_id, so the range start was never stored.
            begin_id=tmp_begin_id
        )
        total_items = 0
        while True:
            res = foshan_news_api(count='50', start=tmp_begin_id)
            if len(res['data']) == 0:
                break
            insert_data = []
            for data in res['data']:
                insert_data.append({
                    "_index": self.index_name,
                    "_type": self.type_name,
                    "_id": data['id'],  # _id could also be omitted and auto-generated
                    "_source": data
                })
            tmp_begin_id = res['data'][-1]['id']
            success, _ = bulk(self.es, insert_data, index=self.index_name, raise_on_error=True)
            print('result:{},start: {}'.format(res['result'], tmp_begin_id))
            total_items += success
            print('Performed %d actions, total:%d' % (success, total_items))
            # NOTE(review): this unconditional break imports only one batch per
            # call despite the while-True — presumably deliberate throttling;
            # confirm before removing.
            break

        # Remember the newest id we have seen.
        self.last_id = tmp_begin_id

        fw_log.end_id = tmp_begin_id
        fw_log.update_count = total_items
        fw_log.save()

    def find_last_id(self):
        """Refresh and return self.last_id from the newest document's id."""
        last_id = '0'
        try:
            s = self.search().sort("-id")[0]
            res = s.execute()
            last_id = res[0].id
        except Exception as e:
            print(e)

        self.last_id = last_id

        return last_id

def copy_news_index():
    """
    Clone the Fanwen news mapping into a fresh index with explicit field
    types, delegating the merge with the old mapping to News.recreate_index.
    """
    index_settings = {
        "settings": {
            "number_of_shards": 1,
            "number_of_replicas": 0,
        },
        "mappings": {
        }
    }

    # Chinese full-text fields: index with ik_max_word, search with ik_smart.
    ik_text = {
        "type": "text",
        "analyzer": "ik_max_word",
        "search_analyzer": "ik_smart"
    }
    date_format = "yyyy-MM-dd HH:mm:ss.SSS || yyyy/MM/dd || epoch_millis"

    mapping_property = {
        "dynamic": False,
        "properties": {
            "id": {"type": "long"},
            "content": {"type": "text"},
            "news_intro": dict(ik_text),
            "news_title": dict(ik_text),
            "news_auth": dict(ik_text),
            "news_desc": dict(ik_text),
            # Empty-string placeholders — see News.recreate_index for how
            # these are resolved against the existing mapping.
            "keywords": "",
            "subtitle": "",
            "update_time": {"type": "date", "format": date_format},
            "creation_time": {"type": "date", "format": date_format},
            "VideoSize": {"type": "integer"},
            "VioceSize": {"type": "integer"},
            "class1": {"type": "keyword"},
            "class2": {"type": "keyword"},
            "articletype": {"type": "keyword"},
            "news_paper_name": {"type": "keyword"},
            "category_id": {"type": "keyword"},
            "page": {"type": "keyword"},
            "city": {"type": "keyword"},
            "contentwordscount": {"type": "long"},
            "paperID": {"type": "keyword"},
            "pinyin": {"type": "keyword"},
            "applicationid": {"type": "keyword"},
            "typeName": {"type": "keyword"},
            "article_id": {"type": "keyword"},
        }
    }

    source_news = News(host="http://172.16.8.244:9200", index_name='news_pro', type_name='foshan')
    print(source_news.recreate_index(new_index="fw_news_pro", new_type="foshan",
                                     settings=index_settings, property=mapping_property))


def import_data():
    """Pull the latest Fanwen articles into the local news index and report progress."""
    news_client = News(host=ELASTIC_HOST, index_name=FW_NEW_INDEX, type_name=FW_NEW_TYPE)
    news_client.import_data()
    print("get last id:", news_client.last_id)


def articletype_static(article_list):
    """
    Count yesterday's articles per source family for each category id.

    :param article_list: iterable of category ids to aggregate
    :return: [{'category_id': ..., 'data': {source_family: count}}]
    """
    yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
    news_client = News(host=ELASTIC_HOST, index_name=FW_NEW_INDEX, type_name=FW_NEW_TYPE)
    # Order matters: articletype keys are matched by prefix, first hit wins.
    source_families = ('website', 'news', 'weibo', 'weixin', 'webbbs',
                       'webapp', 'webprofession', 'webgov', 'other')
    results = []
    for category_id in article_list:
        counts = {family: 0 for family in source_families}

        s = news_client.search()
        s = s.filter("term", category_id=category_id)
        s = s.query('range', update_time={
            "gte": "{} 00:00:00.000".format(yesterday),
            "lte": "{} 23:59:59.999".format(yesterday)
        })

        # Grow the terms-agg size until no documents fall outside the buckets.
        size = 30
        remaining = 1
        while remaining > 0:
            s.aggs.bucket("group_by_articletype", A("terms", field="articletype", size=size))
            response = s[0].execute()
            remaining = response.aggregations.group_by_articletype.sum_other_doc_count
            size += 20

        for term in response.aggregations.group_by_articletype.buckets:
            for family in source_families:
                if term.key.startswith(family):
                    counts[family] += term.doc_count
                    break
            else:
                # No known prefix matched this articletype.
                counts['other'] += term.doc_count

        results.append({
            'category_id': category_id,
            'data': counts
        })

    return results


def topN_media_static(top_n, beginDate, endDate, category_id):
    """
    Top-N media outlets (news_paper_name) per source family for one category
    over a date range.

    Fixed: the original merged per-bucket media counts with a hand-rolled
    insertion loop whose index range (``range(0, len(...) - 1)``) never
    compared against the last element, dropping or mis-ranking entries.
    Replaced with collect-then-sort-then-truncate, which implements the
    evident intent directly. The leftover debug print was removed.

    :return: {source_family: [{'key': media, 'value': count}, ...]}  (<= top_n each)
    """
    fw_news = News(host=ELASTIC_HOST, index_name=FW_NEW_INDEX, type_name=FW_NEW_TYPE)
    s = fw_news.search()
    s = s.filter("term", category_id=category_id)
    s = s.query('range', update_time={
        "gte": "{} 00:00:00.000".format(beginDate),
        "lte": "{} 23:59:59.999".format(endDate)
    })
    s.aggs.bucket("group_by_articletype", "terms", field="articletype", size=30)
    a = A("terms", field="news_paper_name", size=top_n)  # media buckets per articletype
    s.aggs['group_by_articletype'].bucket("group_by_media", a)
    response = s[0].execute()

    # Order matters: articletype keys are matched by prefix, first hit wins
    # ('webbbs'/'webapp' must precede the catch-all 'web').
    result = {
        'webbbs': [],
        'webapp': [],
        'web': [],
        'news': [],
        'weibo': [],
        'weixin': [],
    }

    if response.hits.total > 0:
        for item in response.aggregations.group_by_articletype.buckets:
            for source in result.keys():
                if item.key.startswith(source):
                    # Gather every media count from this bucket; ranking and
                    # truncation happen once, below.
                    result[source].extend(
                        {'key': media.key, 'value': media.doc_count}
                        for media in item.group_by_media.buckets
                    )
                    break

    # Rank each family by count (descending) and keep the top-N.
    for source, counts in result.items():
        counts.sort(key=lambda entry: entry['value'], reverse=True)
        result[source] = counts[:top_n]

    return result


def get_article(id, from_item=0, to_item=50, category_id=None, beginDate=None, endDate=None, emotion='all', source='all'):
    """
    Find reports related to an article by matching its title; the relevance
    threshold is settings.MIN_SCORE.

    :param id: article id to look up (shadows the builtin; kept for callers)
    :param from_item: first result offset
    :param to_item: last result offset
    :param category_id: optional column/category filter
    :param beginDate: optional range start, 'YYYY-MM-DD'
    :param endDate: optional range end, 'YYYY-MM-DD'
    :param emotion: 'all', 'bad' or 'good'
    :param source: articletype prefix filter (e.g. 'webapp', 'weixin') or 'all'
    :return: list of related-article dicts, or None when the id is unknown
    """
    fw_news = News(host=ELASTIC_HOST, index_name=FW_NEW_INDEX, type_name=FW_NEW_TYPE)
    article = fw_news.get_article(id=id)
    # Guard clause replaces the original deeply nested if/return structure.
    if not article:
        return None

    data = article['_source']
    s = fw_news.search()
    s = s.source(fields={
        'includes': ["id", "articletype", "news_title", "VideoSize"]
    })

    if category_id:
        s = s.filter("term", category_id=category_id)

    if beginDate and endDate:
        s = s.query('range', update_time={
            "gte": "{} 00:00:00.000".format(beginDate),
            "lte": "{} 23:59:59.999".format(endDate)
        })

    # NOTE(review): sentiment appears to be encoded in the VideoSize field,
    # split at EMOTION_LINE — confirm with the data pipeline.
    if emotion == 'bad':
        s = s.query('range', VideoSize={
            "lte": EMOTION_LINE
        })
    elif emotion == 'good':
        s = s.query('range', VideoSize={
            "gte": EMOTION_LINE
        })

    s = s.query("match", news_title=data['news_title'])

    res = s[from_item:to_item].execute()
    result = []

    for hit in res.hits:
        # Single append path replaces the original duplicated dict literals
        # for the 'all' and prefix-filtered branches.
        if hit.meta.score > MIN_SCORE and (source == 'all' or hit.articletype.startswith(source)):
            result.append({
                'id': hit.id,
                'score': hit.meta.score,
                'articletype': hit.articletype,
                'news_title': hit.news_title,
                "VideoSize": hit.VideoSize
            })

    return result


def get_zone_news_static(category_id, zones, beginDate, endDate):
    """
    Fill each zone dict in *zones* with the count of local news whose
    description mentions the zone name, within the given date range.

    :param zones: list of dicts each containing a 'zone' name; mutated in place
    :return: the same *zones* list with 'total' set per zone
    """
    local_news = News(host=ELASTIC_HOST, index_name=FW_NEW_INDEX, type_name=FW_NEW_TYPE)
    s = local_news.search()
    s = s.filter("term", category_id=category_id)
    s = s.filter("terms", city=[FOSHAN_CITY_CODE, GUANGZHOU_CITY_CODE])

    s = s.query('range', update_time={
        "gte": "{} 00:00:00.000".format(beginDate),
        "lte": "{} 23:59:59.999".format(endDate)
    })

    for data in zones:
        # Fixed: the original re-assigned ``s = s.filter(...)`` here, so every
        # zone after the first was AND-ed with all previous zones' filters and
        # its total was wrong. Filter a per-zone copy of the base query instead.
        zone_query = s.filter("match", news_desc=data['zone'])
        res = zone_query[1].execute()
        data['total'] = res.hits.total

    return zones


class MessageNews(object):
    """Thin search wrapper around a message/news Elasticsearch index."""

    def __init__(self, host, index_name, type_name):
        self.host = host
        self.index_name = index_name
        self.type_name = type_name
        self.es = Elasticsearch([self.host])

    def search(self):
        """Return a fresh Search bound to this client's index/type."""
        return Search(using=self.es, index=self.index_name, doc_type=self.type_name)

    def hotword_static(self, begin_date, end_date, size=20):
        """
        Top keywords published in [begin_date, end_date], each with the
        first and the most recent article carrying that keyword.

        :param size: number of keyword buckets to return
        :return: list of {'key', 'doc_count', 'first_article', 'last_article'}
        """
        base = self.search()
        publish_range = {"gte": begin_date, "lte": end_date}

        # Pass 1: aggregate the top keywords in the window.
        agg_search = base.query('range', publishTime=publish_range)
        agg_search.aggs.bucket("group_by_articletype",
                               A("terms", field="keywords", size=size))
        keyword_buckets = agg_search[0].execute().aggregations.group_by_articletype.buckets

        # Pass 2: per keyword, fetch the earliest and the latest article.
        detail_search = base.query('range', publishTime=publish_range)
        result = []
        for bucket in keyword_buckets:
            per_keyword = detail_search.query("match", keywords=bucket.key)
            newest = per_keyword.sort('-publishTime')[0].execute()
            oldest = per_keyword.sort('publishTime')[0].execute()
            result.append({
                'key': bucket.key,
                'doc_count': bucket.doc_count,
                'first_article': {
                    'article_id': oldest.hits[0].id,
                    'first_publish_time': oldest.hits[0].publishTime,
                    'acticle_url': oldest.hits[0].url
                },
                'last_article': {
                    'article_id': newest.hits[0].id,
                    'first_publish_time': newest.hits[0].publishTime,
                    'acticle_url': newest.hits[0].url
                },
            })
        return result


class TextClass(DocType):
    """
    ORM mapping for per-article text-classification results.
    """
    article_id = Keyword()  # id of the classified article
    class_name = Keyword()  # predicted class label
    model_name = Keyword()  # model that produced the prediction
    title = Text()
    score = Integer()       # classification confidence score

    class Meta:
        # Required: binds this DocType to its index/type.
        index = APPCLASS_INDEX
        doc_type = APPCLASS_TYPE

    @classmethod
    def find_last_id(cls):
        """Return the ``article_id`` of the first document in default order.

        Fixed: takes ``cls`` but was missing the ``@classmethod`` decorator,
        so ``TextClass.find_last_id()`` raised TypeError.
        """
        s = cls.search()[0]
        res = s.execute()
        return res[0].article_id

    @classmethod
    def find(cls, ids):
        """Fetch every classification document whose ``article_id`` is in *ids*.

        Fixed: takes ``cls`` but was missing the ``@classmethod`` decorator.
        """
        s = cls.search()
        s = s.filter("terms", article_id=ids)
        return s.execute()

if __name__ == '__main__':
    # Ad-hoc manual driver. The commented-out blocks below are kept as usage
    # examples for the statistics helpers defined in this module.
    #today = datetime.strptime("2019-09-05","%Y-%m-%d")
    #import_data()
    #category_id = "122576"
    # Example: persist yesterday's per-source counts for every category.
    # res = articletype_static(ARTICLE_LIST)
    # for item in res:
    #     for key, count in item['data'].items():
    #         article = ArticletypeStatic(
    #             category_id=item['category_id'],
    #             articletype=key,
    #             count=count,
    #         )
    #         article.save()

    # Example: per-day articletype breakdown / ad-hoc query + delete.
    # article = ArticletypeStatic()
    # res = article.article_static_per_day(NANHAI, '2019-09-05','2019-09-11')
    # print(res)
    # s = article.search()
    # s = s.query('range', created={
    #     "gte": "2019-09-10",
    #     "lte": "2019-09-10"
    # })
    # response = s.delete()
    # response = s.execute()
    # print(json.dumps(s.to_dict()))
    # for hit in response.hits:
    #     print(hit.created)

    #topN_media_static(10,'2019-09-10','2019-09-16', NANHAI)
    # res = get_article('374975830', from_item=0, to_item=100, beginDate='2019-09-05', endDate='2019-09-17',source='web')
    # for item in res:
    #     print(item)
    #e_log = EmotionLog()
    #e_log.emotion_total('2019-09-17','2019-09-23', emotion='51')

    # Live call: per-zone news counts for category 126967 in the given window.
    print(get_zone_news_static('126967', ZONES['126967'],'2019-10-24','2019-10-28'))
