# -*- coding: utf-8 -*-

from __future__ import absolute_import
import datetime

from django.conf import settings
from celery.utils.log import get_task_logger
from celery import chain

from financial_daily.celery import app
from financial_daily.news_importers import CrawledNewsImporter
from financial_daily.utils import TextCounter

from news.models import NewsEdition, NewsCategory, NewsDetail, DailySummary
from news.models import NewsCluster, ClusterSummary

from financial_daily.clusterer import NewsClusterer

logger = get_task_logger(__name__)
# NOTE(review): evaluated ONCE at import time. On a long-running Celery
# worker this value goes stale after midnight, so any task using it as a
# default argument operates on the worker's start date, not "today".
# Tasks should compute today's date at call time instead.
date_str = datetime.date.today().strftime('%Y-%m-%d')

@app.task
def import_daily_headlines(date=None):
    """Import the crawled headline news for one day.

    Args:
        date: ``YYYY-MM-DD`` string; defaults to today's date, computed
            at call time (a module-level default would be frozen at
            import time and go stale on a long-running worker).
    """
    if date is None:
        date = datetime.date.today().strftime('%Y-%m-%d')
    CrawledNewsImporter.import_daily_headlines(date)

@app.task
def import_daily_frontpage(date=None):
    """Import the crawled front-page news for one day.

    Args:
        date: ``YYYY-MM-DD`` string; defaults to today's date, computed
            at call time rather than at module import time.
    """
    if date is None:
        date = datetime.date.today().strftime('%Y-%m-%d')
    CrawledNewsImporter.import_daily_frontpage(date)

@app.task
def import_stock_news_and_more(date=None):
    """Import the day's stock news and company announcements.

    Args:
        date: ``YYYY-MM-DD`` string; defaults to today's date, computed
            at call time rather than at module import time.
    """
    if date is None:
        date = datetime.date.today().strftime('%Y-%m-%d')
    CrawledNewsImporter.import_daily_stock_news(date)
    # NOTE(review): industry news / industry research imports
    # (import_daily_industry_news, import_daily_industry_research) are
    # currently disabled here; re-enable them when the crawlers are ready.
    CrawledNewsImporter.import_daily_company_annoucements(date)
   
@app.task
def generate_daily_news_summary(category, edition, date=None):
    """Aggregate one day's news of *category* into a ``DailySummary`` row.

    Collects the crawled ``NewsDetail`` rows for (date, category), extracts
    the top keywords across them, counts distinct publishers, and stores
    the result via ``get_or_create`` (existing summaries are left as-is).

    Args:
        category: ``NewsCategory`` value to summarize.
        edition: ``NewsEdition`` value stored on the summary row.
        date: ``YYYY-MM-DD`` string; defaults to today's date, computed
            at call time rather than at module import time.
    """
    if date is None:
        date = datetime.date.today().strftime('%Y-%m-%d')
    news_list = NewsDetail.objects.filter(date=date, category=category)
    # Issue the COUNT query once and reuse it below (the original code
    # ran count() twice; filter() never returns None, so no None check).
    doc_count = news_list.count()
    if doc_count == 0:
        # Nothing crawled (yet) for this day/category — nothing to summarize.
        return

    keyword_str_list = [news.keywords for news in news_list]
    top_keywords = TextCounter.get_top_keywords(
        keyword_str_list,
        settings.NEWS_SUMMARIZER_PARAMS['max_keywords_returned'])
    top_keywords_str = settings.NEWS_KEYWORDS_DELIMITER.join(top_keywords)
    domain_count = news_list.values('publisher').distinct().count()
    DailySummary.objects.get_or_create(
        date=date,
        category=category,
        edition=edition,
        defaults={
            'doc_count': doc_count,
            'top_keywords': top_keywords_str,
            'domain_count': domain_count,
        })

@app.task
def cluster_daily_news(category, date=None):
    """Re-cluster one day's news of *category* and persist the clusters.

    Runs ``NewsClusterer`` over the keyword lists of the day's
    ``NewsDetail`` rows, then atomically-in-intent replaces that date's
    ``NewsCluster`` and ``ClusterSummary`` rows with the new clustering.

    Args:
        category: ``NewsCategory`` value to cluster.
        date: ``YYYY-MM-DD`` string; defaults to today's date, computed
            at call time rather than at module import time.
    """
    if date is None:
        date = datetime.date.today().strftime('%Y-%m-%d')
    # Materialize once: cluster members are accessed by index below, and
    # indexing a lazy queryset would issue one SQL query per access.
    news_list = list(NewsDetail.objects.filter(date=date, category=category))
    if not news_list:
        # Nothing fetched (possibly a transient crawler/network failure):
        # return without deleting the existing clusters for this date.
        return
    news_keywords = [
        news.keywords.split(settings.NEWS_KEYWORDS_DELIMITER)
        for news in news_list
    ]
    clust_list = NewsClusterer().cluster(news_keywords)

    # Clustering succeeded — now it is safe to drop the previous clusters.
    # NOTE(review): the delete filters by date only, not by category; if
    # clusters of several categories can share a date this wipes them all —
    # confirm against the NewsCluster/ClusterSummary schema.
    NewsCluster.objects.filter(date=date).delete()
    ClusterSummary.objects.filter(date=date).delete()

    for cluster_id, clust in enumerate(clust_list):
        members = list(clust)  # indices into news_list
        cluster_keywords = []
        for member in members:
            NewsCluster.objects.create(date=date,
                                       cluster_id=cluster_id,
                                       news=news_list[member])
            cluster_keywords.append(news_list[member].keywords)

        top_keywords = TextCounter.get_top_keywords(
            cluster_keywords,
            settings.NEWS_SUMMARIZER_PARAMS['max_keywords_returned'])
        top_keywords_str = settings.NEWS_KEYWORDS_DELIMITER.join(top_keywords)

        # The first member is shown as the cluster's representative news.
        ClusterSummary.objects.create(date=date,
                                      cluster_id=cluster_id,
                                      keywords=top_keywords_str,
                                      doc_count=len(members),
                                      display_news=news_list[members[0]])

@app.task
def import_daily_headlines_summary_cluster(edition, date=None):
    """Run the daily headline pipeline: import, summarize, then cluster.

    Args:
        edition: ``NewsEdition`` value passed to the summary step.
        date: ``YYYY-MM-DD`` string; defaults to today's date, computed
            at call time rather than at module import time.
    """
    if date is None:
        date = datetime.date.today().strftime('%Y-%m-%d')
    # .si() builds immutable signatures so each step receives only the
    # arguments given here, not the previous task's return value.
    chain(import_daily_headlines.si(date),
          generate_daily_news_summary.si(NewsCategory.HEADLINE,
                                         edition,
                                         date),
          cluster_daily_news.si(NewsCategory.HEADLINE,
                                date)
    )()
    
