# -*- coding: utf-8 -*-
import logging
import datetime

from django.conf import settings

from financial_daily.summarizer import NewsSummarizer
from financial_daily.utils import StringUtils, RedisUtils
from financial_daily.entity_processors import StockNameProcessor, IndustryNameProcessor

from basic_info.models import Stock, Institute
from news.models import NewsCategory, NewsDetail, NewsStocksMap

# Module-level logger. Fix: pass __name__ (the actual module name), not the
# string literal '__name__', which created a logger literally called "__name__"
# and bypassed any per-module logging configuration.
logger = logging.getLogger(__name__)

class CrawledNewsImporter:
    """Import crawled news documents from Redis into the news models.

    Each public ``import_daily_*`` method imports one news category for a
    given date: documents are read from Redis, summarized with
    ``NewsSummarizer``, persisted as ``NewsDetail`` rows, and linked to
    stocks via ``NewsStocksMap``.
    """

    @staticmethod
    def import_daily_headlines(date):
        CrawledNewsImporter.__import_daily_crawled_data(date,
                                                        NewsCategory.HEADLINE,
                                                        StockNameProcessor())

    @staticmethod
    def import_daily_frontpage(date):
        CrawledNewsImporter.__import_daily_crawled_data(date,
                                                        NewsCategory.FRONTPAGE,
                                                        StockNameProcessor())

    @staticmethod
    def import_daily_stock_news(date):
        CrawledNewsImporter.__import_daily_crawled_data(date,
                                                        NewsCategory.STOCK_NEWS,
                                                        StockNameProcessor())

    @staticmethod
    def import_daily_industry_news(date):
        # Industry categories extract industry names rather than stock names.
        CrawledNewsImporter.__import_daily_crawled_data(date,
                                                        NewsCategory.INDUSTRY_NEWS,
                                                        IndustryNameProcessor())

    @staticmethod
    def import_daily_industry_research(date):
        CrawledNewsImporter.__import_daily_crawled_data(date,
                                                        NewsCategory.INDUSTRY_RESEARCH,
                                                        IndustryNameProcessor())

    @staticmethod
    def import_daily_company_annoucements(date):
        # NOTE: the misspelling "annoucements" is kept — it is part of the
        # public interface and matches NewsCategory.ANNOUCEMENT.
        CrawledNewsImporter.__import_daily_crawled_data(date,
                                                        NewsCategory.ANNOUCEMENT,
                                                        StockNameProcessor())

    @staticmethod
    def __import_daily_crawled_data(date,
                                    category,
                                    external_term_processor):
        """Import all not-yet-processed crawled documents of one category.

        Parameters:
            date: crawl date, used as the Redis lookup key component and as
                ``NewsDetail.date``.
            category: one of the ``NewsCategory`` constants.
            external_term_processor: entity extractor (stock or industry
                names) registered with the summarizer and later queried for
                the entities found in each document.
        """
        redis_conn = RedisUtils.connect_redis()
        # URL hashes already stored for this (category, date): skip those docs.
        processsed_news_url_hash_set = NewsDetail.get_processsed_news_url_hash_set(category, date)

        logger.debug("%d from %s already added on %s",
                     len(processsed_news_url_hash_set), category, date)
        doc_set = RedisUtils.get_daily_news_by_category(redis_conn, date, category)
        with NewsSummarizer() as summarizer:

            # Register the external entity extractor so the summarizer
            # collects stock/industry names while it tokenizes each document.
            summarizer.add_term_processor(external_term_processor)

            logger.debug("%d %s to add on %s", len(doc_set), category, date)

            for doc_url in doc_set:
                url_hash = StringUtils.get_string_md5(doc_url)
                if url_hash in processsed_news_url_hash_set:
                    continue  # this article was already processed

                title = RedisUtils.get_doc_title(redis_conn, doc_url)
                content = RedisUtils.get_doc_content(redis_conn, doc_url)

                # Fix: guard against a missing (None) content before calling
                # len() — previously a None content raised TypeError outside
                # the try block and aborted the whole import run.
                if content and len(content) > settings.NEWS_FILE_LENGTH_CUTOFF:
                    content = content[:settings.NEWS_FILE_LENGTH_CUTOFF]

                publisher = RedisUtils.get_doc_publisher(redis_conn, doc_url)
                pub_time = RedisUtils.get_doc_pubtime(redis_conn, doc_url)

                # (date, category, url_hash) is the natural key; the defaults
                # are only applied when the row is created.
                news, created = NewsDetail.objects.get_or_create(
                        date=date,
                        category=category,
                        url_hash=url_hash,
                        defaults={
                            'url': doc_url,
                            'title': title,
                            'publisher': publisher,
                            'published_time': datetime.datetime.fromtimestamp(int(pub_time))
                            })

                if not created:
                    # Unexpected: the hash was not in the processed set, yet
                    # the row already exists in the database.
                    logger.warning("%s from %s was already added, what's wrong???", doc_url, category)

                try:
                    # Tell the processor which document the upcoming
                    # summarizer callbacks belong to.
                    external_term_processor.set_current_doc_id(url_hash)

                    keywords = summarizer.get_keywords(title, content, True, False)

                    if not content:
                        sents = ''
                    else:
                        sents = summarizer.get_gist_sentences(title, content)

                    news.keywords = settings.NEWS_KEYWORDS_DELIMITER.join(keywords)
                    news.static_summary = u'。'.join(sents)
                    news.add_succeeded = True

                    # Link the article to every stock whose short name was
                    # found in the document by the term processor.
                    entity_list = external_term_processor.get_entity_list_from_current_doc()
                    for entity_name in entity_list:
                        try:
                            stock = Stock.objects.get(short_name=entity_name)
                            NewsStocksMap.objects.get_or_create(news=news, stock=stock)
                        except Stock.DoesNotExist:
                            logger.warning("Stock name %s does not exist!", entity_name)

                    # Also link by crawler-supplied, space-separated numeric
                    # stock codes, when the crawler provided any.
                    stock_codes = RedisUtils.get_doc_stock_code(redis_conn, doc_url)
                    if stock_codes is not None:
                        for sec_code in stock_codes.split(' '):
                            if sec_code.isnumeric():
                                try:
                                    stock = Stock.objects.get(code=sec_code)
                                    NewsStocksMap.objects.get_or_create(news=news, stock=stock)
                                except Stock.DoesNotExist:
                                    logger.warning("Stock code %s does not exist!", sec_code)

                except Exception as e:
                    # Best-effort import: mark this row as failed and keep
                    # going with the remaining documents.
                    news.add_succeeded = False
                    logger.exception("Error in processing %s: %s. This url was ignored.", doc_url, e)
                finally:
                    news.save()

            logger.info("Done %s text processing. %d news added.", category,
                        NewsDetail.objects.filter(date=date, category=category).count())

