# -*- coding: utf-8 -*-

from __future__ import absolute_import
import datetime

from django.conf import settings
from celery.utils.log import get_task_logger
from financial_daily.celery import app

from basic_info.models import Stock, Institute
from institutes.models import DailyCounsel, DailyMeeting
from news.models import NewsCategory, NewsDetail

from financial_daily.utils import StringUtils, FTPUtils, RedisUtils
from financial_daily.summarizer import NewsSummarizer
from financial_daily.entity_processors import InstitutionNameProcessor

logger = get_task_logger(__name__)
date_str = datetime.date.today().strftime('%Y-%m-%d')

@app.task
def generate_daily_tougu_report(edition, date=None):
    """Fetch the daily "tougu" (investment counsel) file for *edition* from
    FTP and persist each well-formed row as a DailyCounsel record.

    Args:
        edition: edition identifier, converted to a filename suffix via
            FTPUtils.convert_to_edition_suffix.
        date: date string in 'YYYY-MM-DD' form; defaults to today's date,
            computed at call time. (The previous module-level default was
            evaluated once at import, so a long-running Celery worker would
            keep using a stale date.)
    """
    if date is None:
        date = datetime.date.today().strftime('%Y-%m-%d')

    ftp_conn = FTPUtils.connect_ftp()
    column_size = len(settings.FTP_TOUGU_COLUMNS)
    try:
        edition_suffix = FTPUtils.convert_to_edition_suffix(edition)  # default

        # Remote filename is YYYYMMDD + edition suffix, e.g. '20240101A.txt'.
        file_name = date.replace('-', '') + edition_suffix + '.txt'
        raw = FTPUtils.get_file_as_string(ftp_conn, file_name)
        for line in raw.split(settings.FTP_LINE_DELIMITER):
            line = line.strip()
            if not line:
                continue  # skip blank lines

            columns = line.split(settings.FTP_COLUMN_DELIMITER)
            # BUG FIX: this check previously sat outside the non-empty-line
            # guard, so `columns` could be unbound (first line blank ->
            # NameError) or stale from the previous iteration (blank line ->
            # duplicate processing of the prior row).
            if len(columns) < column_size:
                continue  # malformed / short row

            institute = columns[settings.FTP_TOUGU_COLUMNS['institute']]
            author = columns[settings.FTP_TOUGU_COLUMNS['author']]
            # The content itself may contain the column delimiter, so rejoin
            # everything from the content column onward.
            content = ''.join(columns[settings.FTP_TOUGU_COLUMNS['content']:])

            # get_or_create keys on (date, institute, author) so re-running
            # the task for the same day does not create duplicates.
            DailyCounsel.objects.get_or_create(
                date=date,
                institute=institute,
                author=author,
                defaults={
                    'content': content,
                }
            )
    finally:
        FTPUtils.close_ftp(ftp_conn)

    logger.info("Done processing daily tougu data. %d added.",
                DailyCounsel.objects.filter(date=date).count())

@app.task
def import_daily_meeting(date=None):
    """Import morning-report documents for *date* from Redis, summarize them,
    and store one DailyMeeting row per (stock, institute, url) mention.

    Args:
        date: date string in 'YYYY-MM-DD' form; defaults to today's date,
            computed at call time. (The previous module-level default was
            evaluated once at import, so a long-running Celery worker would
            keep using a stale date.)
    """
    if date is None:
        date = datetime.date.today().strftime('%Y-%m-%d')

    redis_conn = RedisUtils.connect_redis()
    # URL hashes already imported on this date -> skip to keep the task
    # idempotent across re-runs.
    processed_url_hashes = NewsDetail.get_processsed_news_url_hash_set(
        NewsCategory.MORNING_REPORT, date)
    doc_set = RedisUtils.get_daily_morning_reports(redis_conn, date)
    doc_sents_map = {}  # doc_url -> list of summarized sentences
    with NewsSummarizer() as summarizer:

        institute_name_processor = InstitutionNameProcessor()
        summarizer.add_sent_processor(institute_name_processor)

        logger.info("%d morning reports to add on %s.", len(doc_set), date)

        for doc_url in doc_set:
            url_hash = StringUtils.get_string_md5(doc_url)
            if url_hash in processed_url_hashes:
                continue  # this article was already processed

            try:
                title = RedisUtils.get_doc_title(redis_conn, doc_url)
                content = RedisUtils.get_doc_content(redis_conn, doc_url)
                # NOTE(review): publisher/pub_time are fetched but never used
                # below — confirm whether these reads are still needed.
                publisher = RedisUtils.get_doc_publisher(redis_conn, doc_url)
                pub_time = RedisUtils.get_doc_pubtime(redis_conn, doc_url)

                if title:  # non-None and non-empty
                    # Tag the processor with the current document id so the
                    # stock/institute map below can be keyed by url.
                    institute_name_processor.set_current_doc_id(doc_url, title=title)
                    sentences = summarizer.get_sentences(title, content)
                    doc_sents_map[doc_url] = sentences

            except Exception:
                # Best-effort per document: log with traceback and move on.
                logger.exception("Error in processing %s. This url was ignored.", doc_url)

    # All stock names mentioned in the reports, mapped to the documents
    # (url, institute) -> sentence index that mention them.
    # (.items() instead of Python-2-only .iteritems() for py2/py3 compat.)
    stock_doc_id_map = institute_name_processor.get_stock_doc_id_map()
    for sec_name, doc_map in stock_doc_id_map.items():
        for (url, institute), sent_id in doc_map.items():
            DailyMeeting.objects.get_or_create(
                date=date,
                institute=Institute.objects.get(short_name=institute),
                stock=Stock.objects.get(short_name=sec_name),
                defaults={
                    'static_summary': doc_sents_map[url][sent_id],
                    'url': url,
                })

    logger.info("Done adding Morning reports. %d stocks added.",
                DailyMeeting.objects.filter(date=date).count())

