# coding: utf-8
import tushare as ts
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
import os
import logging
import pandas
import sys
import json
from cachetools import cached,LRUCache
import conf.config_default as conf


# Configure root logging: INFO and above, appended to ts_news.log with
# timestamped, file/line-tagged records.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='ts_news.log',
                    filemode='a')
# Module-level news cache; eviction policy is least-recently-used.
cache = LRUCache(maxsize=50)


def fetchTsNews(toprecords=10):
    """Fetch the latest *toprecords* news items (with full content) from
    tushare, returned as a DataFrame sorted ascending by publication time."""
    latest = ts.get_latest_news(top=toprecords, show_content=True)
    # Oldest first so downstream consumers can process chronologically.
    return latest.sort_values(by=['time']).reset_index(drop=True)

def initial_cache(cache, news):
    """Seed *cache* with one entry per news row, keyed title -> time.

    Args:
        cache: a mutable mapping (here an LRUCache) to populate.
        news: DataFrame with at least 'title' and 'time' columns.
    """
    # Iterate the two columns in lockstep instead of positional .iloc
    # lookups per index -- same values, idiomatic and faster.
    for key, value in zip(news['title'], news['time']):
        cache[key] = value

def compare_cache(cache, news, keycolumn='title', valuecolumn='time'):
    """Return the news rows not yet in *cache*, updating the cache as it goes.

    Rows are scanned newest-first (reverse order) so the most recent items
    refresh the LRU cache before older ones could evict them.  As soon as a
    row is found whose key AND value already match the cache, the scan stops:
    every older row is assumed to be cached already.

    Args:
        cache: mutable mapping of key -> value (LRU semantics expected).
        news: DataFrame sorted ascending by *valuecolumn*.
        keycolumn: column used as the cache key (default 'title').
        valuecolumn: column used as the cache value (default 'time').

    Returns:
        list of Series (full rows) missing from the cache, newest first.
    """
    data_not_in_cache = []
    for i in reversed(range(len(news))):
        key = news[keycolumn].iloc[i]
        value = news[valuecolumn].iloc[i]
        cached = cache.get(key)  # single lookup instead of two get() calls
        if cached is None:
            # Cache miss: record it and queue the row for persistence.
            cache[key] = value
            data_not_in_cache.append(news.iloc[i])
        elif cached == value:
            # Exact hit: all older rows must already be cached -- stop early.
            break
        # NOTE(review): a key hit with a *different* value falls through
        # untouched (not cached, not collected, no break) -- presumably a
        # re-published title; confirm this is intentional.
    return data_not_in_cache

def writeTsNews(news):
    """Append *news* to the configured CSV file (conf.configs['filedb']['NewsPath']).

    Accepts either a DataFrame or a list of Series rows (as produced by
    compare_cache); a list is converted to a DataFrame first.  Errors are
    logged rather than raised so a failed write never kills the scheduler.

    Args:
        news: DataFrame, list of Series, or None (no-op).
    """
    if news is None:
        return
    # compare_cache returns a plain list of rows -- normalise to a DataFrame
    # so .to_csv is always available.
    if not isinstance(news, pandas.DataFrame):
        news = pandas.DataFrame(news)
    if news.empty:
        return  # nothing new to persist
    try:
        DataPath = conf.configs['filedb']['NewsPath']
        filename = 'ts_news.csv'
        file_path = os.path.abspath(os.path.join(os.path.abspath(DataPath), filename))
        logging.info("start writing csv......")
        logging.info("file path to write is %s" % file_path)
        # Emit the header row only when creating the file; repeated appends
        # would otherwise interleave header lines with the data.
        news.to_csv(file_path, index=False, sep=',', encoding='utf-8', mode='a',
                    header=not os.path.exists(file_path))
        logging.info("end writing...........")
    except Exception as e:
        # Boundary handler: log and swallow so the scheduler keeps running.
        logging.error('trace info is %s' % e)

def insert_to_es(df):
    """Index each row of *df* into the 'ts_news' Elasticsearch index.

    Accepts a DataFrame or a list of Series rows.  Each document gets an id
    of '<batch-timestamp>-<row#>': the original id=datetime.now() collided
    for rows indexed within the same clock tick, silently overwriting
    documents.

    Args:
        df: DataFrame or list of Series; each row becomes one ES document.
    """
    from elasticsearch import Elasticsearch
    # TODO(review): move the ES endpoint into conf instead of hard-coding it.
    es = Elasticsearch("http://10.1.23.4:9200/")
    batch_ts = datetime.now().isoformat()
    for i in range(len(df)):
        # Support both DataFrame (.iloc) and plain list-of-Series input.
        row = df.iloc[i] if hasattr(df, 'iloc') else df[i]
        es.index(index="ts_news", doc_type="content",
                 id="%s-%d" % (batch_ts, i), body=row.to_dict())

def job_function():
    """Scheduled unit of work: fetch the latest news and persist unseen rows."""
    # Fetch the latest 50 items (ascending by time).
    news = fetchTsNews(50)
    print(news)

    if cache.currsize == 0:
        # Cold start: everything is new -- seed the cache, write it all.
        initial_cache(cache, news)
        data_to_write = news
    else:
        # Warm cache: keep only unseen rows.  compare_cache returns a list
        # of Series; normalise to a DataFrame so the downstream writers
        # (to_csv / ES indexing) always receive a consistent type.
        data_to_write = pandas.DataFrame(compare_cache(cache, news))

    print(data_to_write)
    # Persist to CSV ...
    writeTsNews(data_to_write)
    # ... and to Elasticsearch.
    insert_to_es(data_to_write)

    # Important notices are fetched and only logged for now.
    notice = ts.get_notices()
    if notice is not None:
        logging.info("notices fetched: %s" % notice)
    else:
        logging.info("no notices fetched ....")


def start_fetch_tsNews():
    """Run job_function every 60 seconds on a blocking scheduler.

    Never returns: BlockingScheduler.start() occupies the calling thread
    until the scheduler is shut down.
    """
    scheduler = BlockingScheduler()
    scheduler.add_job(job_function, 'interval', seconds=60)
    scheduler.start()



# Script entry point: start the blocking fetch/persist loop.
if __name__ == '__main__':
    start_fetch_tsNews()






