import pymongo as pg
import numpy as np
import pandas as pd
import string
import datetime
from Crawler import crawler_tushare_new as ctn
import jieba
jieba.load_userdict("finance_dict.txt")
import jieba.posseg as pseg
from operator import itemgetter
import operator
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
from gensim import models, corpora
from sklearn.svm import SVC,SVR
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer


# 获取集合
def getCollection(dataBaseName, collectionName):
    """Return a handle to *collectionName* in *dataBaseName* on the local MongoDB server."""
    mongo_client = pg.MongoClient('localhost', 27017, connect=False)
    database = mongo_client.get_database(dataBaseName)
    return database.get_collection(collectionName)

# 定义一个函数获取平台每条新闻的日期，标题和正文
# Fetch the date, title and article text of every news item on one platform.
def getNewsDatas(dataBaseName, collectionName):
    """Return all documents' Date/Title/Article fields from the given
    collection, keeping only records whose 'Date' field is not the
    empty string (documents missing 'Date' entirely are kept, since
    their value is None, matching the original behavior)."""
    cursor = getCollection(dataBaseName, collectionName).find(
        {}, {'_id': 0, 'Date': 1, 'Title': 1, 'Article': 1})
    return [doc for doc in cursor if doc.get('Date') != '']
# 将各个平台新闻合并到一个列表中
# Merge the news from every platform into a single list.
def mergeNews():
    """Gather company news from all crawled platforms and return one
    list of documents sorted by 'Date', newest first."""
    sources = [
        ('Cnstock_Stock', 'cnstock_news_company'),
        ('Jrj_Stock', 'jrj_news_company'),
        ('NBD_Stock', 'nbd_news_company'),
        ('Sina_Stock', 'sina_news_company'),
        ('Stcn_Stock', 'stcn_news_company'),
    ]
    merged = []
    for db_name, coll_name in sources:
        merged.extend(getNewsDatas(db_name, coll_name))
    # Descending sort on the 'Date' key of each document.
    return sorted(merged, key=itemgetter('Date'), reverse=True)
# 创建停用词list
# Build the stop-word list.
def stopwordslist(filepath):
    """Load a stop-word list (one word per line, UTF-8) from *filepath*.

    Returns a list of stripped lines (blank lines become '' exactly as
    before).  Fix: the original leaked the file handle — open() was never
    closed; a context manager guarantees the close even on error.
    """
    with open(filepath, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]
# 进行分词去停用词
# Segment the articles and remove stop words.
def newsParticiple():
    """Tokenize every merged news article with jieba (search-engine mode),
    dropping stop words, tabs and single spaces.

    Returns a list of token lists; the article's date (compacted to
    'YYYYMMDD') is prepended to the text before segmentation so the first
    token(s) identify the day for downstream grouping.

    Fixes: the original shadowed the builtin name ``list``, did O(n)
    membership tests against a stop-word *list* inside the hot loop, and
    carried dead commented-out code.
    """
    # A set gives O(1) membership tests; same matches as the original list.
    stopwords = set(stopwordslist('Chinese_Stop_Words.txt'))
    tokenized = []
    for news in mergeNews():
        # Prefix the compact date ('YYYY-MM-DD' -> 'YYYYMMDD') to the article.
        sentence = news.get('Date')[0:10].replace('-', '') + ',' + news.get('Article')
        tokens = [
            word for word in jieba.cut_for_search(sentence.strip())
            if word not in stopwords and word != '\t' and word != ' '
        ]
        tokenized.append(tokens)
    return tokenized

# 获取当天的新闻数据
# Keep only the token lists from the most recent day.
def newsTodayParticiple(allList):
    """Return the elements of *allList* belonging to the most recent day.

    *allList* is the output of newsParticiple(): each element is a token
    list whose first token should be a 'YYYYMMDD' date string.  Elements
    whose first token does not parse as such a date (or that are empty)
    are skipped.  The reference date is taken from the FIRST element, so
    the input is assumed sorted newest-first — matches mergeNews()'s
    descending sort, but confirm if callers reorder.

    Fixes: catch only the specific parse failures (ValueError for a bad
    format, TypeError for a non-string, IndexError for an empty item)
    instead of BaseException, and drop the debug print.
    """
    todayList = []
    for item in allList:
        try:
            datetime.datetime.strptime(item[0], '%Y%m%d').date()
        except (ValueError, TypeError, IndexError):
            continue  # not a dated token list — skip it
        if item[0] == allList[0][0]:
            todayList.append(item)
    return todayList

def dataNormalization(argsList):
    """Convert tokenized articles into a dense TF-IDF matrix (one row per article).

    For each token list the first and last tokens are dropped (the first
    is the prepended date token; why the last is dropped is unclear —
    presumably a trailing sentinel, TODO confirm) and the rest are joined
    into one space-separated document string.  The documents are then
    turned into term counts and TF-IDF-weighted over the whole corpus.

    Fix: the original shadowed the builtin name ``list``.
    """
    documents = [" ".join(tokens[1:-1]) for tokens in argsList]

    counts = CountVectorizer().fit_transform(documents)
    tfidf = TfidfTransformer().fit_transform(counts)
    return tfidf.toarray()
# return trainList
# for item in dataNormalization():
#     print(item)
# for doc in corpus_lsi: # both bow->tfidf and tfidf->lsi 转化实际上在这里才开始执行
#      print(doc)
# ====================================================================================================
#length = max(dictionary) + 1 #列表的最大长度
# vector = []
# for content in corpus_lsi:
#     sentense_vectors = np.zeros(length)
#     for co in content:
#         sentense_vectors[co[0]]=co[1]
#     vector.append(sentense_vectors)
# ======================================================================================================
'''
①Lsi保存和加载训练模型
有时候训练一个模型要很久的时间，为了防止以后使用需要重新训练，可以使用scikit-learn的joblib来把模型保存到本地。之后需要用的时候，在加载就行了
lsi.save('/tmp/model.lsi') # same for tfidf, lda, ...
lsi = models.LsiModel.load('/tmp/model.lsi')

②
import sklearn.externals.joblib as joblib
# 这里拿sklearn.svm作为示例。
from sklearn.svm import SVC
svc = SVC()
# X_train, y_train为测试数据
svc.fit(X_train, y_train)
# 训练完成之后，使用joblib进行保存，svc是你的模型的名字，foo.m是文件的名字
joblib.dump(svc,'foo.m')

# 当需要使用的时候，从foo.m中加载即可。
svc = joblib.load('foo.m')
'''
# ======================================================================================================
def predictStock():
    """Train one SVR per stock (volume on day t -> close on day t+1) and
    store today's prediction in Stock_Result.stock_predict_datas.

    For each stock the two history slices are offset by one day, so each
    feature row (volume) is paired with the NEXT day's close; the most
    recent volume is used as the test sample.  NOTE(review): the feature
    key 'volume ' has a trailing space — presumably that is how the
    crawler stores it; confirm against ctn before "fixing" it.

    Fixes: fetch the history once per stock (the original queried Mongo
    twice), hoist the result-collection handle out of the loop, and
    replace bare except / except BaseException with except Exception.
    """
    nowTime = str(datetime.datetime.today().date())
    # One collection handle for all stocks instead of one per iteration.
    collection = getCollection('Stock_Result', 'stock_predict_datas')
    for item in ctn.queryAllStockCodeAndName():
        name = item.get('name')
        code = item.get('code')
        history = ctn.queryOneStockHistAllDateData(code)
        oneList2 = history[1:-1]              # targets: next-day records
        if len(oneList2) <= 0:
            continue
        oneList1 = history[0:len(oneList2)]   # features: same-length prefix
        X = [[rec.get('volume ')] for rec in oneList1]
        y = [rec.get('close') for rec in oneList2]
        train_X = np.array(X)
        train_y = np.array(y)
        # Latest known volume is the single test sample.
        test_XT = np.array([[oneList2[-1].get('volume ')]])
        clf = SVR()
        try:
            clf.fit(train_X, train_y)
            result = clf.predict(test_XT)
        except Exception:  # bad/insufficient data for this stock — skip it
            continue
        # NOTE(review): Collection.insert is deprecated in modern pymongo
        # (use insert_one); kept as-is to match the installed driver.
        for rt in result:
            collection.insert({'getNewsDate': oneList2[-1].get('date'),
                               'date': nowTime,
                               'code': code,
                               'name': name,
                               'result': rt})
# Run the prediction pipeline only when executed as a script, so merely
# importing this module does not kick off training and DB writes.
if __name__ == "__main__":
    predictStock()




