# -*- coding: utf-8 -*-
"""
Created on Tue May 11 14:43:59 2021

@author: 野生期货分析师
"""

# Daily data update script
import sys
import schedule
import os, json, requests
import datetime
from pandas import read_pickle, concat
from tqdm import tqdm
from pathlib import Path
from tjdutils.utils import save_pickle, check_path, TjdDate
from hf_monitor import Monitor
from corrector import Corrector
from ftplib import FTP
from pandas import to_datetime
import pandas as pd


sys.path.append("../")


def send_msg(text_to_send, name_to_at):
    """Post *text_to_send* to the WeCom group webhook and @-mention *name_to_at*.

    Parameters
    ----------
    text_to_send : str
        Message body pushed to the group chat.
    name_to_at : str
        User name to mention; an "@" prefix is added automatically.
    """
    # NOTE(review): the webhook key is hard-coded (and printed below) —
    # consider moving it to config/env to avoid leaking it in logs.
    wx_url = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=765f9e09-edbe-4e54-82c3-8ad081519564"
    ater = "@" + name_to_at
    data = json.dumps({"msgtype": "text", "text": {"content": text_to_send, "mentioned_list": [ater]}})
    print(wx_url)
    # Bug fix: the content type belongs in the request *headers*; the original
    # passed it via ``auth=``, which sent it as HTTP basic-auth credentials.
    r = requests.post(wx_url, data=data, headers={"Content-Type": "application/json"})
    # Bug fix: ``r.json`` is a bound method — call it to show the parsed reply.
    print(r.json())


def get_all_collector():
    """Load the y_name -> collector-pickle-path index, rebuilding it if absent.

    Scans ``../../output/collector``.  If a cached ``all_collector.pkl`` index
    already exists it is loaded directly; otherwise the index is rebuilt from
    every ``*.pkl`` collector in the directory (keyed by each collector's
    ``y_name``) and saved back for next time.

    Returns
    -------
    dict
        Mapping of y_name (str) to the path of that target's collector pickle.
    """
    # Bug fix: the original used "..\..\output\collector" — backslashes are
    # invalid escape sequences and Windows-only; use forward slashes like the
    # other paths in this file.
    dir_path = check_path("../../output/collector")
    all_paths = os.listdir(dir_path)
    c_all_path = os.path.join(dir_path, 'all_collector.pkl')
    # Fast path: cached index present — load it once (the original re-read the
    # pickle once per matching filename).
    if any('all_collector' in path for path in all_paths):
        print('yes')
        return read_pickle(c_all_path)
    # Slow path: rebuild the index from every collector pickle found.
    path_data_dict = {}
    for path in all_paths:
        if '.pkl' in path:
            s_path = os.path.join(dir_path, path)
            data = read_pickle(s_path)
            y_name = data['y_name']
            print(y_name, '>', s_path)
            path_data_dict[y_name] = s_path
    save_pickle(path_data_dict, c_all_path, long_str=True)
    return path_data_dict


def monitor_every_targets(all_collector, x, y):
    """Restore and correct every target to daily frequency.

    For each y_name in *all_collector*: load its collector pickle, pick the
    parameter set with the smallest ``triangle_area`` score, load that run's
    rolling summary dataframe, restore a daily series via ``Monitor`` and
    correct/connect it via ``Corrector`` (3-sigma rule).

    Parameters
    ----------
    all_collector : dict
        Mapping of y_name to the collector pickle's path.
    x, y :
        Feature data and target table forwarded to ``Monitor`` / ``Corrector``.

    Returns
    -------
    tuple(dict, dict)
        (all_daily_targets, all_daily_corrector), both keyed by y_name.
    """
    all_daily_targets = {}
    all_daily_corrector = {}
    for target in tqdm(list(all_collector.keys())):
        collector_path = all_collector[target]
        # The collector dict holds "cycle_key" (parameters used), "df" (the
        # evaluation dataframe), "dict" (summary locations) and "y_name".
        main_collector = read_pickle(check_path(collector_path))
        eval_df = main_collector['df']
        # Smallest triangle_area wins: sort ascending, take the first index.
        best_key = eval_df.sort_values(by='triangle_area', ascending=True).index[0]
        y_name = main_collector['y_name']
        # Stringify the keys so the best key can be looked up reliably; each
        # value is a tuple whose second element is the summary file's address.
        summary_paths = {str(kk): vv for kk, vv in main_collector['dict'].items()}
        address = summary_paths[str(best_key)][1]
        # The rolling-by-date summary dataframe — the key evaluation table.
        summary_df = read_pickle(check_path(address))
        # Daily restoration.
        mit = Monitor(summary_df, x, y, y_name)
        daily_y, _unused = mit.get_md_and_tensor()
        all_daily_targets[y_name] = daily_y
        # Daily correction with the 3-sigma rule, then connect the pieces.
        ctor = Corrector(y, daily_y, y_name, 'sigma3')
        y_hf_se = ctor.go_correct()
        print(y_hf_se.shape)
        all_daily_corrector[y_name] = ctor.go_connect()
    return all_daily_targets, all_daily_corrector


def save_and_upload_to_sql(all_daily_targets, all_daily_corrector):
    """Write one Excel file per target into today's monitor directory.

    For each y_name the corrected series and the raw daily series are
    concatenated column-wise (corrected first) and written to
    ``../output/monitor/<today>/<y_name>.xlsx``.

    NOTE(review): despite the name, no SQL upload happens in this function —
    the written paths are returned so a caller can perform the upload.

    Parameters
    ----------
    all_daily_targets : dict
        y_name -> raw restored daily series.
    all_daily_corrector : dict
        y_name -> corrected/connected daily series.

    Returns
    -------
    list of str
        Paths of the Excel files written.
    """
    path = check_path("../output/monitor/")
    today = TjdDate().dt['td']
    today_dir = os.path.join(path, today)
    # Bug fix: exist_ok avoids the check-then-create race of the original
    # Path.exists() / os.makedirs pair.
    os.makedirs(today_dir, exist_ok=True)
    y_path_list = []
    for y_name in all_daily_corrector:
        y_path = os.path.join(today_dir, y_name + ".xlsx")
        y_daily = all_daily_targets[y_name]
        y_corre = all_daily_corrector[y_name]
        dt_to_upload = concat([y_corre, y_daily], axis=1)
        # Normalize the index to real timestamps before export.
        dt_to_upload.index = to_datetime(dt_to_upload.index)
        dt_to_upload.to_excel(y_path)
        y_path_list.append(y_path)
    # Bug fix: the original built y_path_list and then silently discarded it.
    return y_path_list


def update_main():
    """Placeholder for the main update routine — not implemented yet (TODO)."""
    pass


def download_latest_pkl_from_ftp(dir_path, filename):
    """Download the basename of *filename* from the FTP server into *filename*.

    Parameters
    ----------
    dir_path : str
        Remote FTP directory to change into before downloading.
    filename : str
        Local target path; its basename is the remote file requested.
    """
    ftp = FTP()
    ftp.set_debuglevel(2)  # verbose transfer logging
    ftp.connect("192.168.1.202")
    ftp.login()  # anonymous login (no credentials)
    ftp.encoding = "GB2312"  # remote file names are GB2312-encoded Chinese
    # Bug fix: honour the dir_path argument — the original hard-coded the
    # remote directory and left the parameter unused.  job() passes the same
    # value ("基础数据/吴卫专用/"), so existing behavior is unchanged.
    ftp.cwd(dir_path)
    bufsize = 1024  # transfer buffer size
    name_in_dir = filename.split("/")[-1]
    # Bug fix: the original leaked the file handle (open(...).write with no
    # close); the context manager guarantees the file is flushed and closed.
    with open(filename, "wb") as file_handle:
        ftp.retrbinary("RETR %s" % name_in_dir, file_handle.write, bufsize)
    ftp.set_debuglevel(0)  # back to quiet mode
    ftp.quit()
    print(filename, "download success")  # fixed "sucess" typo


def job():
    """Fetch the pinned-date feature pickle via FTP, then monitor all targets.

    Returns
    -------
    tuple(dict, dict)
        (all_daily_targets, all_daily_corrector) as produced by
        monitor_every_targets().
    """
    # NOTE(review): the date is pinned; the commented line below shows how to
    # switch back to today's date — confirm which is intended before shipping.
    # td = str(datetime.date.today())
    td = "2021-11-08"
    download_file = "../input/feature_for_ppi_%s.pkl" % td
    print(download_file)
    print('loading...', download_file)
    local_filename = check_path(download_file)
    # Remote folder on the FTP server holding the feature pickles.
    ftp_dir_path = "基础数据/吴卫专用/"
    download_latest_pkl_from_ftp(ftp_dir_path, local_filename)
    # Data sources: x = downloaded feature pickle, y = target-set workbook.
    x = read_pickle(local_filename)
    y = pd.read_excel('../../input/y集合.xlsx', index_col=0)
    # Index of all targets that need daily restoration.
    all_collector = get_all_collector()
    # Raw restored series and corrected series, both keyed by y_name.
    return monitor_every_targets(all_collector, x, y)


if __name__ == "__main__":
    all_daily_targets, all_daily_corrector = job()
    df = pd.DataFrame(all_daily_corrector)
    df.to_excel('df_上市公司申万.xlsx')