import json
import logging

import pandas as pd
import pymysql
import torch
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Pandas display settings: show all columns/rows without truncation, use a
# thousands-separator float format, and never wrap wide frames when printing.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', '{:,}'.format)
pd.set_option('expand_frame_repr', False)

# Log INFO and above to a file that is overwritten on every run; each record
# carries a timestamp, source file, line number, and level.
logging.basicConfig(level=logging.INFO, filename="stock_train_dataset.log", filemode="w",
                    format='%(asctime)s | %(filename)s - {%(lineno)d} - [%(levelname)s] - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S', encoding='utf-8')

# MySQL endpoint: local server on the default port.
mysql_host = "127.0.0.1"
mysql_port = 3306


def get_connect():
    """Open a connection to the local ``stock`` MySQL database.

    :return: a ``(connection, cursor)`` pair; the caller is responsible
             for closing both.
    """
    # NOTE(review): credentials are hard-coded here — consider moving them
    # to configuration or environment variables.
    conn = pymysql.connect(host=mysql_host,
                           port=mysql_port,
                           user='root',
                           passwd='hxsoft.net',
                           database='stock')
    return conn, conn.cursor()


def get_result_data():
    """Load and prepare training samples from the ``stock_result`` table.

    Each row's column 6 holds a JSON array of bar dicts; the six fields
    ``open``/``high``/``low``/``close``/``zl1``/``zl2`` of every bar are
    flattened in order into one feature vector per row. Column 5 is the
    regression target.

    :return: the 7-tuple produced by :func:`split_data`, or ``None`` when
             the query matches no rows.
    """
    connect, cursor = get_connect()
    try:
        cursor.execute("select * from stock_result where stock_high != ''")
        rows = cursor.fetchall()
    finally:
        # Always release the DB resources, even if the query fails.
        cursor.close()
        connect.close()
    # fetchall() returns a (possibly empty) tuple for a SELECT, never None,
    # so test for emptiness rather than identity with None.
    if not rows:
        return None
    seq_list = []
    value_list = []
    for row in rows:
        bars = json.loads(row[6])
        features = []
        for bar in bars:
            # Flatten the six per-bar fields, preserving the original order.
            for key in ('open', 'high', 'low', 'close', 'zl1', 'zl2'):
                features.append(bar[key])
        seq_list.append(features)
        value_list.append(row[5])
    seq_data = torch.tensor(seq_list, dtype=torch.float32)
    value_data = torch.tensor(value_list, dtype=torch.float32).reshape(-1, 1)
    return split_data(seq_data, value_data)


def split_data(seq_data, value_data):
    """Standardize the data and split it into train/val/test sets (80/10/10).

    :param seq_data:   2-D time-series features, one flattened window per row
    :param value_data: target values, shape ``(n, 1)``
    :return: ``(train_x, train_y, val_x, val_y, test_x, test_y, scaler2)``
             where the six tensors are ``float32`` and ``scaler2`` is the
             fitted target scaler, kept so predictions can be
             inverse-transformed back to the original value scale.
    """
    # fit_transform fits and transforms in one pass; the previous code
    # called fit() and then fit_transform(), fitting the scaler twice.
    scaler1 = StandardScaler()
    seq_data = scaler1.fit_transform(seq_data)

    scaler2 = StandardScaler()
    value_data = scaler2.fit_transform(value_data)

    # NOTE(review): scaling before splitting leaks val/test statistics into
    # the transform; kept as-is to preserve existing behavior.
    train_x, tmp_x, train_y, tmp_y = train_test_split(
        seq_data, value_data, train_size=0.8, random_state=0, shuffle=True)
    val_x, test_x, val_y, test_y = train_test_split(
        tmp_x, tmp_y, train_size=0.5, random_state=0, shuffle=True)

    return (torch.tensor(train_x, dtype=torch.float32),
            torch.tensor(train_y, dtype=torch.float32),
            torch.tensor(val_x, dtype=torch.float32),
            torch.tensor(val_y, dtype=torch.float32),
            torch.tensor(test_x, dtype=torch.float32),
            torch.tensor(test_y, dtype=torch.float32),
            scaler2)
