from config import conf
import os
from os.path import join
import pandas as pd

# Base directory for the raw competition data, taken from the project config file.
SOURCE_DATA_DIR = conf.get('dir', 'source_data_dir')

# When True, load only the semifinal (round-2) data; when False, training data
# is the round-1 + round-2 concatenation.  # NOTE(review): toggles hard-coded paths below
ONLY_SEMIFINAL=False
def load_basic_dataset(split):
    """
    Load the raw competition data and return it as a pandas DataFrame.

    When ONLY_SEMIFINAL is True only the round-2 files are used; otherwise
    the training set is the concatenation of the round-1 and round-2 files.

    :param split: 'train' or 'test'
    :return: pandas DataFrame with the raw rows
    """
    assert split in ['train', 'test']
    # NOTE(review): all paths below are hard-coded absolute paths; the old
    # SOURCE_DATA_DIR-based path was computed and then always overwritten
    # (dead code, now removed). Consider moving these into the config.
    if ONLY_SEMIFINAL:
        if split == 'test':
            path = r'/home/njuciairs/wangshuai/data/金融实体负面信息分析复赛数据/round2_test.csv'
        else:
            path = r'/home/njuciairs/wangshuai/data/金融实体负面信息分析复赛数据/Round2_train1.csv'
        df = pd.read_csv(path)
    else:
        if split == 'test':
            path = r'/home/njuciairs/wangshuai/data/金融实体负面信息分析复赛数据/round2_test.csv'
            df = pd.read_csv(path)
        else:
            # Training data: round-1 and round-2 files merged into one frame.
            path1 = r'/home/njuciairs/wangshuai/data/Train_Data.csv'
            df1 = pd.read_csv(path1)
            path2 = r'/home/njuciairs/wangshuai/data/金融实体负面信息分析复赛数据/Round2_train1.csv'
            df2 = pd.read_csv(path2)
            df = pd.concat([df1, df2], ignore_index=True)
    print("basic dataset len:",len(df))
    return df


def load_train_val_dataset(split_ratio):
    """
    Load the cached train/validation split; if the cache files do not exist
    yet, create the split from the raw training data and cache it first.

    :param split_ratio: fraction of rows assigned to the training set (0..1)
    :return: (train_dataset, val_dataset) pandas DataFrames
    """
    train_path = join(SOURCE_DATA_DIR, 'tmp', 'train_dataset_round2 %.2f' % (split_ratio))
    val_path = join(SOURCE_DATA_DIR, 'tmp', 'val_dataset_round2 %.2f' % (1 - split_ratio))
    if not os.path.exists(train_path) or not os.path.exists(val_path):
        print('split the dataset')
        # makedirs with exist_ok avoids the check-then-create race of the
        # old exists()+mkdir pair.
        os.makedirs(join(SOURCE_DATA_DIR, 'tmp'), exist_ok=True)
        df = load_basic_dataset(split='train')
        df = df.sample(frac=1.0)  # shuffle before splitting
        df = df.reset_index()
        # BUG FIX: the old code used a float split point with df.loc, whose
        # label-based slicing is inclusive on BOTH ends, so the boundary row
        # ended up in both train and val (leakage). An int split point with
        # half-open .iloc slices makes the two sets disjoint.
        split_index = int(len(df) * split_ratio)
        train_dataset = df.iloc[:split_index]
        val_dataset = df.iloc[split_index:]
        train_dataset.to_csv(train_path, index=False)
        val_dataset.to_csv(val_path, index=False)
    train_dataset = pd.read_csv(train_path)
    val_dataset = pd.read_csv(val_path)
    return train_dataset, val_dataset


def load_train_val_dataset_cross(test_number, cross_number, tmpdir='tmp_round2_full'):  # tmpdir='tmp'
    """
    Load one fold of a K-fold cross-validation split; if the fold files do
    not exist yet, create them from the raw training data and cache them.

    :param test_number: 1-based index of the fold used as the held-out set
    :param cross_number: total number of folds K
    :param tmpdir: subdirectory of SOURCE_DATA_DIR where folds are cached
    :return: (test_df, train_df) pandas DataFrames
    """
    cross_dir = join(SOURCE_DATA_DIR, tmpdir, 'cross_%s' % (cross_number))
    if not os.path.exists(cross_dir):
        print('split the dataset')
        # makedirs creates the tmpdir parent and the fold directory in one call.
        os.makedirs(cross_dir)
        df = load_basic_dataset(split='train')
        df = df.sample(frac=1.0)  # shuffle before splitting
        df = df.reset_index()
        batch_size = int(len(df) / cross_number)
        for i in range(cross_number):
            start = batch_size * i
            # BUG FIX: the old code sliced with df.loc[start:end], which is
            # inclusive on both ends, so adjacent folds shared one row and the
            # last fold's end overshot by a whole batch. Half-open .iloc
            # slices give disjoint folds; the remainder goes to the last one.
            end = len(df) if i == cross_number - 1 else batch_size * (i + 1)
            split = df.iloc[start:end]
            split.to_csv(join(cross_dir, '%d' % (i + 1)), index=False)
            print('saved split %d' % (i + 1), join(cross_dir, '%d' % (i + 1)))
    test_df = pd.read_csv(join(cross_dir, '%d' % (test_number)))
    # Training set: every fold except the held-out one.
    train_dfs = [pd.read_csv(join(cross_dir, '%d' % (i)))
                 for i in range(1, cross_number + 1) if i != test_number]
    train_df = pd.concat(train_dfs)
    return test_df, train_df
