import sys, os
import pandas as pd
# import modin.pandas as pd
import json
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.datasets import load_svmlight_file
import pickle

# deepctr_torch is a locally modified fork (adds class_num / model_save_path / focal_loss / label_weight)
from deepctr_torch.models import DeepFM, PNN, WDL, DCN, NFM, xDeepFM
from deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names

import torch

def libsvm_to_df(data_path, header):
    """Load a libsvm-format file into a pandas DataFrame.

    Parameters
    ----------
    data_path : str
        Path to the libsvm/svmlight file.
    header : list of str
        Column names for the feature matrix; its length must match the
        number of feature columns in the file.

    Returns
    -------
    pd.DataFrame
        Frame whose first column is 'target' (the labels), followed by the
        feature columns named by `header`.
    """
    X, y = load_svmlight_file(data_path)
    # .toarray() yields a plain ndarray; the original .todense() returns
    # np.matrix, which is deprecated in NumPy and best avoided.
    feat_df = pd.DataFrame(X.toarray(), columns=header)
    target_df = pd.DataFrame(y, columns=["target"])
    # Target is deliberately placed as the first column.
    return pd.concat([target_df, feat_df], axis=1)

def _transform_with_unseen(series, lbe):
    """Map a column through a fitted LabelEncoder; unseen values fall back to 0.

    NOTE(review): the fallback index 0 collides with the encoding of the
    first seen class — this mirrors the original test-time behavior, confirm
    it is acceptable.
    """
    mapping = dict(zip(lbe.classes_, lbe.transform(lbe.classes_)))
    return series.apply(lambda x: mapping.get(x, 0))


def run(mode="train", model_name="DeepFM", model_path="deepfm_hidden-512_256_drop-0.2.pt", train_postfix="", dev_postfix="",
        version="ver1_", save_folder="./save/"):
    """Train or evaluate a multiclass CTR model on libsvm-formatted data.

    Parameters
    ----------
    mode : str
        "train" fits on the train/dev splits; any other value evaluates a
        saved checkpoint on the test split.
    model_name : str
        One of "DeepFM", "PNN", "WDL", "DCN", "NFM", "xDeepFM".
    model_path : str
        Checkpoint filename; it is prefixed with save_folder + version.
    train_postfix, dev_postfix : str
        Filename suffixes selecting data subsets (dev_postfix also selects
        the test file in evaluation mode).
    version : str
        Prefix for persisted preprocessing artifacts (label encoders, scaler)
        and the checkpoint name.
    save_folder : str
        Directory where model checkpoints are written.

    Returns
    -------
    None
    """
    assert model_name in ["DeepFM", "PNN", "WDL", "DCN", "NFM", "xDeepFM"]
    # makedirs(exist_ok=True) is race-free and also creates missing parents;
    # the original os.mkdir raised if the folder already existed.
    os.makedirs(save_folder, exist_ok=True)
    # Read the column names; keep only feature keys flagged with value >= 0.
    with open("data/train_dev_test/keys.json", "r", encoding="utf-8") as f1:
        header = json.load(f1)["_feature"]
        header = [k for k, v in header.items() if v >= 0]

    if mode == "train":
        df1 = libsvm_to_df(data_path=f"data/train_dev_test/train{train_postfix}.txt", header=header)
        df2 = libsvm_to_df(data_path=f"data/train_dev_test/dev{dev_postfix}.txt", header=header)
    else:
        df = libsvm_to_df(data_path=f"data/train_dev_test/test{dev_postfix}.txt", header=header)

    # --- Sparse (categorical) features, selected by column-name prefix ---
    realtimeFeature_ = [name for name in header if name.startswith("realtimeFeature_")]
    rcin_summaryPredictResults_ = [name for name in header if name.startswith("rcin_summaryPredictResults_")]
    rcin_intentTypeList = [name for name in header if name.startswith("rcin_intentTypeList:")]

    sparse_feat = ["oi_virtualPaymentType", "oi_codTimeType"]

    # Hand-curated sparse columns (originally taken from an Excel sheet).
    sparse_feat_from_excel = [
        "third_id",
        "orderid",
        "realtimeFeature",
        "entry",
        "order_invoice_type",
        "stock",
        "order_type",
        "order_state",
        "oin_user_profession",
        "oin_robot_ask_average_session_duration",
        "oin_super_long_message_scale",
        "oin_long_message_scale",
        "oin_sensitive_words_scale",
        "oin_user_sex",
        "oin_mail_satisfied_scale",
        "oin_person_robot_ask_times_scale",
        "oin_user_cycl_lifecycle",
        "oin_user_score",
        "oin_average_message_count",
        "oin_user_stage",
        "oin_mail_unsatisfied_scale",
        "oin_mail_resolution_scale",
        "oin_message_send_interval",
        "oin_person_robot_ask_average_session_duration",
        "oin_user_anger_scale",
        "oin_user_education",
        "oin_phone_again_after_robot_ask_24h_intervel",
        "oin_user_age",
        "oin_invalid_ask_average_intervel",
        "oin_robot_ask_times_scale",
        "oin_phone_again_after_robot_ask_24h_scale",
        "oin_user_anxiety_scale",
        "oin_short_message_scale",
        "oin_middle_message_scale",
        "oin_user_level",
        "oin_robot_history_invalid_ask_scale",
        "rcin_longMsgProportion",
        "rcin_summaryPredictResults",
        "rcin_chatDuration",
        "rcin_entry",
        "rcin_createTime",
        "rcin_midMsgProportion",
        "rcin_shortMsgProportion",
        "rcin_superLongMsgProportion",
        "rcin_msgCount",
        "oi_orderType",
        "oi_idShipmentType",
        "oi_discount",
        "oi_orderBulk",
        "oi_paymentWay",
        "oi_idCompanyBranch",
        "oi_isNewOrder",
        "oi_moneyBalance",
        "oi_idInvoiceHeaderType",
        "oi_status",
        "oi_promotionPrice",
        "oi_idInvoicePutType",
        "oi_orderBankFactPrice",
        "oi_userLevel",
        "oi_bigItemCodTime",
        "oi_pricePeriod",
        "oi_status2",
        "oi_parentId",
        "oi_splitType",
        "oi_isRegister",
        "oi_idInvoiceContentsType",
        "oi_isPutBookInvoice",
        "oi_idArea",
        "oi_isUseBalance",
        "oi_idTown",
        "oi_ver",
        "oi_unionId",
        "oi_rePrice",
        "oi_couponDiscount",
        "oi_idProvince",
        "oi_scareBuyState",
        "oi_idInvoiceType",
        "oi_initFactPrice",
        "oi_jingDouDiscount",
        "oi_isJdShip",
        "oi_cODTime",
        "oi_idPickSite",
        "oi_isCodInform",
        "oi_cost",
        "oi_idDelivery",
        "oi_idCity",
        "oi_totalFee",
        "oi_idInvoiceContentTypeBook",
        "oi_idPaymentType",
        "oi_payMoney",
        "oi_jingDouRate",
        "oi_zzweight",
        "oi_jingDouCount",
        "oi_sxzzweight",
        "oi_clientSystemName",
        "oi_clientBrowserName",
        "ci_caseType",
        "ci_caseState",
        "ci_owner",
        "ci_outLineId",
        "ci_source",
        "ci_callBackFlag",
        "ci_curTransferId",
        "ci_orderType",
        "ci_orderSourceFlag",
        "ci_smartFollowFlag",
        "ci_cusLevel",
        "ci_curUpgradeCareId",
        "ci_curUpgradeLv2Id",
        "ei_sku",
        "ei_order",
        "ei_pin"]

    sparse_features = realtimeFeature_ + rcin_summaryPredictResults_ + sparse_feat + \
                        sparse_feat_from_excel + rcin_intentTypeList

    # Drop any curated name that is absent from the current header.
    sparse_features = [item for item in sparse_features if item in header]

    if mode == "train":
        # NOTE(review): this cache filename is hard-coded and ignores
        # `version` — confirm that is intentional.
        if os.path.exists("ver1_all_LabelEncoders.pkl"):
            # Reuse encoders fitted earlier on the full dataset.
            with open("ver1_all_LabelEncoders.pkl", "rb") as f1:
                feat_lbe = pickle.load(f1)
            # BUG FIX: the original loaded the cached encoders but never
            # applied them, so training proceeded on raw (unencoded)
            # categorical values that can exceed the embedding vocabulary.
            for feat_name in tqdm(sparse_features, desc="Train LabelEncoder transforming"):
                df1[feat_name] = _transform_with_unseen(df1[feat_name], feat_lbe[feat_name])
                df2[feat_name] = _transform_with_unseen(df2[feat_name], feat_lbe[feat_name])
        else:
            feat_lbe = {}
            for feat_name in tqdm(sparse_features, desc="Train LabelEncoder fitting"):
                # Fit on train+dev together so dev values are always in-vocabulary.
                lbe = LabelEncoder().fit(pd.concat([df1[feat_name], df2[feat_name]], axis=0))

                df1[feat_name] = lbe.transform(df1[feat_name])
                df2[feat_name] = lbe.transform(df2[feat_name])

                feat_lbe[feat_name] = lbe
            with open(f"{version}LabelEncoders.pkl", "wb") as f1:
                pickle.dump(feat_lbe, f1)
    else:
        with open(f"{version}LabelEncoders.pkl", "rb") as f1:
            feat_lbe = pickle.load(f1)
        for feat_name in tqdm(sparse_features, desc="Test LabelEncoder transforming"):
            # Map categories unseen at fit time to index 0 instead of raising.
            df[feat_name] = _transform_with_unseen(df[feat_name], feat_lbe[feat_name])

    # --- Dense (numeric) features: everything that is not categorical ---
    dense_features = [name for name in header if name not in sparse_features]

    mms = MinMaxScaler(feature_range=(0, 1))
    scaler_path = f"{version}MinMaxScaler.pkl"

    vocabulary_size_map = {feat_name: len(feat_lbe[feat_name].classes_) for feat_name in sparse_features}
    if mode == "train":
        # BUG FIX: fit the scaler on train only and reuse its statistics for
        # dev (the original called fit_transform on both, scaling dev with
        # different min/max than train). Persist it for test-time reuse.
        df1[dense_features] = mms.fit_transform(df1[dense_features])
        df2[dense_features] = mms.transform(df2[dense_features])
        with open(scaler_path, "wb") as f1:
            pickle.dump(mms, f1)
    else:
        if os.path.exists(scaler_path):
            # Scale test data with the statistics learned at train time.
            with open(scaler_path, "rb") as f1:
                mms = pickle.load(f1)
            df[dense_features] = mms.transform(df[dense_features])
        else:
            # Fallback to the original behavior for checkpoints produced
            # before the scaler was persisted (fits on the test set itself).
            df[dense_features] = mms.fit_transform(df[dense_features])

    fixlen_feature_columns = [SparseFeat(feat_name, vocabulary_size=vocabulary_size_map[feat_name], embedding_dim=4)
                            for feat_name in sparse_features] + [DenseFeat(feat_name, 1,)
                            for feat_name in dense_features]
    dnn_feature_columns = fixlen_feature_columns
    linear_feature_columns = fixlen_feature_columns

    feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)

    # 3. Generate input data for the model.
    if mode == "train":
        train, dev = df1, df2
        print(f"df1.shape {df1.shape}; df2.shape {df2.shape}")
        train_model_input = {name: train[name] for name in feature_names}
        dev_model_input = {name: dev[name] for name in feature_names}
    else:
        test_model_input = {name: df[name] for name in feature_names}

    target = ['target']

    # 4. Define model, then train or evaluate.
    DEVICE = "cuda" if torch.cuda.is_available() else 'cpu'
    model_path = save_folder + version + model_path
    # NOTE: class_num / model_save_path / focal_loss / label_weight below are
    # extensions provided by the locally modified deepctr_torch fork.
    if model_name == "DeepFM":
        model = DeepFM(linear_feature_columns, dnn_feature_columns,
                        dnn_hidden_units=(512, 256), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation="prelu",
                        task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)

    elif model_name == "PNN":
        model = PNN(dnn_feature_columns, dnn_hidden_units=(128, 128), l2_reg_embedding=1e-5, l2_reg_dnn=0,
                    init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='prelu', use_inner=True, use_outter=False,
                    kernel_type='mat', task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)

    elif model_name == "WDL":
        model = WDL(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 128),
                 l2_reg_linear=1e-5,
                 l2_reg_embedding=1e-5, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0.3, dnn_activation='prelu',
                 dnn_use_bn=True,
                 task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)

    elif model_name == "DCN":
        model = DCN(linear_feature_columns,
                 dnn_feature_columns, cross_num=2,
                 dnn_hidden_units=(128, 128), l2_reg_linear=0.00001,
                 l2_reg_embedding=0.00001, l2_reg_cross=0.00001, l2_reg_dnn=0, init_std=0.0001, seed=1024,
                 dnn_dropout=0.3,
                 dnn_activation='prelu', dnn_use_bn=True,
                 task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)

    elif model_name == "NFM":
        model = NFM(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(128, 128),
                 l2_reg_embedding=1e-5, l2_reg_linear=1e-5, l2_reg_dnn=0, init_std=0.0001, seed=1024, bi_dropout=0.3,
                 dnn_dropout=0.3, dnn_activation='prelu',
                 task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)

    elif model_name == "xDeepFM":
        model = xDeepFM(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 256),
                 cin_layer_size=(256, 128,), cin_split_half=True, cin_activation='prelu', l2_reg_linear=0.00001,
                 l2_reg_embedding=0.00001, l2_reg_dnn=0, l2_reg_cin=0, init_std=0.0001, seed=1024, dnn_dropout=0.3,
                 dnn_activation='prelu', dnn_use_bn=True,
                 task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)

    model.compile("adam", "focal_loss", metrics=['acc_top1', 'acc_top3'])

    if mode == "train":
        # Manual class re-weighting: label 0 gets weight 0, the ten labels in
        # `top10` share 20% of the mass (0.02 each), the remaining 111 labels
        # share 80%. Presumably top10 are the most frequent classes — confirm.
        top10 = [5, 4, 1, 8, 22, 20, 29, 33, 15, 25]
        label_weight_norm_manul = [0] + [0.2 / 10 if i in top10 else 0.8 / (121 - 10) for i in range(1, 122)]
        label_weight = torch.tensor(label_weight_norm_manul).to(DEVICE)

        model.fit(x=train_model_input, y=train[target].values,
                validation_data=(dev_model_input, dev[target].values),
                batch_size=1024, epochs=20, verbose=1, label_weight=label_weight)
    else:
        # strict=False tolerates minor key mismatches between the checkpoint
        # and the freshly constructed model.
        model.load_state_dict(torch.load(model_path, map_location=torch.device(DEVICE)), strict=False)
        eval_result = model.evaluate(test_model_input, df[target].values, batch_size=2048)
        for k, v in eval_result.items():
            print(f"{k}: {v:.4f}")
    return None

if __name__ == "__main__":
    # Entry point: trains a single model per invocation on the small "_1000"
    # data subsets. The commented-out calls below are previously used
    # configurations (other architectures, dev/test evaluation runs) kept
    # here for reference.
    # run(mode="train", model_name="DeepFM", model_path="DeepFM_drop-0.3.pt", train_postfix="_1000", dev_postfix="_1000", version="ver1_")
    # run(mode="train", model_name="PNN", model_path="PNN_drop-0.3.pt", train_postfix="_1000", dev_postfix="_1000", version="ver1_")
    run(mode="train", model_name="WDL", model_path="WDL_drop-0.3.pt", train_postfix="_1000", dev_postfix="_1000", version="ver1_")
    # run(mode="train", model_name="DCN", model_path="DCN_drop-0.3.pt", train_postfix="_1000", dev_postfix="_1000", version="ver1_")
    # run(mode="train", model_name="NFM", model_path="NFM_drop-0.3.pt", train_postfix="_1000", dev_postfix="_1000", version="ver1_")
    # run(mode="train", model_name="xDeepFM", model_path="xDeepFM_drop-0.3.pt", train_postfix="_1000", dev_postfix="_1000", version="ver1_")
    
    # print("dev")
    # run(mode="test", model_name="DeepFM", model_path="deepfm_hidden-512-256_drop-0.3.pt", train_postfix=None, dev_postfix="_5000_in_dev", version="ver2_")
    
    # print("test:")
    # run(mode="test", model_name="DeepFM", model_path="deepfm_hidden-512-256_drop-0.3.pt", train_postfix=None, dev_postfix="", version="ver1_all_")
    # run(mode="test", model_name="DeepFM", model_path="deepfm_hidden-512-256_drop-0.3.pt", train_postfix="", dev_postfix="", version="ver1_")
    