import pandas as pd
import warnings
import lightgbm as lgb
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
import numpy as np

warnings.filterwarnings('ignore')
# Brand-name -> integer code used to encode every "品牌" (brand) column.
# The position of a brand in this list is the code it receives.
_BRAND_ORDER = ['OPPO', 'vivo', '一加', '三星', '中兴', '中国移动',
                '其他', '华为', '小米', '荣耀', '魅族']
text_to_value_mapping = {brand: code for code, brand in enumerate(_BRAND_ORDER)}


def fill_brand_price(data, price_col, brand_col):
    """Impute missing handset prices in *data* in place.

    Rows where both the price and the brand are unknown get the sentinel -1;
    the remaining missing prices are replaced by the mean price of the row's
    brand (rows with a NaN brand are excluded from the group means by pandas,
    so the -1 sentinels never pollute the means).
    """
    both_missing = pd.isnull(data[price_col]) & pd.isnull(data[brand_col])
    data.loc[both_missing, price_col] = -1
    brand_mean = data.groupby(brand_col)[price_col].mean()
    # Vectorized equivalent of the former per-row apply(axis=1):
    # map brand -> brand mean, then fill only the still-missing prices.
    data[price_col] = data[price_col].fillna(data[brand_col].map(brand_mean))


def pre_data(data):
    """Clean and integer-encode the raw feature frame.

    Mutates *data*'s columns in place, then returns a new frame with the
    non-feature columns ('标签月', '客户类型', '用户标识') dropped.
    """
    # Missing event counters mean "no events occurred".
    data['近三年换终端次数'] = data['近三年换终端次数'].fillna(0.0)
    data['近三月限速次数'] = data['近三月限速次数'].fillna(0.0)
    data['上月限速次数'] = data['上月限速次数'].fillna(0.0)
    # Collapse the out-of-range gender codes (-1, 9) into 1.
    data['性别'] = data['性别'].replace([-1, 9], 1)

    # Current handset: back-fill the brand from the previous handset,
    # then impute the price.
    data['当前终端品牌'] = data['当前终端品牌'].fillna(data['上次终端品牌'])
    fill_brand_price(data, '当前终端价格', '当前终端品牌')

    # Previous handset.
    data['上次终端品牌'] = data['上次终端品牌'].fillna(data['上上次终端品牌'])
    fill_brand_price(data, '上次终端价格', '上次终端品牌')

    # Handset before that (no earlier brand to back-fill from).
    fill_brand_price(data, '上上次终端价格', '上上次终端品牌')

    data = data.fillna(-1)

    # Encode every brand ('品牌') column with the shared integer mapping;
    # unmapped values (e.g. the -1 fill above) become NaN and are re-filled
    # with -1 just below.
    for column in data.filter(regex='品牌').columns:
        data[column] = data[column].map(text_to_value_mapping)
    data = data.fillna(-1)

    return data.drop(['标签月', '客户类型', '用户标识'], axis=1)


def Accuracy_score(preds, dtrain):
    """Custom LightGBM eval metric: multiclass accuracy.

    Parameters
    ----------
    preds : np.ndarray of shape (n_rows, n_classes) — per-class scores
        (2-D, as implied by the axis=1 argmax).
    dtrain : lgb.Dataset-like — true labels obtained via ``get_label()``.

    Returns the ``(name, value, is_higher_better)`` triple expected by
    LightGBM's ``feval``.
    """
    labels = dtrain.get_label()
    # argmax over the class axis -> predicted class id per row.
    pred_classes = np.argmax(preds, axis=1)
    # Plain numpy instead of sklearn.accuracy_score: identical result without
    # sklearn's input-validation overhead on every boosting round.
    return 'accuracy', (labels == pred_classes).mean(), True


def lgb_model(xtrain, ytrain, test, k, seed):
    """Train stratified k-fold LightGBM multiclass models and average test predictions.

    Parameters
    ----------
    xtrain : pd.DataFrame — training features.
    ytrain : pd.Series — integer class labels (0-10).
    test : pd.DataFrame — rows to predict; must contain xtrain's columns.
    k : int — number of stratified folds.
    seed : int — shuffle seed for the fold split.

    Returns
    -------
    list — one probability vector per test row, the mean over all k folds.
    """
    feats = list(xtrain.columns)
    folds = StratifiedKFold(n_splits=k, shuffle=True, random_state=seed)

    # Loop-invariant training parameters, hoisted out of the fold loop.
    parameters = {
        'metric': 'multi_logloss',
        'objective': 'multiclass',
        'num_class': 11,  # number of brand codes in text_to_value_mapping
        'learning_rate': 0.05,
        'boosting_type': 'gbdt',
        'max_depth': -1,
        'num_leaves': 128,
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'min_data_in_leaf': 200,
        'is_unbalance': True,
        'verbose': 3,
        'early_stopping_rounds': 200,
    }

    offline_score = []
    output_preds = []
    for train_index, valid_index in folds.split(xtrain, ytrain):
        # .iloc everywhere: select by position so a non-RangeIndex ytrain
        # cannot silently mis-align with the fold indices.
        train_y, valid_y = ytrain.iloc[train_index], ytrain.iloc[valid_index]
        train_X, valid_X = xtrain[feats].iloc[train_index], xtrain[feats].iloc[valid_index]

        dtrain = lgb.Dataset(train_X, label=train_y)
        dvalid = lgb.Dataset(valid_X, label=valid_y)

        # 'booster' rather than re-using (and shadowing) the function's own name.
        booster = lgb.train(params=parameters,
                            train_set=dtrain,
                            num_boost_round=10000,
                            valid_sets=[dtrain, dvalid],
                            feval=Accuracy_score)
        offline_score.append(booster.best_score['valid_1']['accuracy'])
        output_preds.append(booster.predict(test[feats], num_iteration=booster.best_iteration))

    print('OOF-MEAN-Accuracy score:%.6f, OOF-STD:%.6f' % (np.mean(offline_score), np.std(offline_score)))
    # BUG FIX: the original averaged only the first two folds
    # (np.mean(array[:2], axis=0)); average every fold's prediction.
    return list(np.mean(np.array(output_preds), axis=0))


if __name__ == '__main__':
    # Train and test frames go through identical preprocessing.
    data = pd.read_csv('testa/toUser_train_data.csv')
    data = pre_data(data)
    data_a = pd.read_csv('testa/test_A.csv')
    test_a = pre_data(data_a)

    # '新5G终端品牌' is already integer-encoded by pre_data (regex '品牌').
    ytrain = data['新5G终端品牌']
    xtrain = data.drop(['新5G终端品牌'], axis=1)

    # Three seeds -> three 5-fold ensembles; fuse by summing class probabilities.
    output_preds1 = lgb_model(xtrain, ytrain, test_a, 5, 0)
    print('output_preds1_len', len(output_preds1))
    output_preds2 = lgb_model(xtrain, ytrain, test_a, 5, 1111111)
    # BUG FIX: this line re-printed len(output_preds1) under the wrong label.
    print('output_preds2_len', len(output_preds2))
    output_preds3 = lgb_model(xtrain, ytrain, test_a, 5, 2021)

    fused_prob = np.array(output_preds1) + np.array(output_preds2) + np.array(output_preds3)
    preds = np.argmax(fused_prob, axis=1)

    data_a['新5G终端品牌'] = preds
    # .copy() so the column re-assignment below acts on an owned frame,
    # not a slice view (avoids pandas' SettingWithCopy behavior).
    res_data = data_a[['用户标识', '新5G终端品牌']].copy()
    # Map the integer codes back to brand names for the submission file.
    reversed_dict = {value: key for key, value in text_to_value_mapping.items()}
    res_data['新5G终端品牌'] = res_data['新5G终端品牌'].map(reversed_dict)
    res_data.to_csv('submit_A_lgb_new.csv', index=False)
