# import ctypes

# ctypes.cdll.LoadLibrary('C:/WINDOWS/System32/vcomp140.dll')

# ctypes.cdll.LoadLibrary('C:/WINDOWS/System32/xgboost.dll')
import xgboost as xgb
# from xgboost import Booster as bst
from turtle import forward
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.decomposition import KernelPCA
from sklearn.decomposition import MiniBatchDictionaryLearning 
from sklearn.decomposition import IncrementalPCA
# from lce import LCEClassifier
import m2cgen as m2c
import glob
import os
import matplotlib.pyplot as plt
import time

import sys  # needed on the next line to raise the interpreter recursion limit
sys.setrecursionlimit(100000) # raise the recursion limit so deep recursion (e.g. m2cgen exporting large tree ensembles) does not abort; original note says this allows more training iterations
st = time.time()  # wall-clock start time; total elapsed time is printed at the end of the script
searcher = 0  # mode switch: 0 = single fixed-hyperparameter training run, 1 = grid search over max_depth / learning_rate
        
if __name__ == "__main__":

    # Load the PH data set: training/testing features and labels from CSV.
    # np.loadtxt accepts a filename directly; the original passed open(...)
    # handles that were never closed, leaking one descriptor per file.
    training_inputs = np.loadtxt("featurePH.csv", delimiter=",")
    print(training_inputs)
    training_outputs = np.loadtxt("labelPH.csv", delimiter=",")
    print(training_outputs)
    print("Reading PH CSV")
    testing_inputs = np.loadtxt("feature_testPH.csv", delimiter=",")
    testing_outputs = np.loadtxt("label_testPH.csv", delimiter=",")
    print("Reading PH CSV")

    # Native-API DMatrix wrappers; unused by the sklearn-API training below
    # but kept for backward compatibility with the (dead) watchlist code.
    xgb_train = xgb.DMatrix(training_inputs, label=training_outputs)
    xgb_test = xgb.DMatrix(testing_inputs, label=testing_outputs)


    if searcher == 0:
        # Single fixed-hyperparameter training run on the PH data set.
        # NOTE(review): 'multi:softmax' + 'num_class' is a classification
        # objective, so xgb.XGBClassifier would be the conventional wrapper;
        # XGBRegressor is kept here to preserve the existing behaviour.
        params = {
            'objective': 'multi:softmax',
            'n_estimators': 300,
            'gamma': 0,               # post-pruning conservativeness; larger = more conservative
            'max_depth': 8,           # tree depth; deeper trees overfit more easily
            'lambda': 0,              # L2 regularisation on leaf weights
            # The original passed both 'eta': 0.53 and 'learning_rate': 0.03;
            # they are aliases of the same booster parameter, so the pair was
            # ambiguous. Only the explicit learning_rate is kept.
            'learning_rate': 0.03,
            'num_class': 7,
            'early_stopping_rounds': 10,
        }

        xgb_model = xgb.XGBRegressor(**params)
        xgb_model.fit(training_inputs, training_outputs,
                      eval_set=[(testing_inputs, testing_outputs)],
                      verbose=True)

        # Predict on the held-out set and score by exact label match.
        pred = xgb_model.predict(testing_inputs)
        print("预测结果：", pred)
        print("实际label：", testing_outputs)
        error_rate = np.sum(pred != testing_outputs) / testing_outputs.shape[0]
        print('测试集总错误率:{}'.format(error_rate))
        accuracy = 1 - error_rate
        print('测试集总准确率：%.4f' % accuracy)

        # Export the trained ensemble as C source and persist the booster.
        # (The original also re-opened the generated .c file for reading and
        # never closed or used it; that leaked handle is removed.)
        code = m2c.export_to_c(xgb_model)
        file_path = "xgb_squat_PH.c"
        with open(file_path, "w") as file:
            file.write(code)
        xgb_model.save_model('xgb_model_PH.model')
  
    
    if searcher == 1:
        # Grid search over max_depth (2..5) and learning_rate (0.1..0.7) on
        # the PH data set. The original pre-allocated two 5000x5000 float
        # arrays (result / result_total, ~200 MB each) that were never
        # written to or read; they are removed.
        for depth in range(2, 6):
            for step in range(1, 8):
                print('Iteration Now:', depth, step)
                params = {
                    'objective': 'multi:softmax',
                    'n_estimators': 300,
                    'gamma': 0,              # post-pruning conservativeness
                    'max_depth': depth,      # tree depth under search
                    'lambda': 0,             # L2 regularisation on leaf weights
                    'learning_rate': step / 10,
                    # NOTE(review): num_class is 6 here but 7 in the
                    # searcher == 0 branch — confirm which matches the labels.
                    'num_class': 6,
                    'early_stopping_rounds': 5,
                }
                xgb_model = xgb.XGBRegressor(**params)
                xgb_model.fit(training_inputs, training_outputs,
                              eval_set=[(testing_inputs, testing_outputs)],
                              verbose=False)  # suppress per-round training output

                # Evaluate this hyper-parameter combination by exact label match.
                pred = xgb_model.predict(testing_inputs)
                print("预测结果：", pred)
                print("实际label：", testing_outputs)
                error_rate = np.sum(pred != testing_outputs) / testing_outputs.shape[0]
                accuracy_total = 1 - error_rate
                print('测试集总准确率：%.4f' % accuracy_total)

                # Export/persist the current candidate; later iterations
                # overwrite earlier files (original behaviour preserved).
                # The original's leaked read-handle on the .c file is removed.
                code = m2c.export_to_c(xgb_model)
                file_path = "xgb_squat_PH.c"
                with open(file_path, "w") as file:
                    file.write(code)
                xgb_model.save_model('xgb_model_PH.model')
     
    
    # Load the CFW data set: training/testing features and labels from CSV.
    # np.loadtxt accepts a filename directly; the original passed open(...)
    # handles that were never closed, leaking one descriptor per file.
    training_inputs2 = np.loadtxt("featureCFW.csv", delimiter=",")
    training_outputs2 = np.loadtxt("labelCFW.csv", delimiter=",")
    print("Reading CFW CSV")
    testing_inputs2 = np.loadtxt("feature_testCFW.csv", delimiter=",")
    testing_outputs2 = np.loadtxt("label_testCFW.csv", delimiter=",")
    print("Reading CFW CSV")

    # Native-API DMatrix wrappers; unused by the sklearn-API training below
    # but kept for backward compatibility with the (dead) watchlist code.
    xgb_train2 = xgb.DMatrix(training_inputs2, label=training_outputs2)
    xgb_test2 = xgb.DMatrix(testing_inputs2, label=testing_outputs2)


    if searcher == 0:
        # Single fixed-hyperparameter training run on the CFW data set.
        # NOTE(review): 'multi:softmax' + 'num_class' is a classification
        # objective, so xgb.XGBClassifier would be the conventional wrapper;
        # XGBRegressor is kept here to preserve the existing behaviour.
        params = {
            'objective': 'multi:softmax',
            'n_estimators': 500,
            'gamma': 0,               # post-pruning conservativeness; larger = more conservative
            'max_depth': 8,           # tree depth; deeper trees overfit more easily
            'lambda': 0,              # L2 regularisation on leaf weights
            # The original passed both 'eta': 0.53 and 'learning_rate': 0.03;
            # they are aliases of the same booster parameter, so the pair was
            # ambiguous. Only the explicit learning_rate is kept.
            'learning_rate': 0.03,
            'num_class': 7,
            'early_stopping_rounds': 10,
        }

        xgb_model = xgb.XGBRegressor(**params)
        xgb_model.fit(training_inputs2, training_outputs2,
                      eval_set=[(testing_inputs2, testing_outputs2)],
                      verbose=True)

        # Predict on the held-out set and score by exact label match.
        pred = xgb_model.predict(testing_inputs2)
        print("预测结果：", pred)
        print("实际label：", testing_outputs2)
        error_rate = np.sum(pred != testing_outputs2) / testing_outputs2.shape[0]
        print('测试集总错误率:{}'.format(error_rate))
        accuracy = 1 - error_rate
        print('测试集总准确率：%.4f' % accuracy)

        # Export the trained ensemble as C source and persist the booster.
        # (The original also re-opened the generated .c file for reading and
        # never closed or used it; that leaked handle is removed.)
        code = m2c.export_to_c(xgb_model)
        file_path = "xgb_squat_CFW.c"
        with open(file_path, "w") as file:
            file.write(code)
        xgb_model.save_model('xgb_model_CFW.model')
  
    
    if searcher == 1:
        # Grid search over max_depth (2..5) and learning_rate (0.1..0.7) on
        # the CFW data set. The original pre-allocated two 5000x5000 float
        # arrays (result / result_total, ~200 MB each) that were never
        # written to or read; they are removed.
        for depth in range(2, 6):
            for step in range(1, 8):
                print('Iteration Now:', depth, step)
                params = {
                    'objective': 'multi:softmax',
                    'n_estimators': 300,
                    'gamma': 0,              # post-pruning conservativeness
                    'max_depth': depth,      # tree depth under search
                    'lambda': 0,             # L2 regularisation on leaf weights
                    'learning_rate': step / 10,
                    # NOTE(review): num_class is 6 here but 7 in the
                    # searcher == 0 branch — confirm which matches the labels.
                    'num_class': 6,
                    'early_stopping_rounds': 5,
                }
                xgb_model = xgb.XGBRegressor(**params)
                xgb_model.fit(training_inputs2, training_outputs2,
                              eval_set=[(testing_inputs2, testing_outputs2)],
                              verbose=False)  # suppress per-round training output

                # Evaluate this hyper-parameter combination by exact label match.
                pred = xgb_model.predict(testing_inputs2)
                print("预测结果：", pred)
                print("实际label：", testing_outputs2)
                error_rate = np.sum(pred != testing_outputs2) / testing_outputs2.shape[0]
                accuracy_total = 1 - error_rate
                print('测试集总准确率：%.4f' % accuracy_total)

                # Export/persist the current candidate; later iterations
                # overwrite earlier files (original behaviour preserved).
                # The original's leaked read-handle on the .c file is removed.
                code = m2c.export_to_c(xgb_model)
                file_path = "xgb_squat_CFW.c"
                with open(file_path, "w") as file:
                    file.write(code)
                xgb_model.save_model('xgb_model_CFW.model')


      ###############################################
    # Load the CFR data set: training/testing features and labels from CSV.
    # np.loadtxt accepts a filename directly; the original passed open(...)
    # handles that were never closed, leaking one descriptor per file.
    training_inputs3 = np.loadtxt("featureCFR.csv", delimiter=",")
    training_outputs3 = np.loadtxt("labelCFR.csv", delimiter=",")
    print("Reading CFR CSV")
    testing_inputs3 = np.loadtxt("feature_testCFR.csv", delimiter=",")
    testing_outputs3 = np.loadtxt("label_testCFR.csv", delimiter=",")
    print("Reading CFR CSV")

    # Native-API DMatrix wrappers; unused by the sklearn-API training below
    # but kept for backward compatibility with the (dead) watchlist code.
    xgb_train3 = xgb.DMatrix(training_inputs3, label=training_outputs3)
    xgb_test3 = xgb.DMatrix(testing_inputs3, label=testing_outputs3)


    if searcher == 0:
        # Single fixed-hyperparameter training run on the CFR data set.
        # NOTE(review): 'multi:softmax' + 'num_class' is a classification
        # objective, so xgb.XGBClassifier would be the conventional wrapper;
        # XGBRegressor is kept here to preserve the existing behaviour.
        params = {
            'objective': 'multi:softmax',
            'n_estimators': 500,
            'max_depth': 8,           # tree depth; deeper trees overfit more easily
            'lambda': 0,              # L2 regularisation on leaf weights
            'learning_rate': 0.03,
            'num_class': 7,
            'early_stopping_rounds': 10,
        }

        xgb_model = xgb.XGBRegressor(**params)
        xgb_model.fit(training_inputs3, training_outputs3,
                      eval_set=[(testing_inputs3, testing_outputs3)],
                      verbose=True)

        # Predict on the held-out set and score by exact label match.
        pred = xgb_model.predict(testing_inputs3)
        print("预测结果：", pred)
        print("实际label：", testing_outputs3)
        error_rate = np.sum(pred != testing_outputs3) / testing_outputs3.shape[0]
        print('测试集总错误率:{}'.format(error_rate))
        accuracy = 1 - error_rate
        print('测试集总准确率：%.4f' % accuracy)

        # Export the trained ensemble as C source and persist the booster.
        # (The original also re-opened the generated .c file for reading and
        # never closed or used it; that leaked handle is removed, along with
        # the dead watchlist/num_round native-API leftovers.)
        code = m2c.export_to_c(xgb_model)
        file_path = "xgb_squat_CFR.c"
        with open(file_path, "w") as file:
            file.write(code)
        xgb_model.save_model('xgb_model_CFR.model')
  
    
    if searcher == 1:
        # Grid search over max_depth (2..5) and learning_rate (0.1..0.7) on
        # the CFR data set. The original pre-allocated two 5000x5000 float
        # arrays (result / result_total, ~200 MB each) that were never
        # written to or read; they are removed.
        for depth in range(2, 6):
            for step in range(1, 8):
                print('Iteration Now:', depth, step)
                params = {
                    'objective': 'multi:softmax',
                    'n_estimators': 300,
                    'gamma': 0,              # post-pruning conservativeness
                    'max_depth': depth,      # tree depth under search
                    'lambda': 0,             # L2 regularisation on leaf weights
                    'learning_rate': step / 10,
                    # NOTE(review): num_class is 6 here but 7 in the
                    # searcher == 0 branch — confirm which matches the labels.
                    'num_class': 6,
                    'early_stopping_rounds': 5,
                }
                xgb_model = xgb.XGBRegressor(**params)
                xgb_model.fit(training_inputs3, training_outputs3,
                              eval_set=[(testing_inputs3, testing_outputs3)],
                              verbose=False)  # suppress per-round training output

                # Evaluate this hyper-parameter combination by exact label match.
                pred = xgb_model.predict(testing_inputs3)
                print("预测结果：", pred)
                print("实际label：", testing_outputs3)
                error_rate = np.sum(pred != testing_outputs3) / testing_outputs3.shape[0]
                accuracy_total = 1 - error_rate
                print('测试集总准确率：%.4f' % accuracy_total)

                # Export/persist the current candidate; later iterations
                # overwrite earlier files (original behaviour preserved).
                # The original's leaked read-handle on the .c file is removed.
                code = m2c.export_to_c(xgb_model)
                file_path = "xgb_squat_CFR.c"
                with open(file_path, "w") as file:
                    file.write(code)
                xgb_model.save_model('xgb_model_CFR.model')
         
# Report the total wall-clock run time of the script.
et = time.time()
elapsed_seconds = et - st
print("训练时间：", elapsed_seconds)