import pandas as pd
from Utils.Config import EXTRACT_OPTION,DATASET_PATH
from sklearn.model_selection import train_test_split
from dataanalyze.plot import plotbar,plothistogram
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
import numpy as np
class base_extractor:
    """Merge the base train/verify CSV datasets and build a feature matrix
    (``self.X``) and label vector (``self.Y``) for zombie-company prediction.

    Depending on ``option`` the extractor loads either the base company
    table, patent info, money info, or yearly-report info, joins it with
    the ``flag`` label column, and optionally re-balances the positive
    (zombie) / negative (non-zombie) samples.

    NOTE(review): file paths are joined with a literal '\\' separator, so
    this code is Windows-only as written; every CSV is read with gbk
    encoding and uses Chinese column names.
    """

    # balanceop sets the positive/negative sample handling:
    #   1 - balance the classes by dropping surplus negative (non-zombie) samples
    #   2 - keep the original, unbalanced distribution as-is
    #   3 - pad negatives up to the positives by duplication
    #       (mentioned in the original comment but NOT implemented below)
    def __init__(self, option=EXTRACT_OPTION.base_train_total, dropnull=True, balanceop = 1):
        """Load the CSVs selected by ``option``, engineer features, and set
        ``self.X`` / ``self.Y``.

        Parameters
        ----------
        option : EXTRACT_OPTION
            Which dataset family to extract (base table, patent info,
            money info, or yearly report).
        dropnull : bool
            NOTE(review): accepted but never read below — NaN rows are
            unconditionally dropped regardless of this value.
        balanceop : int
            Sample-balancing mode; only 1 and 2 are handled (see the
            class-level comment).
        """
        # First read base_train_sum and base_verify1 and merge them.
        self.base_verify1 = pd.read_csv(DATASET_PATH+'\\'+'base_verify1.csv',encoding='gbk').dropna()
        # Drop the controller-ID column (too many distinct values to encode;
        # see the note further down).
        del self.base_verify1['控制人ID']
        self.base_train_sum = pd.read_csv(DATASET_PATH+'\\'+'base_train_sum.csv',encoding='gbk').dropna()
        self.base_train_total = pd.concat([self.base_verify1,self.base_train_sum]).reset_index(drop=True)
        # Info of the merged base table
        # print('初始融合表数据的信息')
        # print(self.base_train_total.info())
        # Positive/negative sample ratio of the merged base table
        # print('初始融合表正负样本比例信息')
        # print( self.base_train_total['flag'].value_counts())
        # flag + ID pairs; used by the other option branches to attach labels
        # to their tables via merge-on-ID.
        self.data_df_flag =  self.base_train_total[['flag','ID']]
        if option==EXTRACT_OPTION.base_train_total:
            print('base mode')
            # NOTE(review): unused local — never referenced after assignment.
            base_verify1_dir = DATASET_PATH+'\\'+'base_verify1.csv'
            self.data_df = self.base_train_total
            data_df = self.data_df
            data_df =  data_df.dropna()
            # print(data_df.info())
            # y is the label column
            data_df_flag = data_df['flag']
            # data_df_out_flag = data_df.drop('flag',axis=1)
            # NOTE(review): the drop above is commented out, so 'flag' stays
            # inside the feature frame here and travels into data_res (it is
            # removed later in the balanceop block).
            data_df_out_flag = data_df
            # np.where returns a 1-tuple of index arrays; iloc accepts it.
            positiveindex = np.where(data_df['flag'] == 1)
            positivesample = data_df.iloc[positiveindex]


            print(positivesample['注册时间'].value_counts())

            # Begin feature engineering
            # Step 1: one-hot encode the registration year (注册时间).
            '''
            1 年份独热编码
            '''

            data_res  = pd.get_dummies(data_df_out_flag['注册时间'],prefix='year')
            data_res = pd.concat([data_df_out_flag['flag'],data_res], axis=1)
            # Step 2: min-max normalize the registered capital (注册资本).
            '''
            2 注册资本归一化
            '''
            registered_capital = data_df_out_flag['注册资本']
            registered_capital_normal= (registered_capital-registered_capital.min())/(registered_capital.max()-registered_capital.min())
            data_res = pd.concat([data_res,registered_capital_normal],axis=1)
            # print(data_res)
            # Step 3: one-hot encode the industry (行业).
            '''
            3 行业独热编码
            '''
            Industry_category =  pd.get_dummies(data_df_out_flag['行业'])
            data_res = pd.concat([data_res,Industry_category],axis=1)

            # Step 4: one-hot encode the region (区域).
            '''
            4区域独热编码
            '''
            region_catrgory = pd.get_dummies(data_df_out_flag['区域'])
            data_res = pd.concat([data_res,region_catrgory],axis=1)
            # print(data_res)

            # Step 5: one-hot encode the company type (企业类型).
            '''
            5 企业类型独热编码
            '''
            company_category =  pd.get_dummies(data_df_out_flag['企业类型'])
            data_res = pd.concat([data_res, company_category], axis=1)

            # print(data_df_out_flag['注册时间'])
            # print(data_res)
            # Step 6: one-hot encode the controller type (控制人类型).
            '''
            6 控制人类型独热编码
            '''
            controller_category = pd.get_dummies(data_df_out_flag['控制人类型'])

            data_res = pd.concat([data_res, controller_category], axis=1)

            # Controller ID has too many distinct values to one-hot encode,
            # and would likely perform poorly anyway.
            '''
            控制人ID太多了没法独热编码，效果定也比较差
            '''
            # Step 7: append the controller shareholding ratio (控制人持股比例) as-is.
            '''
            7 控制人持股比例 加进来
            '''
            data_res = pd.concat([data_res,data_df_out_flag['控制人持股比例']],axis=1)
            # Finally pin down X and Y.
            '''
            最后把X，Y明确一下
            '''
            # NOTE(review): self.X still contains the 'flag' column at this
            # point; it is deleted in the balanceop block below.
            self.X = data_res
            self.Y = data_df_flag
            # Positive/negative balancing happens after the option branches.

            # print(data_res)
            # print(data_res)

            print()

            # Registered-capital / distribution exploration (kept for reference):
            # plothistogram(data_df_out_flag['注册资本'],'注册资本','频率','分布')
            # # data_zombie_df = data_df.loc[data_df['flag']==1]
            # # data_nozombie_df = data_df.loc[data_df['flag']==0]
            #
            #
            # # industry
            # # industry_cat_dict = dict(data_df_out_flag['行业'].value_counts())
            # # plotbar(list(industry_cat_dict.keys()), list(industry_cat_dict.values()), '行业分布', '行业',
            # #         '数量', isrotate=True)
            # # region
            # # region_dict = dict(data_df_out_flag['区域'].value_counts())
            # # print(region_dict)
            # # plotbar(list(region_dict.keys()), list(region_dict.values()), '区域分布', '区域',
            # #        '数量', isrotate=True)
            # # company_cat_dict = dict(data_df_out_flag['企业类型'].value_counts())
            # # print(company_cat_dict)
            # # plotbar(list(company_cat_dict.keys()), list(company_cat_dict.values()), '企业类型分布', '类型',
            # #         '数量', isrotate=True)
            # # manipulator_cat_dict = dict(data_df_out_flag['控制人类型'].value_counts())
            # # print(manipulator_cat_dict)
            # # plotbar(list(manipulator_cat_dict.keys()), list(manipulator_cat_dict.values()), '控制人类型分布', '类型',
            # # '数量', isrotate=False)
            # # controller shareholding ratio
            # # plothistogram(data_df_out_flag['控制人持股比例'], '控制人持股比例分布', '类型',  '数量')
            # # print(data_df_out_flag['控制人持股比例'].value_counts())
            # # manipulator_share_ratio_dict = dict(data_df_out_flag['控制人持股比例'].value_counts())
            # # plotbar(list(manipulator_share_ratio_dict.keys()), list(manipulator_share_ratio_dict.values()), '控制人持股比例', '类型',
            # #                  '数量', isrotate=True)
            # x_train, x_test, y_train, y_test = train_test_split(data_df_out_flag, data_df_flag, test_size=0.30, random_state=1)
            # print(x_train.shape)
            # print(pd.DataFrame(y_train).describe())
            #
            # hitmapTemp = data_df
            # hitmapData = hitmapTemp.corr()
            # f, ax = plt.subplots(figsize=(12, 12))
            # print(hitmapData)
            # sns.heatmap(hitmapData, vmax=1, square=True)
            # plt.title('note: lighter the block is,stronger the relevance of two behavior type is ')
            # plt.show()
        elif option==EXTRACT_OPTION.patient_total:
            # Patent info: merge the patent tables with the flag column on ID.
            self.knowledge_train_sum = pd.read_csv(DATASET_PATH+'\\'+'knowledge_train_sum.csv',encoding='gbk')
            self.paient_information_verify1 = pd.read_csv(DATASET_PATH+'\\'+'paient_information_verify1.csv',encoding='gbk')
            self.patient_total = pd.concat([self.knowledge_train_sum,self.paient_information_verify1],axis=0)
            self.knowledge_train_sum_withflag = pd.merge(self.data_df_flag, self.patient_total, on='ID').dropna()
            del  self.knowledge_train_sum_withflag['ID']
            print('专利信息')
            print(self.knowledge_train_sum_withflag['flag'].value_counts())
            self.knowledge_train_sum_flag = self.knowledge_train_sum_withflag['flag']
            self.X = self.knowledge_train_sum_withflag
            self.Y = self.knowledge_train_sum_flag
            # Side effect: dumps the merged patent table to the working directory.
            self.knowledge_train_sum_withflag.to_csv('patent.csv',index=False)
        elif option == EXTRACT_OPTION.money_info_total:
            # Money info: merge the financial tables with the flag column on ID.
            self.money_report_train_sum = pd.read_csv(DATASET_PATH+'\\'+'money_report_train_sum.csv',encoding='gbk')
            self.money_information_verify1 = pd.read_csv(DATASET_PATH+'\\'+'money_information_verify1.csv',encoding='gbk')
            self.money_info_total = pd.concat([self.money_report_train_sum,self.money_information_verify1],axis=0)
            self.money_info_total_withflag = pd.merge(self.data_df_flag,self.money_info_total,on='ID').dropna()
            del self.money_info_total_withflag['ID']
            print('跟钱有关的信息的')
            # print(self.money_info_total_withflag['flag'].value_counts())
            # Drop the year column first.
            del self.money_info_total_withflag['year']
            self.X = self.money_info_total_withflag
            self.Y = self.money_info_total_withflag['flag']
            # Side effect: dumps the merged money table to the working directory.
            self.money_info_total_withflag.to_csv('moneyinfowithflag.csv',index=False)
        elif option == EXTRACT_OPTION.year_report_total:
            # Yearly-report info: merge the report tables with the flag column on ID.
            self.year_report_train_sum = pd.read_csv(DATASET_PATH+'\\'+'year_report_train_sum.csv',encoding='gbk')
            self.year_report_verify1 = pd.read_csv(DATASET_PATH+'\\'+'year_report_verify1.csv',encoding='gbk')
            self.year_report_total = pd.concat([self.year_report_train_sum,self.year_report_verify1],axis=0)
            self.year_report_total_withflag = pd.merge(self.data_df_flag,self.year_report_total,on='ID').dropna()
            del  self.year_report_total_withflag['ID']
            print('年报信息')
            print(self.year_report_total_withflag['flag'].value_counts())
            del self.year_report_total_withflag['year']
            self.X = self.year_report_total_withflag
            self.Y = self.year_report_total_withflag['flag']
        if balanceop == 1:
            # Careful here!
            # np.where returns a 1-tuple; element [0] is the array of row
            # positions that satisfy the condition.
            positiveindex = np.where(self.X['flag'] == 1)
            negativeindex = np.where(self.X['flag'] == 0)
            negetivesample = self.X.iloc[negativeindex]
            # Shuffle the negative samples.
            # NOTE(review): the shuffle is commented out, so balancing simply
            # takes the FIRST len(positives) negative rows.
            # negetivesample = negetivesample.sample(frac=1).reset_index(drop=True)
            self.X = pd.concat([self.X.iloc[positiveindex], negetivesample[:len(positiveindex[0])]])
            data_index = self.X['flag']
            print('当前融合信息示例')
            print(self.X)
            # print(data_res.info())
            # Remove the label from the features; overwrite Y with the
            # balanced labels.
            del self.X['flag']
            self.Y = data_index
            print('数据的形状,',self.X.shape)
            print('标签的形状,',self.Y.shape)

        elif balanceop == 2:
            # Keep the original distribution; just strip the label column
            # from the features. self.Y (extracted above) is presumably
            # unaffected by this del — verify against pandas semantics.
            del self.X['flag']



    # Explore the geographic factor's influence on zombie status — the data
    # looks fairly uniform across regions, so no regional bias to worry about.
    def extract_geo_info(self):
        """Print per-region value counts split by zombie / non-zombie flag.

        Relies on ``self.data_df``, which is only set in the
        ``base_train_total`` option branch of ``__init__``.
        """
        data_geo = self.data_df[['区域','flag']]
        data_geo_zombie = data_geo.loc[data_geo['flag']==1]
        data_geo_nonzombie = data_geo.loc[data_geo['flag']==0]
        print('分析一下地理信息')
        print('僵尸')
        print(data_geo_zombie['区域'].value_counts())
        print('非僵尸')
        print(data_geo_nonzombie['区域'].value_counts())
        # geo_dict = {}
        # for each in data_geo['区域'].unique():
        #     geo_dict[each] = data_geo[each].value_counts()
        # print(geo_dict)
    def extract_ParticularYear_info(self):
        """Split registration-year data by flag.

        NOTE(review): the prints are commented out, so this method currently
        computes the splits and discards them (no output, no return).
        """
        data_year = self.data_df[['注册时间','flag']]
        data_year_zombie = data_year.loc[data_year['flag']==1]
        data_year_nonzombie = data_year.loc[data_year['flag']==0]
        # print(data_year_zombie['注册时间'].value_counts())
        # print(data_year_nonzombie['注册时间'].value_counts())
    def extract_industry_info(self):
        """Print per-industry value counts split by zombie / non-zombie flag.

        Relies on ``self.data_df``, which is only set in the
        ``base_train_total`` option branch of ``__init__``.
        """
        data_geo = self.data_df[['行业', 'flag']]
        data_geo_zombie = data_geo.loc[data_geo['flag'] == 1]
        data_geo_nonzombie = data_geo.loc[data_geo['flag'] == 0]
        print('分析一下行业信息')
        print(data_geo_zombie['行业'].value_counts())
        print(data_geo_nonzombie['行业'].value_counts())




