import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score


# Load the protein dataset.
# Raw string avoids the invalid '\p' escape sequence in the Windows path
# (a SyntaxWarning on recent Python); the runtime value is unchanged because
# '\p' already fell back to a literal backslash + 'p'.
train_df = pd.read_csv(r'D:\protein_data.csv')

print(train_df.sample(5))  # show 5 random rows of the dataset

print("Shape of the dataset: ", train_df.shape)  # (row count, column count)

print("Data types of the columns: ", train_df.dtypes)  # dtype of every column

print("Missing values in each column: ", train_df.isnull().sum())  # NaN count per column

# Handle missing values: drop any row containing at least one NaN.
train_df = train_df.dropna()

# NOTE(review): 'classification' is presumably a text/categorical column —
# most sklearn estimators cannot consume raw strings, so it must be encoded
# before fitting a model on X_train. TODO confirm against the CSV schema.
X = train_df[['classification', 'crystallizationTempK']]
y = train_df['macromoleculeType']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)


# fig = plt.figure()#创建图形底板
#
# df2 =  train_df[(train_df['classification'] == "HYDROLASE") | (train_df['classification'] == "TRANSFERASE")
#            | (train_df['classification'] == "OXIDOREDUCTASE") | (train_df['classification'] == "LYASE")
#            | (train_df['classification'] == "IMMUNE SYSTEM") | (train_df['classification'] == "TRANSCRIPTION") | (train_df['classification'] == "TRANSPORT PROTEIN")]
#
# df2["classification"].value_counts().plot.barh()
# # plt.savefig('./sample1.jpg')#第一个图形是一个水平柱状图，展示了不同分类下的样本数量
# sns.barplot( x = "classification", y = df2.classification.index, data = df2 )
#
#
# # plt.savefig('./sample2.jpg')#第二个图形是一个堆叠柱状图，展示了不同分类下的结构分子量
# df2.groupby(["classification"])["structureMolecularWeight",].std()
# plt.figure(figsize=(12,8))
# ax = sns.barplot( x = "classification", y = "structureMolecularWeight", data = df2 )
#
# # plt.savefig('./sample3.jpg')第三个图形是一个带有分类和宏观分子类型的柱状图，展示了不同分类和宏观分子类型下的平均结构分子量
# df2.groupby(["classification","macromoleculeType"])["structureMolecularWeight"].mean()
# plt.figure(figsize=(15,9))
# ax = sns.barplot( x = "classification", y = "structureMolecularWeight",hue="macromoleculeType", data = df2 )#柱状图
#
#
# # plt.savefig('./sample4.jpg')#第四个图形是一个密度图，展示了不同分类下的结构分子量的核密度，structureMolecularWeight 是一个表示结构分子量的数值变量
# (sns.FacetGrid(df2,hue= "classification",height= 7.5,xlim= (0, 300000)).map(sns.kdeplot, "structureMolecularWeight",fill = True).add_legend())
#
# # plt.savefig('./sample5.jpg')
# #散点图矩阵展示了不同特征之间的相关性和回归关系。
# sns.pairplot(df2,kind="reg")
#
# plt.show()
# # plt.savefig('./sample.jpg')

'''

Feature engineering — approach 1 (特征工程一)

'''
# Chunk size for streaming the CSV; adjust to the machine's memory capacity.
chunksize = 10000

# Accumulators for the per-chunk results.
X_selected = None
Y_all = []

# Step 2: categorical columns to one-hot encode.
# BUG FIX: 'macromoleculeType' (the target) used to be in this list, so
# get_dummies replaced it with dummy columns, the `in chunk.columns` check
# below could never succeed, and no chunk ever contributed any data —
# the script always ended with "No features selected.".
categorical_columns = ['classification', 'experimentalTechnique', 'crystallizationMethod', 'pdbxDetails']

# Raw string avoids the invalid '\p' escape sequence in the Windows path.
for chunk in pd.read_csv(r'D:\pdb_data_no_dups.csv', chunksize=chunksize):
    # Step 1: impute missing numeric values with the per-chunk column mean.
    chunk = chunk.fillna(chunk.mean(numeric_only=True))

    # Skip chunks that do not carry the target column at all.
    if 'macromoleculeType' not in chunk.columns:
        continue

    # Extract the target BEFORE encoding. Rows with a missing label are
    # useless for supervised selection and would make f_classif raise.
    chunk = chunk.dropna(subset=['macromoleculeType'])
    if chunk.empty:
        continue
    Y = chunk['macromoleculeType']

    # One-hot encode whichever categorical columns are present in this chunk.
    present = [c for c in categorical_columns if c in chunk.columns]
    chunk = pd.get_dummies(chunk, columns=present, drop_first=True)

    # Step 3: feature selection (SelectKBest). f_classif needs numeric input,
    # so drop the target and any remaining non-numeric columns first.
    X = chunk.drop(columns=['macromoleculeType']).select_dtypes(include='number')
    if X.shape[1] == 0:
        continue

    # Cap k at the number of available features (SelectKBest raises otherwise).
    skb = SelectKBest(score_func=f_classif, k=min(8, X.shape[1]))
    X_selected_chunk = skb.fit_transform(X, Y)

    if X_selected_chunk.shape[1] > 0:
        # NOTE(review): SelectKBest is re-fit on every chunk, so different
        # chunks may contribute DIFFERENT columns; a single global fit over
        # all rows would be statistically sounder. TODO confirm intent.
        if X_selected is None:
            X_selected = X_selected_chunk
        else:
            X_selected = np.concatenate((X_selected, X_selected_chunk), axis=0)
        Y_all.extend(Y)

# Step 4: feature transformation (e.g. log transform for skewed features)
# could be applied to specific columns here if needed.

if X_selected is None:
    # No chunk produced usable features (e.g. target column missing).
    print("No features selected.")
else:
    # Step 5: fill any residual NaN with the column mean.
    X_selected = pd.DataFrame(X_selected).fillna(pd.DataFrame(X_selected).mean()).to_numpy()

    # Ensure X_selected is a 2-D array.
    if X_selected.ndim == 1:
        X_selected = X_selected.reshape(-1, 1)

    # Step 6: standardize the selected features.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X_selected)

    # Step 7: model building — train/test split, training, evaluation.
    X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y_all, test_size=0.2, random_state=42)

    clf = RandomForestClassifier(n_estimators=100, random_state=42)
    clf.fit(X_train, Y_train)
    Y_pred = clf.predict(X_test)

    # Evaluate the model on the held-out split.
    accuracy = accuracy_score(Y_test, Y_pred)
    print("Accuracy:", accuracy)

'''

Feature engineering — approach 2 (特征工程二)

'''
from sklearn.feature_selection import SelectKBest, RFE, SelectFromModel

# Columns to remove: identifiers and free-text fields that are not usable
# as numeric features.
delete = ['structureId', 'classification', 'experimentalTechnique', 'crystallizationMethod', 'pdbxDetails', 'publicationYear']
# Drop the listed columns (non-destructively).
data = train_df.drop(columns=delete, inplace=False)
print(data.sample(5))  # show 5 random rows after the drop

# BUG FIX: the target must be separated BEFORE scaling — standardizing the
# class labels would destroy their meaning as discrete categories — and it
# must be excluded from the candidate features, otherwise SelectKBest would
# trivially pick the (perfectly correlated) target itself.
Y = data['macromoleculeType']
# Keep only numeric candidate features; StandardScaler cannot handle text.
feature_data = data.drop(columns=['macromoleculeType']).select_dtypes(include='number')

# Feature scaling: standardization of the features only.
scaler = StandardScaler()
df = pd.DataFrame(scaler.fit_transform(feature_data), columns=feature_data.columns)
print(df.sample(5))  # show 5 random rows of the scaled features

# Filter-style feature selection with SelectKBest (ANOVA F-test).
X_selected = None
Y_all = []
X = df
# Cap k at the number of available features (SelectKBest raises otherwise).
skb = SelectKBest(score_func=f_classif, k=min(5, X.shape[1]))
X_selected_df = skb.fit_transform(X, Y)

if X_selected_df.shape[1] > 0:
    # Single pass here, so this simply stores the selected matrix; the
    # concatenate branch only matters if this section is ever looped.
    if X_selected is None:
        X_selected = X_selected_df
    else:
        X_selected = np.concatenate((X_selected, X_selected_df), axis=0)
    Y_all.extend(Y)

# Fill any residual NaN with the column mean.
X_selected = pd.DataFrame(X_selected).fillna(pd.DataFrame(X_selected).mean()).to_numpy()

# Ensure X_selected is a 2-D array.
if X_selected.ndim == 1:
    X_selected = X_selected.reshape(-1, 1)

print(X_selected)

