import pandas as pd
import re
from nltk.stem.porter import PorterStemmer
ps = PorterStemmer()
from bs4 import BeautifulSoup
from nltk.stem import WordNetLemmatizer

# Load the dataset and restrict it to the columns and drugs of interest.
pd.set_option('display.max_rows', None)
data = pd.read_csv(r"C:\Users\Aiomi\Desktop\drug_text_data\drug_text_data\train_F3WbcTw.csv")
# Keep columns 2-4 (0-based slice 1:4). NOTE(review): the later code requires
# that 'text' and 'drug' survive this slice — confirm against the CSV header.
data = data.iloc[:, 1:4]

print(data["drug"].value_counts())

# Keep only rows for these ten drugs. Series.isin() is behaviorally identical
# to the original chain of '|'-ed equality tests, just readable.
DRUGS = ['ocrevus', 'gilenya', 'ocrelizumab', 'entyvio', 'humira',
         'fingolimod', 'remicade', 'opdivo', 'tarceva', 'cladribine']
data = data[data['drug'].isin(DRUGS)]

# Per-drug subsets, kept for interactive/downstream inspection.
data_ocrevus = data[data['drug'] == 'ocrevus']
data_gilenya = data[data['drug'] == 'gilenya']
data_ocrelizumab = data[data['drug'] == 'ocrelizumab']
data_entyvio = data[data['drug'] == 'entyvio']
data_humira = data[data['drug'] == 'humira']
data_fingolimod = data[data['drug'] == 'fingolimod']
data_remicade = data[data['drug'] == 'remicade']
data_opdivo = data[data['drug'] == 'opdivo']
data_tarceva = data[data['drug'] == 'tarceva']
data_cladribine = data[data['drug'] == 'cladribine']

from nltk.corpus import stopwords

# Build the stop-word collection once as a set: membership tests inside the
# cleaning loop are O(1) instead of O(n) on a list.
stop = set(stopwords.words('english'))
# One shared lemmatizer instead of constructing a new one per document.
_lemmatizer = WordNetLemmatizer()


def clean_data(raw_data):
    """Clean one raw review for bag-of-words modelling.

    Steps: strip HTML, keep letters only, lowercase, drop English stop
    words, lemmatize, and re-join the tokens with single spaces.

    Parameters
    ----------
    raw_data : str
        Raw (possibly HTML-formatted) review text.

    Returns
    -------
    str
        Space-joined cleaned tokens.
    """
    # 1. Remove HTML markup.
    text = BeautifulSoup(raw_data, 'html.parser').get_text()
    # 2. Replace every non-letter with a space.
    text = re.sub('[^a-zA-Z]', ' ', text)
    # 3. Lowercase and tokenise on whitespace.
    tokens = text.lower().split()
    # 4. Drop stop words.
    meaningful = [w for w in tokens if w not in stop]
    # 5. Lemmatize each remaining token.
    lemmas = [_lemmatizer.lemmatize(w) for w in meaningful]
    # 6. Re-join into a single string.
    return ' '.join(lemmas)

# Clean every review and store the result in a new column.
data["updated_text"] = data["text"].apply(clean_data)

x = data['updated_text']
y = data['drug']

from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)  # drug name -> integer class id

# Hold out 15% as a test set; seed 42 for reproducibility.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.15, random_state=42)
print(f'Rows in train set: {len(x_train)}\nRows in test set: {len(x_test)}')

# Carve a further 15% of the training data out as a validation set.
# (The duplicate re-import of train_test_split was removed; it is in scope.)
# NOTE(review): the classes look imbalanced — consider stratify=y in both
# splits; left unchanged here to preserve the exact original splits.
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.15, random_state=42)
print(f'Rows in train set: {len(x_train)}\nRows in valid set: {len(x_valid)}')

from sklearn.feature_extraction.text import CountVectorizer

# Bag-of-words features, capped at the 2500 most frequent terms.
cv = CountVectorizer(max_features=2500)
# Fit the vocabulary on the training split only, then transform the other
# splits with that fixed vocabulary (no train/test leakage). The matrices
# are kept sparse: every estimator used below (XGBoost, MLP, SVC, KNN)
# accepts scipy sparse input, so the .toarray() densification was pure
# memory waste and has been dropped — predictions are unchanged.
x_train = cv.fit_transform(x_train)
x_valid = cv.transform(x_valid)
x_test = cv.transform(x_test)

from sklearn.metrics import classification_report, confusion_matrix
from xgboost import XGBClassifier


# Gradient-boosted trees baseline.
clf1 = XGBClassifier(
    objective='multi:softmax',    # multi-class objective, hard-label output
    num_class=len(le.classes_),   # derive class count from the label encoder
                                  # instead of hard-coding 10
    max_depth=6,                  # maximum tree depth
    learning_rate=0.1,
    n_estimators=100,             # number of boosting rounds
)
clf1.fit(x_train, y_train)

# Evaluate on the held-out test split.
y_pred = clf1.predict(x_test)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))

from sklearn.neural_network import MLPClassifier

# Single-hidden-layer perceptron baseline. Hyper-parameters are gathered
# into one mapping so they can be scanned at a glance; values are the same
# as before.
_mlp_kwargs = {
    'hidden_layer_sizes': (128,),
    'max_iter': 100,
    'alpha': 1e-4,
    'solver': 'adam',
    'verbose': 10,
    'tol': 1e-4,
    'random_state': 42,
    'learning_rate_init': .1,
}
clf2 = MLPClassifier(**_mlp_kwargs)

# Fit on the training split.
clf2.fit(x_train, y_train)

# Score on the held-out test split.
y_pred = clf2.predict(x_test)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))

from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix

# Linear-kernel support vector machine baseline.
svm = SVC(kernel='linear', C=1, random_state=42)

# Fit on the training split.
svm.fit(x_train, y_train)

# Predict on the validation split — note this model, unlike the others in
# this file, is scored on validation rather than test data.
y_pred = svm.predict(x_valid)

# Report per-class metrics and the confusion matrix.
print(classification_report(y_valid, y_pred))
print(confusion_matrix(y_valid, y_pred))

from sklearn.neighbors import KNeighborsClassifier
# (Duplicate re-imports of train_test_split and the metrics functions were
# removed — both are already in scope from earlier in the file.)

# k-nearest-neighbours baseline with k=5.
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(x_train, y_train)

# Evaluate on the held-out test split.
y_pred = knn.predict(x_test)

print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))