import nltk
import re
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Download the NLTK resources needed below (tokenizer models, stopword
# list, and the WordNet database used by the lemmatizer).
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
# Open Multilingual WordNet: required by WordNetLemmatizer on newer
# NLTK releases; without it lemmatize() raises a LookupError.
nltk.download('omw-1.4')

# Example text for a tiny relation-classification demo.
sentence = "Apple is headquartered in Cupertino, California. Tim Cook is the CEO of Apple."

# Split the text into individual sentences (one training sample each).
sentences = sent_tokenize(sentence)

# English stopword set, consumed by preprocess() below.
stop_words = set(stopwords.words('english'))

# Shared lemmatizer instance, consumed by preprocess() below.
lemmatizer = WordNetLemmatizer()

# 数据预处理
def preprocess(text):
    text = re.sub(r'[^\w\s]', '', text)  # 移除特殊字符
    tokens = word_tokenize(text.lower())  # 分词并转换为小写
    tokens = [lemmatizer.lemmatize(token) for token in tokens if token not in stop_words]  # 词形还原并去除停用词
    return ' '.join(tokens)

preprocessed_sentences = [preprocess(sent) for sent in sentences]

# Training data: one preprocessed sentence per relation label.
# NOTE(review): only two training samples — this is a toy demo, not a
# model that can generalize.
X_train = preprocessed_sentences
y_train = ['headquartered_in', 'CEO_of']

# Feature extraction: bag-of-words term counts.
vectorizer = CountVectorizer()
X_train_vectorized = vectorizer.fit_transform(X_train)

# Train the model. with_mean=False keeps the sparse count matrix sparse
# (mean-centering would densify it), so only per-feature scaling is done.
classifier = make_pipeline(StandardScaler(with_mean=False), SVC(kernel='linear'))
classifier.fit(X_train_vectorized, y_train)

# Test data: a single unseen sentence, preprocessed the same way.
test_sentence = "Tim Cook works in California."
preprocessed_test_sentence = preprocess(test_sentence)
X_test = [preprocessed_test_sentence]

# Feature extraction using the vocabulary learned from training
# (transform only — never fit on test data).
X_test_vectorized = vectorizer.transform(X_test)

# Predict the relation label for the test sentence and report it.
predicted_relation = classifier.predict(X_test_vectorized)
print("Predicted relation:", predicted_relation[0])
