import zhconv
import jieba
from snownlp import SnowNLP
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import re


# Noise tokens stripped from names before comparison: generic league /
# division words plus the literal bracket characters, none of which carry
# identity information.
stop_words = ["联赛", "组", "级", "族", "足球", "足联", "国家", "(", ")"]
# Build a single alternation regex from the stop words.
pattern = "|".join(map(re.escape, stop_words))  # re.escape protects the literal "(" / ")" metacharacters
# Division markers (1st/2nd/3rd/4th tier) that get an extra weight so that
# different tiers of the same league do not match each other.
target_words = ["甲", "乙", "丙", "丁"]
# Variant spelling -> canonical spelling map, applied AFTER segmentation —
# hence the keys containing a space (e.g. "二 队"), which only occur in
# segmented text.  NOTE(review): dict order is the replacement order; keep
# spaced variants next to their unspaced twins.
equivalent_words = {
    "纽西兰": "新西兰",
    "欧罗巴": "欧洲",
    "杯赛": "杯",
    "二 队": "后备",
    "二队": "后备",
    "防卫 队": "卫队",
    "防卫队": "卫队",
    "联队": "联",
    "东部": "东",
    "南部": "南",
    "西部": "西",
    "北部": "北",
    "威尔士": "威尔斯",
    "蓝十字": "阿苏尔",
    "FOC法斯塔": "法斯塔",
    "尼科平斯": "尼科平斯",  # NOTE(review): identity mapping — a no-op; confirm whether a different target was intended
    "尼雪平": "尼科平斯",
    "羊毛 科技": "羊绒 工业",
}

# Column weight for division markers; raised to 5 later when two or more
# vocabulary tokens contain a division marker.
weight = 1

# Sample name pairs collected while tuning the matcher.  To try a pair,
# assign its two names to text1 / text2 below:
#   巴西甲组联赛 / 巴西乙级联赛
#   墨西哥女子足球甲级联赛 / 墨西哥女子超级联赛
#   纽西兰北部联赛 / 新西兰北部联赛
#   智利甲组联赛 / 委内瑞拉甲级联赛
#   欧洲联赛 / 欧足联欧罗巴联赛
#   阿根廷杯 / 阿根廷杯赛
#   澳大利亚NPL新南威尔士州 / 澳大利亚新南威尔斯国家超级联赛
#   瓜达拉哈拉芝华士(女) / 瓜达拉哈拉
#   奥克兰(后备) / 奥克兰FC二队
#   巴洛克斯 / 巴拉卡斯中央
#   贝尔格拉诺防卫队 / 贝尔格拉诺卫队
#   诺士郡 / 诺茨郡
#   塞雷那 / 拉塞雷纳体育
#   羊绒工业 / 羊毛科技
#   遊牧民族联 / 游牧民联
#   竞技联 / 竞技联队

# The pair currently under test.
text1 = "西斯普林斯"
text2 = "西部斯普林斯"

def preprocess_text(text, equivalent_words):
    """Rewrite *text* so every variant spelling becomes its canonical form.

    Replacements are applied in the dict's insertion order, so earlier
    entries may feed later ones.
    """
    for variant, canonical in equivalent_words.items():
        text = text.replace(variant, canonical)
    return text

def cut_words(text):
    """Normalize one name and return its tokens joined by single spaces.

    Pipeline: Traditional -> Simplified conversion, removal of the
    module-level stop-word ``pattern``, jieba segmentation, then a second
    segmentation pass of the jieba output through SnowNLP.
    """
    simplified = zhconv.convert(text, 'zh-hans')
    # Strip stop words before segmenting so they cannot skew the tokens.
    stripped = re.sub(pattern, "", simplified)
    jieba_tokens = " ".join(jieba.cut(stripped))
    # SnowNLP re-segments the jieba output for a finer-grained split.
    return " ".join(SnowNLP(jieba_tokens).words)

# Segment each raw name once and reuse the result — the original called
# cut_words() twice per text (once for the print, once for `documents`).
seg1 = cut_words(text1)
seg2 = cut_words(text2)
print(f'{text1} : {seg1}')
print(f'{text2} : {seg2}')

# The two segmented names form the document pair to vectorize.
documents = [seg1, seg2]
print(f"分词后: {documents}")
# Canonicalize variant spellings AFTER segmentation — equivalent_words has
# space-bearing keys (e.g. "二 队") that only exist in segmented text.
documents = [preprocess_text(text, equivalent_words) for text in documents]
print(f"预处理后: {documents}")

# Bag-of-words term counts (CountVectorizer — the old comment wrongly said
# TfidfVectorizer).  token_pattern keeps single-character CJK tokens, which
# the default pattern would discard.
vectorizer = CountVectorizer(token_pattern=r'(?u)\b\w+\b')
count_matrix = vectorizer.fit_transform(documents)
# Label fixed: this is the raw term-count matrix; no cosine computed yet.
print("\n词频矩阵：", count_matrix.toarray())

# Vocabulary shared by both documents, in column order of the matrix.
vocabulary = vectorizer.get_feature_names_out()
print("词汇表：", vocabulary)
print(f"对比词汇表: {documents}")

# Vocabulary tokens that contain a division marker (甲/乙/丙/丁).
matches = [item for item in vocabulary if any(keyword in item for keyword in target_words)]

# Boost division markers only when at least two vocabulary tokens carry
# one — i.e. the pair likely names different tiers of the same league.
weight = 5 if len(matches) >= 2 else 1
print(f"{matches} | 权重 >>> : {weight}")

if weight != 1:  # skip the numeric no-op (and the costly sparse column writes)
    vocab_list = list(vocabulary)  # hoisted: was rebuilt on every loop pass
    for word in target_words:
        if word in vocab_list:
            count_matrix[:, vocab_list.index(word)] *= weight

print("加权后词频矩阵：")
print(count_matrix.toarray())

# Cosine similarity between the two (weighted) document rows.
cosine_sim = cosine_similarity(count_matrix[0:1], count_matrix[1:2])[0][0]
print("\n余弦相似度：", cosine_sim)

# Final decision rule: are the two names the same competition?
def is_same_match(similarity, threshold=0.8):
    """Return True when the cosine similarity clears *threshold*."""
    return threshold <= similarity

print(f"是否是同一场比赛: {is_same_match(cosine_sim)}")