import zhconv
import jieba
from snownlp import SnowNLP
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from collections import Counter
import re
import math
from decimal import Decimal, ROUND_HALF_UP

from data.crawler_data import pattern, target_words, equivalent_words, hedges, profit_scope

class Analysis:
    """Odds-hedging analyser over crawled game data.

    Merges games gathered from one or more crawler sources, matches records
    that describe the same real-world fixture (equal kick-off time plus
    league/team equality or text similarity), then checks whether staking on
    the best odds of every leg of a hedge across sources yields a profit.
    """

    def __init__(self, *args):
        """Collect games from any number of source lists.

        Each positional argument is an iterable of game dicts; keys used in
        this class: 'source', 'kick_off_time', 'league', 'team_home',
        'team_away', 'team', 'game_odds'.
        """
        self.games = []
        for arg in args:
            self.games.extend(arg)
        # Number of distinct non-None sources expressed as the number of
        # source *pairs* C(n, 2); it is 0 when fewer than two sources exist.
        distinct_sources = {game['source'] for game in self.games if game['source'] is not None}
        self.unique_sources = self.combination(len(distinct_sources), 2)
        # Single-source mode: match games by exact equality instead of text
        # similarity, and allow same-source (even self) pairings.
        self.sup_source = self.unique_sources == 0

    def combination(self, n: int, k: int) -> int:
        """Return the binomial coefficient C(n, k), or 0 when n < k."""
        # math.comb is C-implemented and avoids building huge factorials.
        return 0 if n < k else math.comb(n, k)

    def calculate(self):
        """Hedge analysis - main entry point."""
        # First-pass cleaning: pair up games describing the same fixture.
        all_games, tuples = self.cleaning_games()
        # Regroup the matched pairs into per-fixture game groups.
        clean_games = self.tuple_games(all_games, tuples)
        # Compute profit percentages and repackage the data for output.
        return self.crawler_odds(clean_games)

    def crawler_odds(self, clean_games):
        """Compute the profit percentage for every hedgeable group.

        For each matched game group and each hedge definition, picks the
        best odds per leg (via hedge_games), splits a fixed 1000-unit payout
        across the legs, and keeps groups whose profit exceeds profit_scope.

        Returns a list of result dicts sorted by profit, ascending.
        """
        results = []
        # Fixed target payout per leg; stake per leg = payout / odds.
        gain = Decimal('1000')
        for games in clean_games:
            for hedge in hedges:
                heds = self.hedge_games(games, hedge)
                if not heds:
                    continue
                # Require legs from at least two sources unless single-source.
                source_group = {item['source'] for item in heds}
                if len(source_group) <= 1 and not self.sup_source:
                    continue
                # Skip any group quoting odds of 0 (guards the division below).
                if any(Decimal(item['game_odds']['odds']) == 0 for item in heds):
                    continue
                total_cost = Decimal('0')
                tmps = []
                for h in heds:
                    game_odds = h['game_odds']
                    # Stake needed on this leg so that a win pays out `gain`.
                    cost = (gain / Decimal(game_odds['odds'])).quantize(
                        Decimal('0.01'), rounding=ROUND_HALF_UP)
                    tmps.append({
                        'kick_off_time': h['kick_off_time'],
                        'league': h['league'],
                        'team': h['team'],
                        'type': game_odds['type'],
                        'ratio': game_odds['ratio'],
                        'odds': game_odds['odds'],
                        'source': h['source'],
                        'cost': cost})
                    total_cost += cost
                # Guaranteed-return percentage relative to the total stake.
                profit = ((gain - total_cost) / total_cost * 100).quantize(
                    Decimal('0.01'), rounding=ROUND_HALF_UP)
                infos = [{
                    'type': tmp['type'],
                    'ratio': tmp['ratio'],
                    'odds': tmp['odds'],
                    'source': tmp['source'],
                } for tmp in tmps]
                icon = 'up' if profit > 0 else 'down'
                if profit > profit_scope:
                    results.append({
                        'kick_off_time': tmps[0]['kick_off_time'],
                        'league': tmps[0]['league'],
                        'team': tmps[0]['team'],
                        'profit': f'{profit}%',
                        'icon': icon,
                        'infos': infos,
                    })
        # Sort by numeric profit value, ascending.
        results.sort(key=lambda x: float(x['profit'].rstrip('%')))
        return results

    def percentage(self):
        """Placeholder - not implemented yet."""
        pass

    def hedge_games(self, games, hedges):
        """Pick the best odds for every leg of one hedge definition.

        NOTE: the `hedges` parameter shadows the module-level `hedges` list;
        here it is a single hedge definition, i.e. an iterable of odds keys
        that together form a complete hedge.

        Returns one entry per key - the game quoting the highest odds for
        that key - or [] when any key has no odds available in any game.
        """
        results = []
        for hedge in hedges:
            game_odds = []
            for game in games:
                result = next((item for item in game['game_odds'] if item['key'] == hedge), None)
                game_odds.append({'key': hedge, 'game_odds': result, 'kick_off_time': game['kick_off_time'], 'league': game['league'], 'team': game['team'], 'source': game['source']})
            # Keep only games that actually quote odds for this key.
            filtered_data = [item for item in game_odds if item['game_odds'] is not None]
            if filtered_data:
                # Highest odds for this leg across all sources.
                max_game = max(filtered_data, key=lambda x: float(x['game_odds']['odds']) if x['game_odds']['odds'] is not None else 0.0)
                results.append(max_game)
            # Success only when every key processed so far produced a leg.
            if len(results) == len(hedges):
                return results
        return []

    def _game_key(self, game) -> str:
        """Build the identity key 'home_away_source' for a game dict."""
        return f'{game["team_home"]}_{game["team_away"]}_{game["source"]}'

    def cleaning_games(self):
        """First-pass cleaning: pair up games that describe the same fixture.

        Compares every game against every other game; two games match when
        their kick-off times are equal and league/team names are equal
        (single source) or sufficiently similar (multiple sources).

        Returns (all_games, tuples):
          all_games - list of matched [game_i, game_j] pairs
          tuples    - groups of pair keys that share a game, consumed by
                      tuple_games to merge pairs from several sources
        """
        duplicates = []
        all_games = []
        for game_i in self.games:
            for game_j in self.games:
                # Skip records missing a kick-off time.
                if game_i['kick_off_time'] is None or game_j['kick_off_time'] is None:
                    continue
                # Skip same-source pairs unless running single-source.
                # NOTE(review): in single-source mode a game is also compared
                # with itself and can self-pair - preserved from the original.
                if game_i['source'] == game_j['source'] and not self.sup_source:
                    continue
                i_key = self._game_key(game_i)
                j_key = self._game_key(game_j)
                # Skip pairs already matched (order-insensitive).
                if self.is_tuple_in_list((i_key, j_key), duplicates):
                    continue
                # Kick-off times must match exactly.
                if game_i['kick_off_time'] != game_j['kick_off_time']:
                    continue
                # League: exact match for a single source, similarity otherwise.
                if self.sup_source:
                    is_league = game_i['league'] == game_j['league']
                else:
                    is_league = self.is_same_match(game_i, game_j, 'league') >= 0.6
                if not is_league:
                    continue
                if self.sup_source:
                    # Single source: exact team-name equality.
                    is_team = (game_i['team_home'] == game_j['team_home']
                               and game_i['team_away'] == game_j['team_away'])
                else:
                    # Home + away similarity summing to >= 0.6 counts as the
                    # same fixture.
                    cosine_sim = (self.is_same_match(game_i, game_j, 'team_home')
                                  + self.is_same_match(game_i, game_j, 'team_away'))
                    is_team = cosine_sim >= 0.6
                if not is_team:
                    continue
                # Record the matched pair key and the matched games.
                duplicates.append((i_key, j_key))
                all_games.append([game_i, game_j])
        # Merge pair keys that share a game into multi-source groups.
        tuples = []
        duplicates = list(set(duplicates))
        for key_i in duplicates:
            tmp = [key_i]
            for key_j in duplicates:
                if key_i == key_j:
                    continue
                if self.has_intersection(key_i, key_j):
                    tmp.append(key_j)
            tuples.append(tmp)
        return all_games, tuples

    def tuple_games(self, all_games, tuples):
        """Combine matched pairs into per-fixture game groups via key tuples."""
        clean_games = []
        for group in tuples:
            # More members than source pairs means an over-merged group.
            if len(group) > self.unique_sources and not self.sup_source:
                continue
            cleaning_games = []
            for pair in all_games:
                i_key = self._game_key(pair[0])
                j_key = self._game_key(pair[1])
                if (i_key, j_key) in group:
                    cleaning_games.extend(pair)
            # Drop duplicate games. NOTE(review): duplicates are deliberately
            # kept in single-source mode - preserved from the original.
            unique_games = []
            seen = set()
            for game in cleaning_games:
                key = self._game_key(game)
                if key not in seen or self.sup_source:
                    unique_games.append(game)
                    seen.add(key)
            clean_games.append(unique_games)
        return clean_games

    def has_intersection(self, tuple1, tuple2) -> bool:
        """Return True when the two tuples share at least one element."""
        return bool(set(tuple1) & set(tuple2))

    def is_same_match(self, game_i, game_j, node):
        """Return the cosine similarity of the `node` field of two games.

        Tokenises both texts, normalises equivalent spellings, vectorises
        with bag-of-words counts, boosts target-keyword columns, and returns
        the cosine similarity in [0, 1].
        """
        # Tokenise both texts (simplified Chinese, jieba + SnowNLP).
        texts = [self.cut_words(game_i[node]), self.cut_words(game_j[node])]
        # Normalise regional spellings to a standard form, e.g. 纽西兰 -> 新西兰.
        documents = [self.preprocess_text(text, equivalent_words) for text in texts]
        vectorizer = CountVectorizer(token_pattern=r'(?u)\b\w+\b')
        # Bag-of-words count matrix (CountVectorizer, not TF-IDF, despite
        # the variable name - kept for continuity).
        tfidf_matrix = vectorizer.fit_transform(documents)

        vocabulary = vectorizer.get_feature_names_out()

        # Boost only when target keywords appear in 2+ vocabulary terms.
        matches = [item for item in vocabulary if any(keyword in item for keyword in target_words)]
        weight = 5 if len(matches) >= 2 else 1

        # Scale the columns of exact target words by the chosen weight.
        if weight != 1:
            vocab_list = list(vocabulary)
            for word in target_words:
                if word in vocab_list:
                    tfidf_matrix[:, vocab_list.index(word)] *= weight

        # Cosine similarity between the two document vectors.
        return cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]

    def is_tuple_in_list(self, new_tuple, existing_list) -> bool:
        """Return True when an order-insensitive match of new_tuple exists."""
        new_set = set(new_tuple)
        return any(set(existing_tuple) == new_set for existing_tuple in existing_list)

    def preprocess_text(self, text: str, equivalent_words) -> str:
        """Replace every variant spelling in `text` with its standard form."""
        for word, standard_word in equivalent_words.items():
            text = text.replace(word, standard_word)
        return text

    def cut_words(self, text: str) -> str:
        """Convert to simplified Chinese, strip noise, and tokenise."""
        # Traditional -> simplified Chinese.
        text = zhconv.convert(text, 'zh-hans')
        # Remove boilerplate matched by the crawler pattern first.
        re_text = re.sub(pattern, "", text)
        # Coarse segmentation with jieba...
        words = " ".join(jieba.cut(re_text))
        # ...then refine with SnowNLP and return space-separated tokens.
        return " ".join(SnowNLP(words).words)