# algorithm/utils.py
import numpy as np
def logistic_function(x, L=1, k=10, x0=0.5):
    """Standard logistic curve: L / (1 + e^(-k * (x - x0))).

    Parameters:
    - x: input value (scalar or numpy array).
    - L: upper asymptote of the curve.
    - k: steepness of the transition.
    - x0: midpoint where the output equals L / 2.
    """
    exponent = -k * (x - x0)
    return L / (1 + np.exp(exponent))


def inverted_sigmoid(x, L=1, k=10, x0=0.5):
    """Decreasing sigmoid: L minus the standard logistic curve."""
    rising = L / (1 + np.exp(-k * (x - x0)))
    return L - rising


# Logistic-model mapping from practice level to mastery level; with default
# parameters it maps a float in [0, 1] to a float in [0, 1] representing mastery
def mastery_level1(x, max_mastery=1, growth_rate=10, midpoint=0.5):
    """Map a practice level to a mastery level via a logistic growth model.

    Parameters:
    - x: practice level, a float in [0, 1].
    - max_mastery: upper asymptote of mastery (default 1).
    - growth_rate: steepness of the logistic curve.
    - midpoint: x value where the curve grows fastest.

    Returns:
    - mastery level, a float between 0 and max_mastery.
    """
    decay = np.exp(-growth_rate * (x - midpoint))
    return max_mastery / (1 + decay)
def inverted_mastery_level1(x, midpoint=0.5):
    """Decreasing counterpart of mastery_level1: reflects x around midpoint.

    Fix/generalization: the reflection midpoint is now a parameter and is
    forwarded to mastery_level1, so the reflection point and the curve's
    midpoint stay in sync if a caller overrides it. The original hard-coded
    midpoint = 0.5 and relied on mastery_level1's default happening to match;
    with the default argument the behavior is unchanged.

    Parameters:
    - x: practice level, a float in [0, 1].
    - midpoint: reflection point (default 0.5, matching mastery_level1).
    """
    return mastery_level1(midpoint * 2 - x, midpoint=midpoint)
def mastery_level2(x):
    """Piecewise mastery curve: linear below `division`, logistic above.

    The linear segment is scaled so both pieces meet at x == division.
    Accepts scalars or numpy arrays (branching is done with np.where).
    """
    division = 0.2
    logistic_branch = mastery_level1(x, max_mastery=1, growth_rate=10, midpoint=division)
    join_value = mastery_level1(division, max_mastery=1, growth_rate=10, midpoint=division)
    linear_branch = join_value / division * x
    return np.where(x > division, logistic_branch, linear_branch)

def mastery_level3(x, max_mastery=1, growth_rate=20, midpoint=0.1):
    """Logistic mastery curve with a steeper default slope and earlier midpoint."""
    denominator = 1 + np.exp(-growth_rate * (x - midpoint))
    return max_mastery / denominator
def inverted_mastery_level2(x):
    """Decreasing version of mastery_level2: x is reflected around 0.5."""
    midpoint = 0.5
    reflected = midpoint * 2 - x
    return mastery_level2(reflected)


def self_contribution_recommendation(x):
    """Self-contribution recommendation score: the linear complement of x."""
    complement = 1.0 - x
    return complement

# List of tag indices that should be ignored
ignored_tag_indices = []

# Indices of non-algorithm tags
non_algorithm_tag_indices = [14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,
                             46,48,52,57,58,59,60,61,70,77,81,82,83,85,88,89,90,91,92,93,94,95,96,97,98,99,102,107,
                             108,114,115,116,117,118,162,163,164,167,168,328,331,333,334,335,336,337,341,342,
                             343,344,347,348,361,362,363,367,381,383,386,389,390,393,394,397,408,409,420,421,
                             422,424,429,436,437
                             ]
# Difficulty scores, used for tag total score and user tag mastery score (TagValue, UserTagMastery)
diff_score = [22.5, 2, 15, 20, 25, 35, 45, 60]
# Upper score bound per difficulty, used for tag importance (TagImportance)
diff_max_score = [0, 10, 30, 35, 45, 55, 70, 80]


# Array used by user_comprehensive_level
# Threshold array: the fraction of solved problems per difficulty level
# required before that difficulty is considered mastered
# Deprecated after the algorithm change
level_threshold_count = [0, 0.01, 0.05, 0.04, 0.04, 0.04, 0.03, 0.05]

# Scalar ("single") versions of the functions above, easier to read
def mastery_level2_single(x):
    """Scalar version of mastery_level2: linear below 0.2, logistic above."""
    division = 0.2
    if x > division:
        return mastery_level1(x, 1, 10, division)
    join_value = mastery_level1(division, 1, 10, division)
    return join_value / division * x

def inverted_mastery_level2_single(x):
    """Scalar decreasing version of mastery_level2: x reflected around 0.5.

    Fix: this "_single" variant previously delegated to the array-based
    mastery_level2 (which goes through np.where and returns a NumPy scalar);
    it now calls mastery_level2_single for consistency with the scalar
    ("single") family. The numeric result is unchanged.

    Parameters:
    - x: practice level, a float in [0, 1].
    """
    midpoint = 0.5
    return mastery_level2_single(midpoint * 2 - x)

def inverted_mastery_level_single(x):
    """Scalar decreasing version of mastery_level1: x reflected around 0.5."""
    midpoint = 0.5
    reflected = midpoint * 2 - x
    return mastery_level1(reflected)


# Logic functions

# Extract the tags related to a set of problems, together with those tags' child tags (union)
def extract_algorithm_tag_indices(problems):
    """Collect the algorithm-tag indices attached to `problems`, plus the
    target tags of every dependency originating from those tags (union).

    Parameters:
    - problems: iterable/queryset of problems to look up tags for.

    Returns:
    - set of integer tag indices.
    """
    from tag.models import AlgorithmTag
    from algorithm.models import TagDependency

    # Tags directly attached to the given problems
    direct_indices = set(
        AlgorithmTag.objects
        .filter(problems__in=problems)
        .values_list('index', flat=True)
    )

    # Target tags of dependencies whose source is one of the direct tags
    dependent_indices = set(
        TagDependency.objects
        .filter(source_tag__index__in=direct_indices)
        .values_list('target_tag__index', flat=True)
    )

    return direct_indices | dependent_indices
