import pandas as pd
import re
import pprint
import processor as processor
import en2zh
from typing import Dict
from pk2df import get_plat_2_df

from feature import socialMedia

def find_word_group_indexes(string, patterns):
    """Return the start index of every regex match of each pattern in *string*.

    Indices are ordered by pattern, then by match position within the string.
    """
    return [m.start() for pat in patterns for m in re.finditer(pat, string)]


def find_word_end_indexes(string, patterns):
    """Return the end index of every regex match of each pattern in *string*.

    Indices are ordered by pattern, then by match position within the string.
    """
    return [m.end() for pat in patterns for m in re.finditer(pat, string)]


def find_word_start_end_indexes(string, patterns):
    """Return (start, end) of the first match of the first pattern that
    matches *string*; None when no pattern matches at all."""
    for pat in patterns:
        found = re.search(pat, string)
        if found is not None:
            return found.span()
    return None


def is_tag_1(text, group_dict):
    """Detect the '症状出现' (symptom onset) node in *text*.

    True when an A-group keyword start is followed within 1..20 characters
    by a B-group keyword end, or likewise for the C/D keyword groups.

    Bug fix: the original took min() over ALL pairwise distances — including
    negative ones where the end keyword precedes the start keyword — so a
    single backwards pair could mask a valid forward pair and wrongly return
    False. Each pair is now tested directly.
    """
    text = str(text)
    if not text:  # guard against missing content
        return False
    for start_key, end_key in (("group_a_tag1", "group_b_tag1"),
                               ("group_c_tag1", "group_d_tag1")):
        starts = [m.start() for p in group_dict[start_key] for m in re.finditer(p, text)]
        ends = [m.end() for p in group_dict[end_key] for m in re.finditer(p, text)]
        # any forward pair whose gap is at most 20 characters counts as a hit
        if any(0 < e - s <= 20 for s in starts for e in ends):
            return True
    return False


def is_tag_2(text, group_dict):
    """Detect the '就诊确诊' (visit/diagnosis) node in *text*.

    True when an A-group keyword start is followed within 1..20 characters
    by a B-group keyword end.

    Bug fix: the original took min() over ALL pairwise distances (including
    negative, backwards pairs), which could mask a valid forward pair and
    wrongly return False. Each pair is now tested directly.
    """
    text = str(text)
    if not text:  # guard against missing content
        return False
    starts = [m.start() for p in group_dict["group_a_tag2"] for m in re.finditer(p, text)]
    ends = [m.end() for p in group_dict["group_b_tag2"] for m in re.finditer(p, text)]
    # any forward pair whose gap is at most 20 characters counts as a hit
    return any(0 < e - s <= 20 for s in starts for e in ends)


def is_tag_3(text, group_dict):
    """Detect the '治疗选择' (treatment choice) node in *text*.

    True when any C-group pattern matches on its own, or when an A-group
    keyword start is followed within 1..20 characters by a B-group keyword end.

    Bug fix: the original A/B check took min() over ALL pairwise distances
    (including negative, backwards pairs), which could mask a valid forward
    pair and wrongly return False. Each pair is now tested directly.
    """
    text = str(text)
    if not text:  # guard against missing content
        return False
    # a single C-group keyword is sufficient on its own
    if any(re.search(p, text) for p in group_dict["group_c_tag3"]):
        return True
    starts = [m.start() for p in group_dict["group_a_tag3"] for m in re.finditer(p, text)]
    ends = [m.end() for p in group_dict["group_b_tag3"] for m in re.finditer(p, text)]
    return any(0 < e - s <= 20 for s in starts for e in ends)


def is_tag_4(text, group_dict):
    """Detect the '方案更换' (regimen change) node in *text*.

    True when any C-group pattern matches on its own, or when an A-group
    keyword start is followed within 1..20 characters by a B-group keyword end.

    Bug fix: the original A/B check took min() over ALL pairwise distances
    (including negative, backwards pairs), which could mask a valid forward
    pair and wrongly return False. Each pair is now tested directly.
    """
    text = str(text)
    if not text:  # guard against missing content
        return False
    # a single C-group keyword is sufficient on its own
    if any(re.search(p, text) for p in group_dict["group_c_tag4"]):
        return True
    starts = [m.start() for p in group_dict["group_a_tag4"] for m in re.finditer(p, text)]
    ends = [m.end() for p in group_dict["group_b_tag4"] for m in re.finditer(p, text)]
    return any(0 < e - s <= 20 for s in starts for e in ends)


def get_judge(node, group_dict, df_explored, column='内容'):  # per-row analysis
    """Tag every row of *df_explored* with the four journey-node flags.

    Adds one boolean column per node tag, a '【节点】无节点' ("no node")
    column that is True only when none of the four tags fired, and a
    '节点统计' summary column (via get_conclusion). Mutates and returns
    *df_explored*.

    Improvement: '【节点】无节点' was originally recomputed with a row-wise
    apply after every tag column (rows x tags Python-level calls); it is now
    one vectorized any() over the four tag columns.
    """
    tag2function = {
        '【节点】症状出现': is_tag_1,
        '【节点】就诊确诊': is_tag_2,
        '【节点】治疗选择': is_tag_3,
        '【节点】方案更换': is_tag_4,
    }
    for tag_col, is_tag_fn in tag2function.items():
        # bind is_tag_fn as a default arg to avoid late-binding surprises
        df_explored[tag_col] = df_explored[column].apply(lambda x, f=is_tag_fn: f(x, group_dict))
    # "no node" holds exactly when none of the four tag columns is True
    df_explored['【节点】无节点'] = ~df_explored[list(tag2function)].any(axis=1)
    df_explored['节点统计'] = df_explored.apply(lambda x: get_conclusion(x, node), axis=1)
    return df_explored


def get_count(platform, columns, df_explored):  # columns holds the node-judgement column names
    """Count True hits per node column per platform, and each node's share.

    Returns a pair of nested dicts:
      plat_dict   -- {node column: {platform (zh): number of True rows}}
      plat_dict_2 -- {node column: {platform (zh): that node's share of the
                      platform's total hits across all node columns}}

    Improvements over the original: the deprecated pd.value_counts free
    function is replaced by Series.value_counts; the platform-name mapping
    of the '平台' column is hoisted out of the double loop; the share
    computation guards against a platform with zero hits (division by zero).
    """
    # Map every raw platform value to its Chinese display name once, up front.
    plat_zh_series = df_explored['平台'].apply(lambda x: en2zh.mapping_d.get(x, x))

    plat_dict = {}    # absolute counts
    plat_dict_2 = {}  # proportions
    for column in columns:
        plat_dict[column] = {}
        plat_dict_2[column] = {}
        for plat in platform:
            counts = df_explored.loc[plat_zh_series == plat, column].value_counts().to_dict()
            plat_zh = en2zh.mapping_d.get(plat, plat)
            plat_dict[column][plat_zh] = counts.get(True, 0)

    for column in columns:
        for plat in platform:
            plat_zh = en2zh.mapping_d.get(plat, plat)
            distribution = {key: value[plat_zh] for key, value in plat_dict.items()}
            total = sum(distribution.values())
            # guard: a platform may have zero hits across all node columns
            plat_dict_2[column][plat_zh] = distribution[column] / total if total else 0.0
    return plat_dict, plat_dict_2


def get_conclusion(row, node):
    """Join the names of the node columns that are truthy in *row* with a
    full-width comma; empty string when none fired."""
    fired = [name for name in node if row[name]]
    return '，'.join(fired)


def get_to_excel(platform, platform_proportion, tag_dist_xlsx, node, plat_dis, plat_dict, plat_dict_2):
    """Write the per-node count/proportion table to *tag_dist_xlsx*.

    Columns are the platforms followed by the per-platform proportion
    columns; one row per node name, plus a trailing '总数' (total) row whose
    proportion cells are all 1.
    """
    table = []
    for node_name in node:
        counts = [plat_dict[node_name].get(p, 0) for p in platform]
        shares = [plat_dict_2[node_name].get(p, 0) for p in platform]
        table.append([node_name] + counts + shares)

    # final row: per-platform totals, with every proportion fixed at 1
    totals_row = ['总数'] + [plat_dis[p] for p in platform] + [1] * len(platform_proportion)
    table.append(totals_row)

    header = ["节点"] + platform + platform_proportion
    pd.DataFrame(table, columns=header).to_excel(f'{tag_dist_xlsx}')


def do_med_treatment_analysis(group_dict, platform, node, proportion, df, column):
    """Run node tagging over *df* and aggregate per-platform statistics.

    Parameters: *group_dict* keyword groups, *platform* platform names,
    *node* the node column names, *df* the input frame, *column* the text
    column to analyse. *proportion* is currently unused and kept only for
    call-site compatibility.

    Returns (plat_dis, plat_dict, plat_dict_2, df_explored): raw platform
    counts, per-node hit counts, per-node hit shares, and the tagged frame.

    Improvements: input validation now raises ValueError instead of using
    assert (asserts are stripped under `python -O`); dead locals and
    commented-out code removed.
    """
    required = ("hash_key", "usr_hash_key", "search_key")
    missing = [c for c in required if c not in df.columns]
    if missing:
        raise ValueError(f"input dataframe is missing required columns: {missing}")

    # Blank out non-string content (NaN, numbers) so the regex search is safe.
    df[column] = df[column].apply(lambda x: x if isinstance(x, str) else '')

    plat_dis = df['平台'].value_counts().to_dict()

    df = get_judge(node, group_dict, df, column)
    new_col = ['平台', "hash_key", "usr_hash_key", "search_key", column] + node + ['节点统计']
    df_explored = df[new_col]

    plat_dict, plat_dict_2 = get_count(platform, node, df_explored)
    return plat_dis, plat_dict, plat_dict_2, df_explored

def get_multi_platform_df(platform):
    """Load and concatenate the per-platform DataFrames for *platform*.

    For platforms that are social media (per the `socialMedia` set, after
    mapping the Chinese name to English via en2zh.plat_zh2en) the presence
    of the 'usr_hash_key' and 'search_key' columns is asserted.
    """
    plat2df = get_plat_2_df()
    frames = []
    for plat in platform:
        frame = plat2df[plat]
        en_name = en2zh.plat_zh2en.get(plat, plat)
        if en_name in socialMedia:
            assert 'usr_hash_key' in frame
            assert 'search_key' in frame
        frames.append(frame)
    return pd.concat(frames)  # merge all sources into one frame

def get_social_or_conversation_analysis(df: pd.DataFrame,
                                        kw_group: Dict,
                                        platform: list,
                                        tag_dist_xlsx: str,
                                        text_col: str,
                                        source_xlsx: str):
    """Run the full node analysis for one data source and write two files:
    a per-node distribution table (*tag_dist_xlsx*) and a sampled, user-
    enriched text detail sheet (*source_xlsx*).

    Relies on the module-level `nodes` list for the node column names.
    """
    platform_proportion = [f"{plat}比例" for plat in platform]
    text_proportion = ['【节点】症状出现文本位置', '【节点】就诊确诊文本位置', '【节点】治疗选择文本位置', '【节点】方案更换文本位置']

    plat_dis, med_dis, med_prop_dis, judged = do_med_treatment_analysis(
        kw_group, platform, nodes, text_proportion, df, text_col)

    # Sample at most 1000 rows per platform for the detail sheet.
    sampled_frames = []
    for plat in judged['平台'].unique():
        subset = judged[judged['平台'] == plat]
        sampled_frames.append(subset.sample(min(len(subset), 1000)))
    text_detail = pd.concat(sampled_frames)

    # Left-join user attributes on the user hash key (both sides as str).
    user_df = pd.read_excel('format/format_users.xlsx')
    user_df['hash_key'] = user_df['hash_key'].apply(str)
    text_detail['usr_hash_key'] = text_detail['usr_hash_key'].apply(str)
    text_detail = pd.merge(text_detail, user_df, left_on='usr_hash_key', right_on='hash_key', how='left')
    text_detail.to_excel(f'{source_xlsx}')

    get_to_excel(platform, platform_proportion, tag_dist_xlsx, nodes, plat_dis, med_dis, med_prop_dis)


# Excel workbook holding the keyword lists: one sheet per node tag, columns A-D.
keyword_file = r"【Social Listening】关键词节点情绪词_240531UPD.xlsx"

# Node (patient-journey stage) column names produced by the analysis; the last
# one ('无节点', "no node") is derived, not keyword-driven.
nodes = ['【节点】症状出现', '【节点】就诊确诊', '【节点】治疗选择', '【节点】方案更换', '【节点】无节点']
# Maps each node tag to the numeric suffix used in keyword-group dict keys
# (e.g. 'group_a_tag1').
tag2index = {
    '【节点】症状出现': '1',
    '【节点】就诊确诊': '2',
    '【节点】治疗选择': '3',
    '【节点】方案更换': '4',
}
def build_group_dict(keyword_file, tag2index):
    """Read the keyword workbook into a flat dict of word lists.

    For every node tag (except the '无节点' pseudo-tag, which has no sheet),
    reads the sheet named after the tag and, for each present column A-D,
    stores its non-empty unique values under 'group_<a-d>_tag<index>'.
    """
    group_dict = {}
    for tag_name, index in tag2index.items():
        if '无节点' in tag_name:
            continue
        sheet = pd.read_excel(keyword_file, sheet_name=tag_name).fillna('')
        for col in "ABCD":
            if col not in sheet.columns:
                continue
            words = [w for w in list(sheet[col].unique()) if w]
            group_dict[f'group_{col.lower()}_tag{index}'] = words
    return group_dict
# --- Script entry: build keyword groups, load data, run both analyses. ---
# NOTE(review): this runs at import time; consider wrapping in an
# `if __name__ == "__main__":` guard.
keyword_group = build_group_dict(keyword_file, tag2index)
platforms1 = ['小红书', '知乎', '抖音', '快手', '微博']  # social-media platforms
platforms2 = ['丁香', '春雨', '好大夫']  # online-consultation platforms
df1 = get_multi_platform_df(platforms1)
# Merge title and body into a single '文本' column for node matching.
df1[['标题','内容']]=df1[['标题','内容']].fillna('')
df1['文本'] = df1['标题']+ df1['内容']
df2 = get_multi_platform_df(platforms2)

# Social-media analysis over the combined title+body text.
get_social_or_conversation_analysis(df1, keyword_group, platforms1, 'output/节点分析/社媒节点分析.xlsx', '文本', 'output/节点分析/社媒节点文本.xlsx')

#get_social_or_conversation_analysis(group_dict, platforms2, '问诊医生建议节点分析.xlsx', '医生', '问诊医生提及节点文本.xlsx')
# Consultation analysis over the patient ('病人') text column.
get_social_or_conversation_analysis(df2,keyword_group, platforms2, 'output/节点分析/问诊病人提及节点分析.xlsx', '病人', 'output/节点分析/问诊病人提及节点文本.xlsx')

# 示例数据
# file_path = '全部问诊.xlsx'
#
# # 创建DataFrame
# df = pd.read_excel(file_path, keep_default_na=False)
