import ast
import json
import re

import pandas as pd
from bs4 import BeautifulSoup

from requst_llm import baidu_api
from conetnt_is_negative import prompt_text

import configparser
# Load Baidu API credentials and model settings from the local config file.
config = configparser.ConfigParser()
# Read the configuration file.
# NOTE(review): config.read silently does nothing if config.ini is missing;
# the config.get calls below would then raise NoSectionError.
config.read('config.ini')
# Credentials and the comma-separated list of available model names
# under the [baidu] section.
API_KEY = config.get('baidu', 'API_KEY')
SECRET_KEY = config.get('baidu', 'SECRET_KEY')
model_name_list = config.get('baidu', 'model_name_list')
model_name_list = model_name_list.split(',')

# Hard-coded choice of the 4th configured model; assumes the config lists
# at least four names — TODO confirm against config.ini.
model_name = model_name_list[3]
def getresult(row):
    """Classify one article row with the LLM and return (label, reason).

    Args:
        row: a DataFrame row carrying 'article_title', 'content',
            'ocr_content' and 'source_website' columns.

    Returns:
        A (label, reason) 2-tuple parsed from the model response;
        both elements are None when the response cannot be parsed.
    """
    title = row['article_title']
    content = row['content']
    ocr = row['ocr_content']
    source_website = row['source_website']
    # Weibo posts carry no meaningful title, so drop it from the prompt.
    if "微博" in source_website:
        title = ""
    prompt = prompt_text.format(monitoring_subject="航空机构", title=title, content=content, ocr=ocr)
    prompt = clean_text(prompt)
    result = baidu_api([{
        "role": "user",
        "content": prompt
    }], model_name)
    parsed = parse_json(result)
    # Guard against failure modes where parse_json does not yield a
    # 2-tuple, so callers that unpack label/reason never raise ValueError.
    if not (isinstance(parsed, tuple) and len(parsed) == 2):
        return None, None
    return parsed

def parse_json(text):
    """Parse the raw LLM response into a (label, reason) pair.

    The model is expected to return JSON — either bare or wrapped in a
    ```json ... ``` fenced block — containing a dict (or a list whose
    first element is a dict) with "label" and "reason" keys.

    Args:
        text: raw text returned by the LLM, or None.

    Returns:
        (label, reason) on success; (None, None) when text is None or
        cannot be parsed, so callers can always unpack two values.
    """
    if text is None:
        return None, None
    match = re.search(r'```json(.*)```', text, re.DOTALL)
    payload = match.group(1).strip() if match else text
    try:
        # json.loads instead of eval: eval on untrusted model output is a
        # code-execution risk and silently accepts arbitrary expressions.
        parsed = json.loads(payload)
    except (json.JSONDecodeError, ValueError):
        try:
            # Fallback for Python-literal style output (single quotes etc.).
            parsed = ast.literal_eval(payload)
        except (ValueError, SyntaxError) as e:
            print(e)
            return None, None
    try:
        if isinstance(parsed, list):
            # Some responses wrap the result dict in a one-element list.
            parsed = parsed[0]
        return parsed.get('label'), parsed.get('reason')
    except (IndexError, AttributeError) as e:
        print(e)
        return None, None

def clean_text(text):
    """Strip HTML markup and URLs from *text* and collapse whitespace.

    Args:
        text: raw (possibly HTML) string.

    Returns:
        The visible text with links removed and all whitespace runs
        collapsed to single spaces, trimmed at both ends.
    """
    # Keep only the visible text, discarding all HTML tags.
    visible = BeautifulSoup(text, "html.parser").get_text()

    # Replace links (http://, https://, or www.-prefixed) with a space.
    link_re = re.compile(
        r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+|'
        r'www\.(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    )
    visible = link_re.sub(' ', visible)

    # Collapse every run of whitespace into one space and trim the ends.
    return re.sub(r'\s+', ' ', visible).strip()

if __name__ == '__main__':
    src_file = '3月5日航空类四项目情感分析200条.xlsx'
    # Keep article IDs as strings so leading zeros survive the read.
    df = pd.read_excel(src_file, dtype={'文章ID': str})
    # Run the LLM classification row by row.
    df['result'] = df.apply(getresult, axis=1)
    # Expand each (label, reason) tuple into two separate columns.
    df[["label", "reason"]] = df['result'].apply(lambda pair: pd.Series(pair))
    df.to_excel('tmp.xlsx', index=False)
    print(df.columns)
    print(df.head())