from zhipuai import ZhipuAI
# SECURITY(review): this API key is hardcoded and committed with the source.
# It should be rotated and loaded from an environment variable / config file
# instead of living in the repository.
client = ZhipuAI(api_key='dde993d50111c92467475db303e96d07.bqWvXGsbY9BVHmF6')

import pandas as pd
import numpy as np
import os
from pathlib import Path
import time
import csv
import jieba


# Call the ZhipuAI (智谱AI) glm-4 model to pick a department for a query
def zhipufunc(type, query):
  """Ask glm-4 which hospital department *query* should be routed to.

  type: the department label already attached to the record; the prompt
        tells the model to echo it back when the text roughly matches.
  query: the patient's free-text question.
  Returns the model's answer (a department name) as a string.
  """
  system_prompt = (
    "你是一个医疗导诊助手，判断文本该挂哪个科室给出最符合的一个，不用说明原因，如果和"
    + type + "差不多则回答" + type
  )
  messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": query},
  ]
  completion = client.chat.completions.create(model='glm-4', messages=messages)
  return completion.choices[0].message.content

#回答问题
def answer(query, res):
  """Ask glm-4 for a medical answer to a patient question.

  query: the patient's question.
  res: reference text that is embedded in the prompt for the model to
       fall back on when it cannot answer directly.
  Returns the model's reply as a string.
  """
  system_prompt = (
    "你是一个专业的医生，请对患者的以下问题围绕药物、饮食、生活方面给出医学专业回答，参考:("
    + res
    + "),无法回答的请先参考括号里的内容，仍不知道就回答不知道且不用回答建议，不用说明其他，回答长度300字符以内,一定不要有换行等字符"
  )
  completion = client.chat.completions.create(
    model='glm-4',
    messages=[
      {"role": "system", "content": system_prompt},
      {"role": "user", "content": query},
    ],
  )
  return completion.choices[0].message.content

def fileGet():
  """Annotate every CSV under ./sdataset with an LLM-predicted department.

  For each input <name>.csv a new file 1<name>.csv is written at the top of
  ./sdataset, containing the original columns plus a 'finalType' column
  produced by zhipufunc().  Input is streamed in 50-row chunks to bound
  memory use.
  """
  base = Path("./sdataset")
  # Materialize the file list up front: we create new CSVs inside the same
  # tree, and a lazy rglob could otherwise pick up our own output files.
  files = list(base.rglob('*.csv'))
  fieldnames = ['time', 'type', 'query', 'response', 'finalType']

  for file in files:
    print(file)
    out_path = base / ('1' + file.name)
    # utf-8 keeps the Chinese text intact regardless of the platform default.
    with open(out_path, 'w', newline='', encoding='utf-8') as f_out:
      csv_writer = csv.DictWriter(f_out, fieldnames=fieldnames)
      csv_writer.writeheader()
      # Stream the input in chunks so large files never load fully.
      for chunk in pd.read_csv(file, chunksize=50):
        rows = chunk.to_dict(orient='records')
        for row in rows:
          # Ask the LLM for the best-matching department for this query.
          row['finalType'] = zhipufunc(row['type'], row['query'])
        print(row)  # progress indicator: last row of the chunk
        csv_writer.writerows(rows)


    # #遍历文件 skiprow:跳过前几行
    # ngData = pd.read_csv(file,skiprows=0)
    # ngList = []
    # # i = 0
    # for i,row in ngData.iterrows():
    #   # print(index)
    #   type = row['type']
    #   query = row['query']
    #   #调质朴
    #   new = zhipufunc(type, query)
    #   ngList.append(new)
    #   # i = i + 1
      
    # ngData['finalType'] = ngList
    # ngData.to_csv(file, index=False)


def clean():
  """Clean every CSV under ./sdataset in place.

  Per file <stem>.csv: drop rows whose query contains '如题', rows whose
  type does not contain <stem>, and rows whose query is longer than 120
  characters; de-duplicate; drop NA rows; then strip spaces/newlines from
  every cell.  The cleaned frame overwrites the original file.
  """
  filepath = "./sdataset"
  p = Path(filepath)

  for file in p.rglob('*.csv'):
    stem = file.name.replace('.csv', '')
    print(stem)
    # NOTE(review): na_values=False is unusual — it adds the string 'False'
    # to the NA markers; confirm keep_default_na=False was not the intent.
    ngData = pd.read_csv(file, na_values=False)
    # Drop placeholder questions that just say "如题" ("as per the title").
    ngData = ngData[~ngData['query'].str.contains('如题', na=False)]
    # Keep only rows whose department matches this file's stem.
    ngData = ngData[ngData['type'].str.contains(stem, na=False)]
    ngData = ngData[ngData['query'].str.len() <= 120]
    ngData.drop_duplicates(inplace=True)
    ngData.reset_index(drop=True, inplace=True)  # restore a clean index
    # BUG FIX: dropna() and applymap() previously discarded their results,
    # so neither had any effect.  Assign them back; dropna must run first
    # because applymap stringifies NaN into the literal 'nan'.
    ngData = ngData.dropna()
    # Strip spaces/newlines (and stray ',,,' runs) from every cell.
    ngData = ngData.applymap(
        lambda x: str(x).replace(' ', '').replace('\n', '').replace(',,,', ''))
    ngData['query'] = ngData['query'].str.replace(r'\s+', '', regex=True)

    ngData.to_csv(file, index=False)

# Stop-word removal helper
def remove_stopwords(stoppath, query):
    """Segment *query* with jieba and drop every token listed in the
    stop-word file at *stoppath* (one word per line, utf-8).

    Returns the surviving tokens joined back into a single string.
    """
    with open(stoppath, 'r', encoding='utf-8') as f:
        # A set makes the per-token membership test O(1) instead of O(n)
        # over the whole stop-word list.
        stopwords = {line.strip() for line in f}

    return ''.join(word for word in jieba.cut(query) if word not in stopwords)


# Rewrite the stored answers with the LLM
def get_new_answer():
  """Rewrite the 'response' column of every CSV under ./sdataset via glm-4.

  For each input <name>.csv a file s1/1<name>.csv is written with an extra
  'new_res' column holding answer(query, response).  Input is streamed in
  50-row chunks to bound memory use.
  """
  base = Path("./sdataset")
  out_dir = base / "s1"
  # open() does not create directories — make sure the output folder exists.
  out_dir.mkdir(parents=True, exist_ok=True)
  # Snapshot the input list and exclude anything already under s1/: rglob is
  # recursive and lazy, so it could otherwise pick up our own output files.
  files = [f for f in base.rglob('*.csv') if out_dir not in f.parents]
  fieldnames = ['time', 'type', 'query', 'response', 'finalType', 'new_res']

  for file in files:
    print(file)
    out_path = out_dir / ('1' + file.name)
    # utf-8 keeps the Chinese text intact regardless of the platform default.
    with open(out_path, 'w', newline='', encoding='utf-8') as f_out:
      csv_writer = csv.DictWriter(f_out, fieldnames=fieldnames)
      csv_writer.writeheader()
      for chunk in pd.read_csv(file, chunksize=50):
        rows = chunk.to_dict(orient='records')
        for row in rows:
          # 'response' may come back as NaN (a float) — stringify first.
          row['new_res'] = answer(row['query'], str(row['response']))
        print(row)  # progress indicator: last row of the chunk
        csv_writer.writerows(rows)


if __name__ == "__main__":
  # Guard the pipeline so importing this module (e.g. for remove_stopwords)
  # does not fire off a full run of LLM calls.  Swap the call below to run a
  # different stage: clean() / fileGet() / get_new_answer().
  # clean()
  get_new_answer()
  # Quick smoke check of a single classification:
  # print(zhipufunc("产科", "请问区医院可以做胎儿的心脏彩超吗"))