from flask import Flask, Response, json, request, jsonify
from dashscope import Generation
from dashscope.api_entities.dashscope_response import Role
from flask_cors import CORS
import dashscope
import os
from PyPDF2 import PdfReader
from docx import Document

# import numpy as np
# dashscope.api_key = "your_api_key"

app = Flask(__name__)
# Allow cross-origin requests from any origin on every route.
CORS(app, resources="/*")

# Model identifier for DeepSeek-R1 distilled Llama-70B (currently unused;
# the routes below call qwen_max instead).
dsr1distilla70b = 'deepseek-r1-distill-llama-70b'

# Conversation history accumulated across requests.
# NOTE(review): this is module-global mutable state — it is shared by ALL
# clients and grows without bound; confirm a single shared conversation
# is the intended behavior.
messages = []

@app.route('/llm/request')
def stream_numbers():
    """Stream an LLM chat completion to the client as Server-Sent Events.

    Reads the user prompt from the ``query`` query-string parameter
    (default ``'default query'``), appends it to the module-level
    conversation history, streams incremental qwen-max output as SSE
    ``data:`` frames, and finishes with a ``{"message": "done"}`` frame.

    NOTE(review): ``messages`` is shared by all clients and never
    trimmed — confirm a single global conversation is intended.
    """
    global messages
    query = request.args.get('query', default='default query')

    def chat():
        print(query)
        messages.append({'role': Role.USER, 'content': query})
        whole_message = ''
        responses = Generation.call(Generation.Models.qwen_max, messages=messages,
                                    result_format='message', stream=True,
                                    incremental_output=True)

        for response in responses:
            part_message = response.output.choices[0]['message']['content']
            whole_message += part_message
            print(part_message, end='')
            # Reuse the chunk we already extracted instead of re-indexing
            # the response object a second time.
            json_data = json.dumps({"message": part_message})
            yield f"data: {json_data}\n\n"  # SSE frame format

        # Record the full assistant reply so follow-up turns have context.
        messages.append({'role': 'assistant', 'content': whole_message})
        json_data = json.dumps({"message": 'done'})
        yield f"data: {json_data}\n\n"  # sentinel frame so the client can close
        print('结束')
    headers = {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'X-Accel-Buffering': 'no',  # disable proxy buffering so chunks flush immediately
    }

    return Response(chat(), content_type='text/event-stream', headers=headers)

# 新增处理文件上传的路route
# Route handling uploaded-document analysis.
@app.route('/llm/analyze_document', methods=['POST'])
def analyze_document():
    """Accept a file upload and return an LLM-generated summary of it.

    Expects a multipart form field named ``file`` containing a .pdf,
    .docx, or .txt document. Returns ``{"summary": ...}`` on success,
    or ``{"error": ...}`` with status 400 (bad request / unsupported
    type) or 500 (processing failure).
    """
    try:
        if 'file' not in request.files:
            return jsonify({"error": "No file part"}), 400
        file = request.files['file']
        if file.filename == '':
            return jsonify({"error": "No selected file"}), 400

        # Compare extensions case-insensitively so uploads like
        # "REPORT.PDF" are accepted as well.
        filename = file.filename.lower()
        if filename.endswith('.pdf'):
            content = extract_pdf_content(file)
        elif filename.endswith('.docx'):
            content = extract_docx_content(file)
        elif filename.endswith('.txt'):
            content = file.stream.read().decode('utf-8')
        else:
            return jsonify({"error": "Unsupported file type"}), 400

        # Ask the LLM to analyze and summarize the extracted text.
        summary = generate_summary(content)
        return jsonify({"summary": summary})
    except Exception as e:
        print(f"处理文件时出错: {str(e)}")
        return jsonify({"error": str(e)}), 500

def extract_pdf_content(file):
    """Extract and concatenate the text of every page of a PDF.

    ``file`` is a binary file-like object readable by ``PdfReader``.
    Returns the concatenated page text as a single string.
    """
    reader = PdfReader(file)
    # extract_text() may return None for pages with no extractable text
    # (e.g. scanned images); guard with "or ''" so concatenation never
    # raises TypeError.
    return "".join(page.extract_text() or "" for page in reader.pages)

def extract_docx_content(file):
    """Return the text of a .docx document, one paragraph per line.

    ``file`` is a binary file-like object readable by python-docx's
    ``Document``; paragraph texts are joined with newlines.
    """
    document = Document(file)
    return '\n'.join(paragraph.text for paragraph in document.paragraphs)

def generate_summary(content):
    """Ask qwen-max for an analysis/summary of ``content``.

    Builds a single-turn prompt around the document text, performs a
    non-streaming Generation call, and returns the model's reply text.
    """
    prompt = f'请分析并总结以下文档内容：{content}'
    conversation = [{'role': Role.USER, 'content': prompt}]
    response = Generation.call(Generation.Models.qwen_max,
                               messages=conversation,
                               result_format='message')
    return response.output.choices[0]['message']['content']

if __name__ == '__main__':
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug interactive debugger to the whole network — disable debug
    # (or bind to localhost) before deploying.
    app.run(debug=True, host='0.0.0.0', port=5000)
    # app.run(debug=True, host='182.92.125.26', port=5000)