-------------------------------------------------------------------------------------------------------------


from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the pretrained model and its tokenizer.
model_name = "gpt2-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Switch to evaluation mode (disables dropout and other training-only
# behavior) before inference.
model.eval()

# Example input (Chinese: "What is artificial intelligence?").
input_text = "什么是人工智能？"
inputs = tokenizer(input_text, return_tensors="pt")

# Baseline inference with the unoptimized model; no_grad avoids building
# the autograd graph.  `inputs` and `original_output` are reused by the
# distillation section further down this file.
with torch.no_grad():
    original_output = model(**inputs)

# NOTE(review): this prints the raw model output object (logits tensors),
# not generated text — presumably intentional for the demo.
print("原始模型输出:", original_output)


from transformers import Trainer, TrainingArguments, DistilBertForSequenceClassification

# Load the large (teacher) model and the small (student) model.
# NOTE(review): the teacher is a causal LM while the student is a
# sequence-classification model — their output spaces are not directly
# comparable; confirm this pairing is intentional for the demo.
teacher_model = AutoModelForCausalLM.from_pretrained("gpt2-medium")
student_model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")

# Hyper-parameters for the distillation run.
training_args = TrainingArguments(
    output_dir="./distilled_model",
    per_device_train_batch_size=4,
    num_train_epochs=1,
    logging_dir='./logs',
)

# Toy training data: one example whose "labels" are the teacher's
# per-token argmax ids.
# NOTE(review): `inputs` was tokenized with the GPT-2 tokenizer, not
# DistilBERT's, and the per-token labels (values up to the GPT-2 vocab
# size) do not fit a 2-class sequence-classification head — Trainer will
# likely fail on this; verify before relying on it.
train_dataset = [{"input_ids": inputs["input_ids"], "labels": original_output.logits.argmax(-1)}]

# Run the (simulated) distillation with the Hugging Face Trainer.
trainer = Trainer(
    model=student_model,
    args=training_args,
    train_dataset=train_dataset
)

# Train the student model.
trainer.train()

# Persist the distilled student model after training.
student_model.save_pretrained("./distilled_student_model")
print("蒸馏模型已保存到 ./distilled_student_model")


import torch

# Pick the fastest available device and move the model there.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Mixed-precision inference.
# Fix: torch.cuda.amp.autocast() is deprecated and is a silent no-op on a
# CPU-only machine; torch.autocast(device_type=...) picks the proper
# backend (fp16 on CUDA, bf16 on CPU).  Tokenization is hoisted out of the
# autocast context since it does no tensor math.
inputs = tokenizer(input_text, return_tensors="pt").to(device)
with torch.autocast(device_type=device.type):
    with torch.no_grad():
        fp16_output = model(**inputs)

print("FP16推理输出:", fp16_output)


from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments, DistilBertForSequenceClassification
import torch

# 1. Load model and tokenizer.
model_name = "gpt2-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()  # inference mode: disables dropout

# Example input (Chinese: "What is artificial intelligence?").
input_text = "什么是人工智能？"
inputs = tokenizer(input_text, return_tensors="pt")

# Baseline inference with the original model.
with torch.no_grad():
    original_output = model(**inputs)
print("原始模型输出:", original_output)

# 2. Model distillation (simulated).
# NOTE(review): the GPT-2-tokenized input_ids and the per-token,
# vocab-sized labels do not match DistilBERT's tokenizer or its 2-class
# head — confirm this toy setup is intentional before using it for real.
teacher_model = AutoModelForCausalLM.from_pretrained("gpt2-medium")
student_model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")
training_args = TrainingArguments(output_dir="./distilled_model", per_device_train_batch_size=4, num_train_epochs=1, logging_dir='./logs')

train_dataset = [{"input_ids": inputs["input_ids"], "labels": original_output.logits.argmax(-1)}]
trainer = Trainer(model=student_model, args=training_args, train_dataset=train_dataset)
trainer.train()
student_model.save_pretrained("./distilled_student_model")
print("蒸馏模型已保存到 ./distilled_student_model")

# 3. Mixed-precision inference.
# Fix: torch.cuda.amp.autocast() is deprecated and a no-op on CPU;
# torch.autocast(device_type=device.type) handles both CUDA and CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
inputs = tokenizer(input_text, return_tensors="pt").to(device)
with torch.autocast(device_type=device.type):
    with torch.no_grad():
        fp16_output = model(**inputs)
print("FP16推理输出:", fp16_output)


-------------------------------------------------------------------------------------------------------------


import time
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, DistilBertForSequenceClassification, Trainer, TrainingArguments

# Choose GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the original large model and its tokenizer.
model_name = "gpt2-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
original_model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
original_model.eval()

# Example input (Chinese: "What are the basic concepts of machine learning?").
input_text = "什么是机器学习的基本概念？"
inputs = tokenizer(input_text, return_tensors="pt").to(device)

# 1. Baseline latency: unoptimized large model.
# NOTE(review): on CUDA, wall-clock timing around an async kernel launch
# under-reports latency — add torch.cuda.synchronize() before reading the
# clock if accurate GPU timings matter.
start_time = time.time()
with torch.no_grad():
    original_output = original_model(**inputs)
end_time = time.time()
original_duration = end_time - start_time
print("未优化模型响应时间: {:.4f} 秒".format(original_duration))

# 2. Distilled small-model latency.
# DistilBERT plays the student role.
student_model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased").to(device)
student_model.eval()

# Simulated distillation data.
# NOTE(review): `inputs` was produced by the GPT-2 tokenizer (vocab 50257)
# but is consumed by DistilBERT (vocab 30522) — ids above the student's
# vocab will raise an embedding IndexError; likewise the per-token,
# vocab-sized "labels" don't fit a 2-class classification head.
# Re-tokenize with DistilBERT's own tokenizer and build proper labels
# before relying on this.
train_dataset = [{"input_ids": inputs["input_ids"], "labels": original_output.logits.argmax(-1)}]
training_args = TrainingArguments(output_dir="./distilled_model", per_device_train_batch_size=4, num_train_epochs=1, logging_dir='./logs')

trainer = Trainer(
    model=student_model,
    args=training_args,
    train_dataset=train_dataset
)

# Run the (simulated) distillation.
trainer.train()

# Time the distilled student model.
start_time = time.time()
with torch.no_grad():
    student_output = student_model(**inputs)
end_time = time.time()
distilled_duration = end_time - start_time
print("蒸馏模型响应时间: {:.4f} 秒".format(distilled_duration))

# 3. FP16-precision latency.
# NOTE(review): .half() can fail for some ops on a CPU-only machine, and
# torch.cuda.amp.autocast() is deprecated (a no-op on CPU) — prefer
# torch.autocast(device_type=device.type); combining .half() with autocast
# is also redundant.  Confirm this section is only meant for CUDA runs.
student_model.half()  # convert weights to half precision
start_time = time.time()
with torch.cuda.amp.autocast():
    with torch.no_grad():
        fp16_output = student_model(**inputs)
end_time = time.time()
fp16_duration = end_time - start_time
print("FP16精度推理响应时间: {:.4f} 秒".format(fp16_duration))


-------------------------------------------------------------------------------------------------------------


import time

def process_question(question: str) -> str:
    """Simulate answering a single question.

    :param question: the user's question text
    :return: a canned answer string that references the question
    """
    time.sleep(1)  # pretend the backend needs one second per question
    answer = f"回答：这是针对问题 '{question}' 的回答。"
    return answer


from concurrent.futures import ThreadPoolExecutor, as_completed

def handle_multiple_requests(questions):
    """Answer several questions concurrently using a thread pool.

    :param questions: iterable of question strings
    :return: list of (question, answer) tuples, in completion order
    """
    answered = []
    with ThreadPoolExecutor(max_workers=5) as pool:
        # One task per question; remember which future belongs to which
        # question so results can be paired back up.
        pending = {pool.submit(process_question, q): q for q in questions}

        # Collect results as each task finishes.
        for fut in as_completed(pending):
            q = pending[fut]
            try:
                answer = fut.result()
            except Exception as exc:
                print(f"处理问题 '{q}' 时出错: {exc}")
            else:
                answered.append((q, answer))
                print(f"处理完成：问题 '{q}'，回答 '{answer}'")

    return answered


import asyncio

async def async_process_question(question: str) -> str:
    """Asynchronously simulate answering one question.

    :param question: the question text
    :return: a canned answer string
    """
    await asyncio.sleep(1)  # simulated I/O latency
    answer = f"回答：这是针对问题 '{question}' 的回答。"
    return answer

async def handle_async_requests(questions):
    """Answer many questions concurrently with asyncio.

    :param questions: iterable of question strings
    :return: list of answers, in the same order as the questions
    """
    coros = [async_process_question(question) for question in questions]
    answers = await asyncio.gather(*coros)
    return answers
import asyncio
from concurrent.futures import ThreadPoolExecutor

async def worker(queue):
    """Queue consumer: pull questions off *queue* and answer them.

    Runs the blocking ``process_question`` in the module-level thread-pool
    ``executor`` so the event loop stays responsive.  A ``None`` item is
    the shutdown sentinel.
    """
    # Fix: use the loop this coroutine actually runs on instead of relying
    # on the module-level ``loop`` global being assigned before workers
    # start (fragile ordering; NameError if the driver is refactored).
    running_loop = asyncio.get_running_loop()
    while True:
        question = await queue.get()
        if question is None:
            break
        response = await running_loop.run_in_executor(executor, process_question, question)
        print(f"问题: {question}, 回答: {response}")
        queue.task_done()

async def main(questions):
    """Fill a queue with *questions* and drain it with a fixed worker pool."""
    num_workers = 5
    queue = asyncio.Queue()

    # Enqueue every question up front.
    for item in questions:
        await queue.put(item)

    # Spin up the concurrent worker tasks.
    tasks = [asyncio.create_task(worker(queue)) for _ in range(num_workers)]

    # Block until every queued question has been processed.
    await queue.join()

    # One None sentinel per worker tells it to exit its loop.
    for _ in range(num_workers):
        await queue.put(None)

    await asyncio.gather(*tasks)

# Questions to process.
questions = [f"问题{i}" for i in range(10)]

# Shared thread pool used by the workers for the blocking call.
executor = ThreadPoolExecutor()

# Fix: asyncio.get_event_loop() is deprecated when no loop is running
# (Python 3.10+); create the loop explicitly.  Also clean up in a
# try/finally and shut down the executor (the original leaked its
# threads).  The ``loop`` global is kept for the worker coroutines.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
    loop.run_until_complete(main(questions))
finally:
    loop.close()
    executor.shutdown()


-------------------------------------------------------------------------------------------------------------


from langchain.chains import SimpleChain
from langchain.prompts import PromptTemplate

# 定义基础任务链
# Base task chain shared by the concrete chains below.
# NOTE(review): current langchain releases do not export ``SimpleChain``
# from ``langchain.chains`` — confirm the pinned langchain version actually
# provides it; otherwise the import at the top of this section fails.
class BaseChain(SimpleChain):
    def __init__(self, name):
        """Register the chain under *name* via the parent constructor."""
        super().__init__(name=name)
    
    def process(self, input_data):
        """Handle one input; concrete subclasses must override this."""
        raise NotImplementedError("Subclasses should implement this method.")


class FAQChain(BaseChain):
    """Answers a closed set of frequently asked questions."""

    def process(self, input_data):
        # Known FAQ entries, keyed by the exact question text.
        known_answers = {
            "公司成立时间": "我们的公司成立于2001年。",
            "核心产品": "我们的核心产品是智能设备和数据分析服务。",
        }
        default_answer = "无法找到相关的常见问题解答。"
        return known_answers.get(input_data, default_answer)

class ComplexQueryChain(BaseChain):
    """Handles queries that need a multi-step analysis."""

    def process(self, input_data):
        # Stand-in for a real multi-step reasoning pipeline.
        result = f"这是针对复杂问题 '{input_data}' 的分步分析结果。"
        return result

# 定义嵌套任务链
class NestedTaskChain(BaseChain):
    def __init__(self):
        super().__init__(name="NestedTaskChain")
        self.faq_chain = FAQChain(name="FAQChain")
        self.complex_chain = ComplexQueryChain(name="ComplexQueryChain")
    
    def process(self, input_data):
        if "常见" in input_data:
            return self.faq_chain.process(input_data)
        else:
            return self.complex_chain.process(input_data)


class DynamicRouterChain(BaseChain):
    """Routes each query to the nested chain or answers it directly."""

    def __init__(self):
        super().__init__(name="DynamicRouterChain")
        self.nested_chain = NestedTaskChain()

    def process(self, input_data):
        # Queries containing "分析" count as complex and are delegated to
        # the nested chain; everything else gets an inline answer.
        if "分析" not in input_data:
            return f"简单查询直接回答：这是针对简单查询 '{input_data}' 的直接回答。"
        result = self.nested_chain.process(input_data)
        return f"复杂查询处理结果：{result}"


def test_task_chains():
    """Smoke-test the router with FAQ-style, complex, and simple queries."""
    router_chain = DynamicRouterChain()
    
    # FAQ-style questions.
    # NOTE(review): these contain neither "分析" nor "常见", so the router
    # answers them directly — they never reach FAQChain as the original
    # comments claimed; confirm the intended routing keywords.
    print(router_chain.process("公司成立时间"))
    print(router_chain.process("核心产品"))
    
    # Complex queries: they contain "分析", so they go through the nested
    # chain (and, lacking "常见", end up in ComplexQueryChain).
    print(router_chain.process("市场趋势分析"))
    print(router_chain.process("年度收益分析"))
    
    # Simple queries: answered directly by the router.
    print(router_chain.process("产品定价策略"))
    print(router_chain.process("销售渠道"))

# Run the smoke test at import time.
test_task_chains()

