--------------------------------------------------------------------------------------------------------------


import random
from collections import defaultdict

# 1. Data preparation: a small sample corpus (whitespace-separated tokens).
text = "今天天气很好 我想出去走走 但是有点冷 不过阳光明媚 适合散步"

# 2. Build the n-gram frequency table from a whitespace-tokenized text.
def build_ngrams(text, n=2):
    """Map each (n-1)-token context tuple to the list of observed next tokens."""
    ngrams = defaultdict(list)
    tokens = text.split()

    for start in range(len(tokens) - n + 1):
        context = tuple(tokens[start:start + n - 1])
        ngrams[context].append(tokens[start + n - 1])

    return ngrams

# 3. Generate text by sampling successors from the n-grams table.
def generate_text(ngrams, start_words, num_words=10):
    """Extend *start_words* with up to *num_words* sampled tokens.

    Sampling stops early as soon as the current context has no recorded
    successors in *ngrams*.
    """
    context = start_words
    window = len(start_words)  # context length stays fixed during generation
    output = list(start_words)

    for _ in range(num_words):
        candidates = ngrams.get(context)
        if not candidates:
            break
        output.append(random.choice(candidates))
        context = tuple(output[-window:])

    return " ".join(output)

# Build the n-grams table from the sample corpus.
ngrams = build_ngrams(text, n=2)
print("N-grams 统计表:", dict(ngrams))

# Generate text from the 2-grams table.
# Bug fix: the start context must be an existing table key. The corpus
# tokenizes into whole phrases such as "今天天气很好", so the previous start
# ("今天天气",) never matched and generation stopped immediately.
generated_text = generate_text(ngrams, \
start_words=("今天天气很好",), num_words=5)
print("生成的文本:", generated_text)


--------------------------------------------------------------------------------------------------------------


import numpy as np
from collections import defaultdict

# 1. Create a random 5x5 RGB image; each pixel has three channels (R, G, B).
np.random.seed(42)  # fix the seed so the output is reproducible
image = np.random.randint(0, 256, (5, 5, 3), dtype=np.uint8)
print("原始图像数据:\n", image)

# 2. Build an n x n neighbourhood "N-grams" model over the image.
def build_image_ngrams(image, n=3):
    """Map each flattened n x n pixel patch to the centre pixels observed in it.

    NOTE(review): the key is the *full* flattened neighbourhood, centre pixel
    included — the original comment claimed the centre was excluded, but the
    code never removed it.
    """
    height, width, _ = image.shape
    table = defaultdict(list)

    for row in range(height - n + 1):
        for col in range(width - n + 1):
            patch = image[row:row + n, col:col + n]
            centre = image[row + n // 2, col + n // 2]
            table[tuple(patch.flatten())].append(tuple(centre))

    return table

# Build the 3x3-neighbourhood n-grams table for the sample image and dump it.
ngrams = build_image_ngrams(image)
print("\n3x3邻域N-grams统计表:")
for k, v in ngrams.items():
    print("邻域:", k, "中心像素:", v)

# 3. Predict the centre pixel of a neighbourhood from the n-grams table.
def predict_center_pixel(ngrams, neighborhood):
    """Return the mean recorded centre pixel for this patch, or black if unseen."""
    observed = ngrams.get(tuple(neighborhood.flatten()))
    if observed is None:
        return (0, 0, 0)  # unknown pattern: fall back to black
    # Average over all centre pixels recorded for this neighbourhood.
    return tuple(np.mean(observed, axis=0).astype(int))

# Example: predict the centre pixel for a neighbourhood taken from the image.
# This patch was enumerated while building the table, so the lookup hits.
test_neighborhood = image[1:4, 1:4]  # extract one 3x3 neighbourhood
predicted_pixel = predict_center_pixel(ngrams, test_neighborhood)
print("\n测试邻域:\n", test_neighborhood)
print("预测的中心像素:", predicted_pixel)


--------------------------------------------------------------------------------------------------------------


import openai
import os

# Configure the API key from the environment.
openai.api_key = os.getenv("OPENAI_API_KEY")

# Generate text with a GPT completion model.
# NOTE(review): openai.Completion and the "text-davinci-003" engine belong to
# the legacy (pre-1.0) OpenAI SDK — confirm the installed SDK version.
response = openai.Completion.create(
    engine="text-davinci-003",
    prompt="描述今天的天气情况",
    max_tokens=50
)
print("GPT生成的文本:", response.choices[0].text.strip())


--------------------------------------------------------------------------------------------------------------


from collections import defaultdict, Counter

# A small sample corpus (whitespace-separated tokens).
text = "今天天气很好 我想出去走走 但是有点冷 不过阳光明媚 适合散步 我喜欢晴天"

# Build a bigram frequency model: word -> Counter of the words that follow it.
def build_word_model(text):
    """Count, for every token in *text*, how often each token follows it."""
    model = defaultdict(Counter)
    tokens = text.split()
    for prev, nxt in zip(tokens, tokens[1:]):
        model[prev][nxt] += 1
    return model

# Predict the most likely next word for *word*.
def predict_next_word(word_model, word):
    """Return the highest-frequency successor of *word*, or None when unseen."""
    if word not in word_model:
        return None
    # most_common(1) yields [(word, count)]; unwrap to the word itself.
    return word_model[word].most_common(1)[0][0]

# Build the frequency model and predict a next word.
word_model = build_word_model(text)
print("词频模型:", dict(word_model))
# Bug fix: the corpus tokenizes into whole phrases such as "今天天气很好";
# the single word "天气" is never a token, so the lookup always returned None.
# Query with an actual token instead.
next_word = predict_next_word(word_model, "不过阳光明媚")
print("预测的下一个词:", next_word)


--------------------------------------------------------------------------------------------------------------


import numpy as np

# Example word vectors for a single query/key/value attention step.
query_vector = np.array([1, 0, 1])
key_vector = np.array([0, 1, 0])
value_vector = np.array([1, 2, 3])

# Attention score = query . key (dot product).
# NOTE(review): these two vectors are orthogonal, so the score — and the
# weighted output — is all zeros; a non-orthogonal pair would be more
# illustrative for a demo.
attention_score = query_vector @ key_vector

# Weighted output: the score scales the value vector.
attention_output = value_vector * attention_score
print("注意力分数:", attention_score)
print("加权求和输出:", attention_output)


--------------------------------------------------------------------------------------------------------------


import openai
import os

# Configure the API key from the environment.
openai.api_key = os.getenv("OPENAI_API_KEY")

# Generate text with a GPT-3 completion model.
# NOTE(review): openai.Completion / "text-davinci-003" belong to the legacy
# (pre-1.0) OpenAI SDK — confirm the installed SDK version supports them.
response = openai.Completion.create(
    engine="text-davinci-003",
    prompt="简述机器学习的基本概念。",
    max_tokens=50,
    temperature=0.7
)
print("GPT-3生成的文本:", response.choices[0].text.strip())


--------------------------------------------------------------------------------------------------------------


# Install the dependency first (shell): pip install openai
import openai
import os

# Configure the API key from the environment.
openai.api_key = os.getenv("OPENAI_API_KEY")

# Wrapper around the Chat Completions endpoint for multi-turn dialogue.
def chat_with_model(messages, model_name="gpt-3.5-turbo", \
temperature=0.7, max_tokens=100):
    """Send the running *messages* history to the chat model and return its reply text."""
    reply = openai.ChatCompletion.create(
        model=model_name,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
    )
    return reply['choices'][0]['message']['content']

# Running history of the conversation, in the Chat Completions message format.
conversation_history = []

# Example dialogue loop: keep asking and responding until the user quits.
print("请输入'退出'来结束对话。")
while True:
    # Read the user's turn.
    user_input = input("用户: ")
    if user_input.lower() == "退出":
        print("对话已结束。")
        break

    # Append the user turn so the model sees the full context.
    conversation_history.append({"role": "user", "content": user_input})
    
    # Generate the assistant's reply from the whole history.
    model_reply = chat_with_model(conversation_history)
    
    # Record the assistant turn as well, keeping the history alternating.
    conversation_history.append({"role": "assistant", \
"content": model_reply})
    
    # Show the reply.
    print("助手:", model_reply)


--------------------------------------------------------------------------------------------------------------


import openai
import os

# Configure the API key from the environment.
openai.api_key = os.getenv("OPENAI_API_KEY")

# Generic LLM text-generation helper built on the legacy Completion endpoint.
def generate_text(prompt, model_name="text-davinci-003", \
temperature=0.7, max_tokens=100, top_p=0.9):
    """Return the stripped completion text for *prompt* with the given sampling knobs."""
    completion = openai.Completion.create(
        engine=model_name,
        prompt=prompt,
        max_tokens=max_tokens,
        top_p=top_p,
        temperature=temperature,
    )
    return completion['choices'][0]['text'].strip()

# Example task: generate a brief explanation of machine-learning concepts.
prompt_text = "简述机器学习的基本概念。"
generated_text = generate_text(prompt_text)
print("生成的文本:", generated_text)


--------------------------------------------------------------------------------------------------------------


# Example: plug an LLM into a task chain.
# NOTE(review): this does not match the real langchain API — LLMChain expects
# an LLM object plus a PromptTemplate (not a bare function and a raw string),
# and chain.run() requires input values. Treat as illustrative pseudocode and
# verify against the langchain documentation before use.
from langchain import LLMChain

# Define a task chain.
llm_chain = LLMChain(
    llm=generate_text,  # uses the custom text-generation function
    prompt="简述机器学习在金融领域的应用。",
)

# Run the chain.
result = llm_chain.run()
print("LLM模型任务链生成结果:", result)


--------------------------------------------------------------------------------------------------------------


import openai
import os

# Configure the API key from the environment.
openai.api_key = os.getenv("OPENAI_API_KEY")
# One-off completion call via the legacy openai.Completion endpoint.
response = openai.Completion.create(
    engine="text-davinci-003",
    prompt="简述机器学习的基本概念。",
    max_tokens=50,
    temperature=0.7
)

print("生成的文本:", response.choices[0].text.strip())


def generate_text(prompt, engine="text-davinci-003", \
max_tokens=100, temperature=0.7, top_p=0.9):
    """Call the legacy Completion endpoint and return the stripped text."""
    completion = openai.Completion.create(
        prompt=prompt,
        engine=engine,
        temperature=temperature,
        top_p=top_p,
        max_tokens=max_tokens,
    )
    return completion.choices[0].text.strip()

# Example call with default sampling parameters.
prompt_text = "描述人工智能的主要应用领域。"
generated_text = generate_text(prompt_text)
print("生成的文本:", generated_text)


# Formal, short answer: low temperature keeps the output focused.
response = generate_text("请简要解释机器学习的工作原理。", \
max_tokens=50, temperature=0.3)
print("正式简短回答:", response)

# Creative, longer text: high temperature increases variety.
response = generate_text("想象一个未来的智能城市，并描述它的特点。", \
max_tokens=150, temperature=0.9)
print("创意长文本:", response)


def generate_text_with_error_handling(prompt, \
engine="text-davinci-003", max_tokens=100, temperature=0.7):
    """Like generate_text, but returns None (after logging) on any OpenAI API error."""
    try:
        completion = openai.Completion.create(
            engine=engine,
            prompt=prompt,
            max_tokens=max_tokens,
            temperature=temperature
        )
    except openai.error.OpenAIError as e:
        print("API调用失败:", e)
        return None
    return completion.choices[0].text.strip()

# Example call through the error-handling wrapper.
result = generate_text_with_error_handling("解释深度学习的优势。")
if result:
    print("生成的文本:", result)
else:
    print("生成失败。")



# Task prompt for generating an onboarding checklist.
task_prompt = "生成一个新员工入职的详细任务清单。"

# Generate the task checklist.
task_list = generate_text(task_prompt, max_tokens=150, temperature=0.6)
print("任务清单:\n", task_list)


--------------------------------------------------------------------------------------------------------------


def generate_ai_text(prompt, model_name="text-davinci-003", \
temperature=0.6, max_tokens=100, top_p=0.9):
    """Generate text for *prompt*; returns None (after logging) on API failure."""
    try:
        completion = openai.Completion.create(
            engine=model_name,
            prompt=prompt,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=top_p,
        )
    except openai.error.OpenAIError as e:
        print("API调用失败:", e)
        return None
    return completion.choices[0].text.strip()
# The concrete task prompt.
education_prompt = "简要描述人工智能在教育中的应用，包括个性化学习、智能评估和虚拟辅导。"

# Run the generation.
generated_education_text = generate_ai_text(education_prompt)

# Show the result (None signals an API failure handled inside the helper).
if generated_education_text:
    print("生成的文本:\n", generated_education_text)
else:
    print("文本生成失败。")


--------------------------------------------------------------------------------------------------------------


# Prompt for a concise product description.
product_prompt = "生成一段简洁的产品介绍，产品名称为：智能家居助手。描述其主要功能和特点。"

# Generate the product blurb with a moderate temperature for consistency.
product_description = generate_ai_text(product_prompt, \
temperature=0.5, max_tokens=100)
print("产品介绍:\n", product_description)


--------------------------------------------------------------------------------------------------------------


from langchain.base_model import BaseModel

class CustomLangChainModel(BaseModel):
    """Abstract LangChain-style model wrapper; subclasses supply generate()."""

    def __init__(self, model_name, temperature=0.7, max_tokens=100):
        self.model_name = model_name
        self.max_tokens = max_tokens
        self.temperature = temperature

    def generate(self, prompt):
        """Abstract hook: concrete generation logic lives in subclasses."""
        raise NotImplementedError("自定义生成逻辑需要在子类中实现")
import openai

class CustomLangChainModel(BaseModel):
    """LangChain-style wrapper generating via the legacy OpenAI Completion API."""

    def __init__(self, model_name, temperature=0.7, max_tokens=100):
        self.model_name = model_name
        self.max_tokens = max_tokens
        self.temperature = temperature

    def generate(self, prompt):
        """Return the stripped completion text for *prompt*."""
        completion = openai.Completion.create(
            engine=self.model_name,
            prompt=prompt,
            max_tokens=self.max_tokens,
            temperature=self.temperature,
        )
        return completion.choices[0].text.strip()


# Configure the API key.
openai.api_key = "your_openai_api_key_here"  # replace with a real API key

# Create the custom model instance.
custom_model = CustomLangChainModel(\
model_name="text-davinci-003", temperature=0.5, max_tokens=50)

# Generate text.
prompt_text = "请简述机器学习的基本概念。"
generated_text = custom_model.generate(prompt_text)

# Show the generated text.
print("生成的文本:", generated_text)


--------------------------------------------------------------------------------------------------------------


class CustomLangChainModel(BaseModel):
    """Completion-backed wrapper that swallows API errors, returning None."""

    def __init__(self, model_name, temperature=0.7, max_tokens=100):
        self.model_name = model_name
        self.max_tokens = max_tokens
        self.temperature = temperature

    def generate(self, prompt):
        """Return generated text, or None (after logging) when the API call fails."""
        try:
            completion = openai.Completion.create(
                engine=self.model_name,
                prompt=prompt,
                max_tokens=self.max_tokens,
                temperature=self.temperature,
            )
        except openai.error.OpenAIError as e:
            print("API调用失败:", e)
            return None
        return completion.choices[0].text.strip()


--------------------------------------------------------------------------------------------------------------


from langchain import LLMChain

# Create a task chain from the custom model.
# NOTE(review): the real LLMChain expects a PromptTemplate and run() takes
# input values — verify this usage against the installed langchain version.
llm_chain = LLMChain(llm=custom_model, prompt="请描述深度学习的核心优势。")

# Run the chain.
result = llm_chain.run()
print("任务链生成的结果:", result)


--------------------------------------------------------------------------------------------------------------


import openai
from langchain.base_model import BaseModel

class CustomLangChainModel(BaseModel):
    """Completion wrapper exposing the full set of sampling knobs."""

    def __init__(self, model_name, temperature=0.7, \
max_tokens=100, top_p=0.9, frequency_penalty=0, presence_penalty=0):
        self.model_name = model_name
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.frequency_penalty = frequency_penalty
        self.presence_penalty = presence_penalty

    def generate(self, prompt):
        """Return generated text, or None (after logging) when the API call fails."""
        try:
            completion = openai.Completion.create(
                engine=self.model_name,
                prompt=prompt,
                temperature=self.temperature,
                max_tokens=self.max_tokens,
                top_p=self.top_p,
                frequency_penalty=self.frequency_penalty,
                presence_penalty=self.presence_penalty,
            )
        except openai.error.OpenAIError as e:
            print("API调用失败:", e)
            return None
        return completion.choices[0].text.strip()


# Create a model instance with a low temperature for deterministic output.
custom_model = CustomLangChainModel(\
model_name="text-davinci-003", temperature=0.3, max_tokens=50)

prompt_text = "简要描述人工智能在金融领域的应用。"
print("低temperature（0.3）:")
print(custom_model.generate(prompt_text))

custom_model.temperature = 0.9  # raise temperature for more varied output
print("高temperature（0.9）:")
print(custom_model.generate(prompt_text))


--------------------------------------------------------------------------------------------------------------


# Compare short vs long outputs via max_tokens.
custom_model = CustomLangChainModel(\
model_name="text-davinci-003", temperature=0.5, max_tokens=30)
print("短文本（max_tokens=30）:")
print(custom_model.generate(prompt_text))

custom_model.max_tokens = 100  # allow a longer completion
print("长文本（max_tokens=100）:")
print(custom_model.generate(prompt_text))


# Compare nucleus-sampling breadth via top_p.
custom_model = CustomLangChainModel(\
model_name="text-davinci-003", temperature=0.5, max_tokens=50, top_p=0.5)
print("较低top_p（0.5）:")
print(custom_model.generate(prompt_text))

custom_model.top_p = 1  # widen the candidate-token pool
print("较高top_p（1）:")
print(custom_model.generate(prompt_text))


# Compare repetition behaviour via frequency_penalty.
custom_model = CustomLangChainModel(model_name=\
"text-davinci-003", temperature=0.5, max_tokens=50, frequency_penalty=0)
print("无重复惩罚（frequency_penalty=0）:")
print(custom_model.generate(prompt_text))

custom_model.frequency_penalty = 1  # penalize repeated tokens
print("高重复惩罚（frequency_penalty=1）:")
print(custom_model.generate(prompt_text))


--------------------------------------------------------------------------------------------------------------


# A minimal in-memory key/value cache backed by a plain dict.
class InMemoryCache:
    """Tiny dict-backed cache with get/set semantics."""

    def __init__(self):
        # Backing store for cached key/value pairs.
        self.cache = {}

    def get(self, key):
        """Return the cached value for *key*, or None when absent."""
        return self.cache.get(key)

    def set(self, key, value):
        """Store *value* under *key*, overwriting any previous entry."""
        self.cache[key] = value

# Example usage: a process-local cache instance.
cache = InMemoryCache()


import openai

class CustomLangChainModel:
    """OpenAI completion wrapper with an optional read-through cache."""

    def __init__(self, model_name, temperature=0.7, \
max_tokens=100, cache=None):
        self.model_name = model_name
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.cache = cache  # optional cache object exposing get/set

    def generate(self, prompt):
        """Return text for *prompt*, serving from the cache when possible."""
        # Step 1: serve a cached answer when one exists for this exact prompt.
        if self.cache:
            hit = self.cache.get(prompt)
            if hit:
                print("从缓存中获取结果...")
                return hit

        # Step 2: cache miss — call the OpenAI completion endpoint.
        print("缓存中无结果，调用模型生成...")
        completion = openai.Completion.create(
            engine=self.model_name,
            prompt=prompt,
            temperature=self.temperature,
            max_tokens=self.max_tokens
        )
        text = completion.choices[0].text.strip()

        # Step 3: remember the answer for subsequent identical prompts.
        if self.cache:
            self.cache.set(prompt, text)

        return text


# Configure the API key.
openai.api_key = "your_openai_api_key_here"  # replace with a real API key

# Create the in-memory cache instance.
cache = InMemoryCache()

# Create the model instance with the cache attached.
custom_model = CustomLangChainModel(model_name=\
"text-davinci-003", temperature=0.5, max_tokens=50, cache=cache)

# Sample prompt.
prompt_text = "简要描述机器学习的应用。"

# First call: goes to the model and stores the result in the cache.
print("第一次生成调用结果:")
generated_text = custom_model.generate(prompt_text)
print("生成的文本:", generated_text)

# Second call with the same prompt: served straight from the cache.
print("\n第二次生成调用结果:")
generated_text = custom_model.generate(prompt_text)
print("生成的文本:", generated_text)


--------------------------------------------------------------------------------------------------------------


import json
import os

class FileCache:
    """JSON-file-backed key/value cache persisted across runs."""

    def __init__(self, cache_file="cache.json"):
        self.cache_file = cache_file
        # Create an empty JSON object on disk when the cache file is missing.
        if not os.path.exists(self.cache_file):
            with open(self.cache_file, 'w') as f:
                json.dump({}, f)

    def _load_cache(self):
        # Read the whole cache file into a dict.
        with open(self.cache_file, 'r') as f:
            return json.load(f)

    def _save_cache(self, cache_data):
        # Overwrite the cache file with the given mapping.
        with open(self.cache_file, 'w') as f:
            json.dump(cache_data, f)

    def get(self, key):
        """Return the cached value for *key*, or None when absent."""
        return self._load_cache().get(key)

    def set(self, key, value):
        """Persist *value* under *key* in the cache file."""
        data = self._load_cache()
        data[key] = value
        self._save_cache(data)


import openai

class CustomLangChainModel:
    """Completion wrapper that reads/writes a file-backed cache around API calls."""

    def __init__(self, model_name, temperature=0.7, \
max_tokens=100, cache=None):
        self.model_name = model_name
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.cache = cache  # file cache instance (get/set interface)

    def generate(self, prompt):
        """Return text for *prompt*, consulting the file cache first."""
        # Step 1: serve from the file cache when this prompt was seen before.
        if self.cache:
            cached = self.cache.get(prompt)
            if cached:
                print("从文件缓存中获取结果...")
                return cached

        # Step 2: cache miss — call the OpenAI completion endpoint.
        print("文件缓存中无结果，调用模型生成...")
        completion = openai.Completion.create(
            engine=self.model_name,
            prompt=prompt,
            temperature=self.temperature,
            max_tokens=self.max_tokens
        )
        text = completion.choices[0].text.strip()

        # Step 3: persist the fresh result for future calls.
        if self.cache:
            self.cache.set(prompt, text)

        return text


# Configure the API key.
openai.api_key = "your_openai_api_key_here"  # replace with a real API key

# Create the file cache instance.
file_cache = FileCache("cache.json")

# Create the model instance with the file cache attached.
custom_model = CustomLangChainModel(model_name=\
"text-davinci-003", temperature=0.5, max_tokens=50, cache=file_cache)

# Sample prompt.
prompt_text = "简要描述机器学习的应用。"

# First call: goes to the model and stores the result in the file cache.
print("第一次生成调用结果:")
generated_text = custom_model.generate(prompt_text)
print("生成的文本:", generated_text)

# Second call with the same prompt: served straight from the file cache.
print("\n第二次生成调用结果:")
generated_text = custom_model.generate(prompt_text)
print("生成的文本:", generated_text)


--------------------------------------------------------------------------------------------------------------


# Setup (shell): install and start Redis, then install the Python client.
#   sudo apt update
#   sudo apt install redis-server   # Debian/Ubuntu; on macOS: brew install redis
#   sudo service redis-server start
#   redis-cli ping                  # should reply PONG
#   pip install redis
import redis
import json

class RedisCache:
    """Redis-backed cache storing JSON-serialized values with a TTL."""

    def __init__(self, host='localhost', port=6379, db=0):
        # Open a connection to the Redis server.
        self.client = redis.Redis(host=host, port=port, db=db)

    def get(self, key):
        """Return the JSON-decoded value for *key*, or None when absent."""
        raw = self.client.get(key)
        if raw:
            return json.loads(raw)
        return None

    def set(self, key, value, expiration=3600):
        """Store *value* as JSON under *key* with an expiry in seconds."""
        self.client.setex(key, expiration, json.dumps(value))

    def delete(self, key):
        """Remove *key* from the cache."""
        self.client.delete(key)


import openai

class CustomLangChainModel:
    """Completion wrapper that reads/writes a Redis cache around API calls."""

    def __init__(self, model_name, temperature=0.7, \
max_tokens=100, cache=None):
        self.model_name = model_name
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.cache = cache  # Redis cache instance (get/set interface)

    def generate(self, prompt):
        """Return text for *prompt*, consulting the Redis cache first."""
        # Step 1: serve from Redis when this prompt was cached earlier.
        if self.cache:
            cached = self.cache.get(prompt)
            if cached:
                print("从Redis缓存中获取结果...")
                return cached

        # Step 2: cache miss — call the OpenAI completion endpoint.
        print("Redis缓存中无结果，调用模型生成...")
        completion = openai.Completion.create(
            engine=self.model_name,
            prompt=prompt,
            temperature=self.temperature,
            max_tokens=self.max_tokens
        )
        text = completion.choices[0].text.strip()

        # Step 3: store the fresh result in Redis for future calls.
        if self.cache:
            self.cache.set(prompt, text)

        return text


# Configure the API key.
openai.api_key = "your_openai_api_key_here"  # replace with a real API key

# Create the Redis cache instance.
redis_cache = RedisCache()

# Create the model instance with the Redis cache attached.
custom_model = CustomLangChainModel(model_name=\
"text-davinci-003", temperature=0.5, max_tokens=50, cache=redis_cache)

# Sample prompt.
prompt_text = "简要描述深度学习的应用。"

# First call: goes to the model and stores the result in Redis.
print("第一次生成调用结果:")
generated_text = custom_model.generate(prompt_text)
print("生成的文本:", generated_text)

# Second call with the same prompt: served straight from Redis.
print("\n第二次生成调用结果:")
generated_text = custom_model.generate(prompt_text)
print("生成的文本:", generated_text)


--------------------------------------------------------------------------------------------------------------


import zlib

# Compress a JSON-serialized value before storing it in Redis (1h TTL).
def set_compressed_cache(key, value):
    payload = json.dumps(value).encode('utf-8')
    redis_cache.client.setex(key, 3600, zlib.compress(payload))

# Fetch and decompress a value previously stored with set_compressed_cache.
def get_compressed_cache(key):
    raw = redis_cache.client.get(key)
    if raw:
        return json.loads(zlib.decompress(raw).decode('utf-8'))
    return None
