--------------------------------------------------------------------------------------------------------------


import os

import openai

# Read the API key from the environment variable (never hard-code secrets).
openai.api_key = os.getenv("OPENAI_API_KEY")

# Call OpenAI's legacy text-completion endpoint.
# NOTE(review): `Completion` / text-davinci-003 is the pre-1.0 openai SDK API.
response = openai.Completion.create(
    engine="text-davinci-003",
    prompt="请简要描述企业创新的重要性。",
    max_tokens=50
)

# Print the generated text, stripped of leading/trailing whitespace.
print(response.choices[0].text.strip())


--------------------------------------------------------------------------------------------------------------


import os
# Read the OpenAI API key from the environment; None when the variable is unset.
api_key = os.getenv("OPENAI_API_KEY")
>> bash /path/to/Anaconda3-xxxx-Linux-x86_64.sh
>> conda --version
>> conda create -n langchain-env python=3.8
>> conda activate langchain-env
>> source activate langchain-env
>> python --version
>> conda install pip numpy
>> conda env list
>> conda deactivate
>> conda remove -n langchain-env --all
>> conda install jupyter
>> jupyter notebook


--------------------------------------------------------------------------------------------------------------


import sys
import numpy

# Print which interpreter is running and which NumPy build it sees,
# to confirm the intended conda environment is active.
print("Python Interpreter Path:", sys.executable)
print("Numpy Version:", numpy.__version__)

>> conda activate langchain-env
>> pip install openai


--------------------------------------------------------------------------------------------------------------


>> conda create -n langchain-env python=3.8
>> conda activate langchain-env
>> conda env list
>> conda remove -n langchain-env --all
>> conda install numpy
>> conda update numpy
>> conda remove numpy
>> conda list


import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

# 1. Build a synthetic dataset: 100 samples with 5 features each, values in [0, 100).
np.random.seed(42)
data = np.random.rand(100, 5) * 100

# 2. Min-max normalization: rescale every feature column into [0, 1].
data_min, data_max = data.min(axis=0), data.max(axis=0)
normalized_data = (data - data_min) / (data_max - data_min)

# 3. 计算相似性矩阵（余弦相似度）
def cosine_similarity_matrix(data):
    """Return the pairwise cosine-similarity matrix with the diagonal zeroed.

    Each entry (i, j) is the cosine of the angle between rows i and j of
    *data*; the diagonal is set to 0 so a sample is never reported as its
    own nearest match.
    """
    norms = np.linalg.norm(data, axis=1)
    sim = (data @ data.T) / np.outer(norms, norms)
    np.fill_diagonal(sim, 0)  # exclude trivial self-similarity
    return sim

similarity_matrix = cosine_similarity_matrix(normalized_data)

# 4. Locate the pair of samples with the highest cosine similarity:
# argmax gives the flat index of the maximum; unravel_index turns it
# back into a (row, column) pair.
flat_argmax = np.argmax(similarity_matrix)
most_similar_pairs = np.unravel_index(flat_argmax, similarity_matrix.shape)
print("最相似的数据对索引:", most_similar_pairs)
print("最相似的样本相似度:", similarity_matrix[most_similar_pairs])

# 5. Project the normalized data down to 2D with PCA and plot it,
# highlighting the most-similar pair found above in red.
pca = PCA(n_components=2)
reduced_data = pca.fit_transform(normalized_data)

plt.scatter(reduced_data[:, 0], reduced_data[:, 1], c='blue', label='Data Points')
plt.scatter(
    reduced_data[most_similar_pairs, 0],
    reduced_data[most_similar_pairs, 1],
    c='red',
    label='Most Similar Pair',
    s=100,
)
plt.title("Data Points in 2D after PCA Reduction")
plt.xlabel("Principal Component 1")
plt.ylabel("Principal Component 2")
plt.legend()
plt.show()


--------------------------------------------------------------------------------------------------------------


>> conda env export > environment.yml
>> conda env create -f environment.yml
>> conda create --name new-env --clone langchain-env


--------------------------------------------------------------------------------------------------------------


>> pip install langchain
>> pip install openai
>> pip install requests
>> pip install numpy
>> pip install pandas
>> pip install faiss-cpu
>> pip install faiss-gpu
>> pip install scikit-learn


import openai
import os

# Read the API key from the environment variable.
openai.api_key = os.getenv("OPENAI_API_KEY")
# Legacy completion call: 50-token answer from text-davinci-003.
response = openai.Completion.create(
    engine="text-davinci-003",
    prompt="请简要描述人工智能的基本概念。",
    max_tokens=50
)

# Print the generated text.
print("生成的文本:", response.choices[0].text.strip())


# temperature controls randomness: lower values are more deterministic.
# NOTE(review): this response and the next are overwritten before being
# printed — presumably intentional in a parameter-by-parameter demo.
response = openai.Completion.create(
    engine="text-davinci-003",
    prompt="请描述太阳系的组成。",
    max_tokens=50,
    temperature=0.7
)
# top_p nucleus sampling: only tokens within the top 90% probability mass.
response = openai.Completion.create(
    engine="text-davinci-003",
    prompt="简要介绍机器学习的应用。",
    max_tokens=50,
    top_p=0.9
)
# n=3 requests three independent completions in a single call.
response = openai.Completion.create(
    engine="text-davinci-003",
    prompt="给出三个AI应用的示例。",
    max_tokens=50,
    n=3
)

# Print each of the three completions returned by the n=3 call above.
for i, choice in enumerate(response.choices):
    print(f"生成的文本 {i + 1}: {choice.text.strip()}")
# stop sequences truncate generation as soon as one is produced.
response = openai.Completion.create(
    engine="text-davinci-003",
    prompt="编写一段简单的Python代码，计算两个数的和。",
    max_tokens=50,
    stop=["# End of code"]
)

print("生成的代码:", response.choices[0].text.strip())
# Wrap the call so network/auth failures are reported instead of crashing.
try:
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt="介绍Python的基本功能。",
        max_tokens=50
    )
    print(response.choices[0].text.strip())
except Exception as e:
    print("API调用失败:", e)


--------------------------------------------------------------------------------------------------------------


>> pip install tqdm
from tqdm import tqdm
import time

# Render a live progress bar over 100 iterations.
for i in tqdm(range(100)):
    time.sleep(0.1)  # simulate a long-running task


>> pip install matplotlib seaborn
# FIX: the original lines were garbled by text extraction — several import
# statements were fused onto single lines ("as pltimport seaborn..."),
# which is a syntax error. Restored one statement per line.
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np

# Draw a histogram (with a KDE curve) of 100 standard-normal samples.
data = np.random.randn(100)
sns.histplot(data, kde=True)
plt.title("数据分布图")
plt.show()

import logging

# Timestamped, leveled log output for the project.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logging.info("LangChain项目启动")


--------------------------------------------------------------------------------------------------------------


>> pip install python-dotenv



# FIX: the original lines were garbled by text extraction — the import and
# print statements were fused onto single lines, which is a syntax error.
from dotenv import load_dotenv
import os

# Load variables from a local .env file into the process environment,
# then read the API key from it.
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
print("API密钥:", api_key)


--------------------------------------------------------------------------------------------------------------


>> pip install joblib
# FIX: the original lines were garbled by text extraction — imports and the
# trailing print were fused onto single lines, which is a syntax error.
from joblib import Parallel, delayed
import time

def process_data(i):
    """Simulate one second of work, then return the square of i."""
    time.sleep(1)
    return i * i

# Fan the 10 tasks out across 4 worker processes.
results = Parallel(n_jobs=4)(delayed(process_data)(i) for i in range(10))
print("计算结果:", results)


--------------------------------------------------------------------------------------------------------------


>> pip install scipy
# FIX: the original lines were garbled by text extraction — the imports and
# the trailing print were fused onto single lines, which is a syntax error.
from scipy.spatial.distance import cosine
import numpy as np

vec1 = np.array([1, 2, 3])
vec2 = np.array([2, 3, 4])
# scipy's `cosine` returns the cosine *distance*; subtract from 1 for similarity.
similarity = 1 - cosine(vec1, vec2)
print("余弦相似度:", similarity)


--------------------------------------------------------------------------------------------------------------


>> pip install transformers
from transformers import pipeline
summarizer = pipeline("summarization")
text = "The LangChain project aims to integrate large language models with chain-based tasks..."
summary = summarizer(text, max_length=50, min_length=25, do_sample=False)print("总结:", summary[0]['summary_text'])
