# -*- coding: utf-8 -*-

# @Project : fastapi-tutorial
# @Date    : 20240326-1613
# @Author  : robin

import sys

# 文档转换器
# 文档总结精炼和翻译

# 文档切割
# 原理
# 将文档分成小的、有意义的块(句子).
# 将小的块组合成为一个更大的块，直到达到一定的大小.
# 一旦达到一定的大小，接着开始创建与下一个块重叠的部分.
#
# 示例
# 第一个文档分割
# 按字符切割
# 代码文档切割
# 按token来切割

import chardet
def detect_charset(file_path):
    """Return the character encoding of *file_path* as guessed by chardet.

    Reads the whole file in binary mode, feeds the bytes to
    ``chardet.detect`` and returns the detected encoding name
    (may be ``None`` when chardet cannot decide).
    """
    with open(file_path, 'rb') as raw:
        guess = chardet.detect(raw.read())
    return guess['encoding']

# Probe the encodings of the two sample files up front; these module-level
# names are reused by every example below when the files are re-opened.
testfileencoding  = detect_charset("test.txt")
testfileencoding2 = detect_charset("letter.txt")

# 第一个文档切割
# from langchain.text_splitter import RecursiveCharacterTextSplitter
#
# #加载要切割的文档
# with open("test.txt", 'r', encoding=testfileencoding) as f:
#     zuizhonghuanxiang = f.read()
#
# # print(zuizhonghuanxiang)
#
# #初始化切割器
# text_splitter = RecursiveCharacterTextSplitter(
#     chunk_size=50,#切分的文本块大小，一般通过长度函数计算
#     chunk_overlap=20,#切分的文本块重叠大小，一般通过长度函数计算
#     length_function=len,#长度函数,也可以传递tokenize函数
#     add_start_index=True,#是否添加起始索引
# )
#
# text = text_splitter.create_documents([zuizhonghuanxiang])
# print(len(text))
# print(text[0])
# print(text[1])

# Splitting on a fixed separator character
from langchain.text_splitter import CharacterTextSplitter

# Load the document that will be split
with open("test.txt", 'r', encoding=testfileencoding) as src:
    sample_text = src.read()

# Configure the splitter: ~50-char chunks with a 20-char overlap,
# cut on the Chinese full stop rather than the default "\n\n".
text_splitter = CharacterTextSplitter(
    separator="。",            # split marker (default is "\n\n")
    chunk_size=50,             # target chunk size, measured by length_function
    chunk_overlap=20,          # overlap between consecutive chunks
    length_function=len,       # length metric; a tokenizer could be passed instead
    add_start_index=True,      # record each chunk's start offset in metadata
    is_separator_regex=False,  # treat the separator as a literal, not a regex
)
text = text_splitter.create_documents([sample_text])
print(len(text))
print(text[0])

# Splitting source-code documents
from langchain.text_splitter import (
    RecursiveCharacterTextSplitter,
    Language,
)

# Languages with built-in separator rules:
# [e.value for e in Language]

# The code snippet to be split
PYTHON_CODE = """
def hello_world():
    print("Hello, World!")
#调用函数
hello_world()
"""
# Build a splitter pre-configured with Python-aware separators.
code_splitter = RecursiveCharacterTextSplitter.from_language(
    language=Language.PYTHON,
    chunk_size=50,
    chunk_overlap=10,
)
python_docs = code_splitter.create_documents([PYTHON_CODE])
print(len(python_docs))
print(python_docs[0].page_content)

# Splitting by token count
from langchain.text_splitter import CharacterTextSplitter

# Load the document to split
with open("test.txt", 'r', encoding=testfileencoding) as src:
    sample_text = src.read()

# Here chunk sizes are measured in tiktoken tokens, not characters.
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=4000,   # chunk size in tokens
    chunk_overlap=30,  # token overlap between consecutive chunks
)

text = text_splitter.create_documents([sample_text])
print(text[0])

# Document summarisation, refinement and translation
# ! pip install doctran==0.0.14

# Load the document to transform
with open("letter.txt", 'r', encoding=testfileencoding2) as src:
    content = src.read()

# print(content)

import os

from dotenv import load_dotenv

load_dotenv("../.env")

# Raises KeyError when the key is absent from the environment / .env file.
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]

OPENAI_MODEL = "gpt-3.5-turbo"
OPENAI_TOKEN_LIMIT = 8000

from doctran import Doctran

doctran_client = Doctran(
    openai_api_key=OPENAI_API_KEY,
    openai_model=OPENAI_MODEL,
    openai_token_limit=OPENAI_TOKEN_LIMIT,
)
parsed_doc = doctran_client.parse(content=content)

print("总结文档>>>>>>>>>")
# Summarise the document within a 100-token budget.
summary = parsed_doc.summarize(token_limit=100).execute()
print(summary.transformed_content)

print("翻译一下文档>>>>>>>>>")
# Translate the summary (note: the summary, not the full document) into Chinese.
translation = summary.translate(language="chinese").execute()
print(translation.transformed_content)

print("精炼文档>>>>>>>>>")
# Refine: keep only content related to the given topics, drop everything else.
refined = parsed_doc.refine(topics=["marketing", "Development"]).execute()
print(refined.transformed_content)