% \documentclass{memoir}
% \pagestyle{empty} % 移除页眉页脚
% \setlrmarginsandblock{0pt}{0pt}{*} % 左右边距设为 0
% \setulmarginsandblock{0pt}{0pt}{*} % 上下边距设为 0
% \checkandfixthelayout % 应用布局设置
\documentclass{article}
\usepackage{caption}
% \documentclass{standalone}
\usepackage{geometry}
\geometry{
    % a4paper,.
    top=1cm,
    % top=2.2cm, bottom=2.2cm, 
    bottom=1cm,
    left=0cm, right=0cm,
    % heightrounded,
    % totalheight=infinite,  % 理论上允许无限高度（需谨慎使用）
    % totalheight=\maxdimen  % 理论上允许无限高度（需谨慎使用）
}
% \nopagebreak[4]
\pagestyle{empty} %取消页码数
% \usepackage{adjustbox}
% % 临时调整页面高度
% \newgeometry{
%     top=2.2cm, bottom=2.2cm, 
%     left=3.5cm, right=3.5cm,
%     totalheight=\maxdimen  % 理论上允许无限高度（需谨慎使用）
% }

% \usepackage{atveryend} % 用于在文档末尾执行命令

% \AtVeryEndDocument{
%   \pdfpageheight=\document % 将页面高度设为内容实际高度
% }
% \AllowBreak % 也可在关键位置手动添加，建议全局使用

% \checkandfixthelayout
% \documentclass[a4paper]{article}
\usepackage[linesnumbered,ruled,vlined,algo2e]{algorithm2e}
% \usepackage[linesnumbered,ruled,vlined,algo2e,breakable]{algorithm2e}
% \SetAlgoLined：增添end行
% \DontPrintSemicolon：不显示行末尾的分号
% \SetKwInput{KwInput}{Input}：设置输入
% \SetKwInOut{KwInput}{Input}：设置输入输出
% ruled：标题左对齐，顶部和底部各添加一条线
% linesnumbered：显示行编号
\usepackage{ctex} %注意，这里必须引入ctex,否则中文内容无法输出
% 导入代码块
\usepackage{listings}
\usepackage{xcolor}
\SetKwRepeat{Do}{do}{while}
\SetKwFunction{Try}{try}
\SetKwFunction{Catch}{catch}

\usepackage{fontspec}     % 管理西文字体

\setmainfont{Times New Roman}
% \setmainfont{SimHei}
\setCJKmainfont{SimSun}
% \setCJKmainfont{SimHei}
% \SetKwProg{TryCatch}{Try}{Catch}{}

% 若需数学字体适配，可添加：
% \usepackage{mathspec}     % 数学符号字体适配（可选）

% 自定义代码块样式
\lstset{
    % basicstyle = \zihao{-4}, % 小四号等宽字体（中文需搭配 ctex 宏包）
    % frame=single, % 单线边框
    frame = tb,            % 仅显示上下边框
    numbers=left, % 显示行号
    % numberstyle=\tiny, % 行号字体
    numberstyle=\small, % 行号字体
    % keywordstyle=\color{blue!70}, % 关键字颜色
    % commentstyle=\color{red!50!green!50!blue!50}, % 注释颜色
    % frame=shadowbox, % 为代码块添加阴影框
    % rulesepcolor=\color{red!20!green!20!blue!20}, % 阴影框颜色
    escapeinside=``, % 允许在代码块中使用 LaTeX 命令
    % xleftmargin=2em, xrightmargin=2em, aboveskip=1em, % 设置代码块的边距
    xleftmargin=2em,
    % framesep=8pt, % 边框与代码间距
    breaklines=true, % 自动换行
    % framexleftmargin=2em % 阴影框左边距
}

% 标题样式设置
% \captionsetup[lstlisting]{
%     font = {\small\CJKfamily{SimSun}\fontspec{Times New Roman}},
%     labelfont = bf,
%     labelsep = colon
% }

\begin{document}
% \renewcommand{\thelstlisting}{\Roman{lstlisting}} % 大写罗马数字（I, II, III）
\renewcommand{\lstlistingname}{代码块} % 将 "Listing" 改为 "代码块"



% \lstset{language=python} % 设置代码语言为 Python
% captionpos=b, % 标题位置（b: 底部，t: 顶部）
% title={手动标题 (编号需重置)} % 覆盖默认标题]
% font = {small, \CJKfamily{SimSun}, \fontspec{Times New Roman}},
\begin{lstlisting}[
    caption={模糊测试驱动代码生成核心流程},
    label={lst:fuzz-code-generate},
    language=Python,
    captionpos=b, 
    title={\zihao{-4}驱动代码生成核心流程}]
class Agent_Post_DB:
    """Agent that generates fuzz code, either with a locally loaded HF model
    or through a remote chat-completions API, based on the model config."""

    def __init__(self, model_base: model_list.Model_Base):
        # Keep the shared model configuration and reuse its API client.
        self.model_base = model_base
        self.client = self.model_base.client

    def parse_md(self, contents: str, sep: str = "#####"):
        """Split a markdown-style prompt into chat messages.

        Each segment delimited by sep is "<role>\n<content>"; blank
        segments are skipped. Returns a list of {"role", "content"} dicts.
        """
        messages = []
        for turn in contents.split(sep):
            if turn.strip() == "":
                continue
            # First line is the role; the remainder is the message body.
            role, *inst = turn.split("\n")
            inst = "\n".join(inst).strip()
            messages.append({"role": role.strip(), "content": inst})
        return messages

    def fuzz_code_generate(self, prompt: str, model_name) -> str:
        """Generate fuzz code for prompt; returns the generated text,
        or None when every remote retry fails."""
        max_retries = self.model_base.model_dict['max_retries']
        retry_delay = self.model_base.model_dict['retry_delay']
        model_path = self.model_base.model_dict['local_path']

        quantization_config = BitsAndBytesConfig(load_in_8bit=True)
        device = "cuda" if torch.cuda.is_available() else "cpu"

        messages = self.parse_md(contents=prompt)
        if self.model_base.model_dict['is_local']:
            tokenizer = AutoTokenizer.from_pretrained(model_path)
            if config.GPU_Deploy:
                # 8-bit quantized load spread across available GPUs.
                model = AutoModelForCausalLM.from_pretrained(
                    model_path, device_map="auto",
                    quantization_config=quantization_config)
            else:
                model = AutoModelForCausalLM.from_pretrained(
                    model_path, device_map=device,
                    torch_dtype=torch.bfloat16)
            generator = transformers.pipeline(
                "text-generation",
                tokenizer=tokenizer,
                model=model,
                torch_dtype=torch.bfloat16,
            )
            # NOTE(review): the raw prompt (not the parsed messages) is fed
            # to the local pipeline -- confirm this is intended.
            sequences = generator(
                prompt,
                max_new_tokens=self.model_base.model_dict['max_tokens'],
                do_sample=self.model_base.model_dict['do_sample'],
                temperature=self.model_base.model_dict['temperature'],
                num_return_sequences=self.model_base.model_dict['num_return_sequences'],
            )
            return sequences[0]['generated_text']
        else:
            for _ in range(max_retries):
                try:
                    response = self.client.chat.completions.create(
                        model=model_name,
                        messages=messages,
                        max_tokens=self.model_base.model_dict['max_tokens'],
                    )
                    return response.choices[0].message.content
                # openai>=1.0 exposes exceptions at top level; the old
                # openai.error.* names would raise AttributeError here.
                except openai.RateLimitError as e:
                    print(f"Rate limit error encountered: {e}. Retrying in {retry_delay} seconds...")
                    time.sleep(retry_delay)
                except openai.APIConnectionError as e:
                    print(f"Connection error encountered: {e}. Retrying in {retry_delay} seconds...")
                    time.sleep(retry_delay)
            print("Failed to generate fuzz code after multiple retries.")
            return None
\end{lstlisting}

\end{document}    