{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b8c03b60-cf01-4eb8-96b6-33569c3838ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "import http.client\n",
    "import json\n",
    "import os\n",
    "\n",
    "# Working-directory layout for this experiment run.\n",
    "path = 'data/'\n",
    "version ='L4090-v012'\n",
    "# Create every output folder up front so later writes cannot fail.\n",
    "# (Dropped the pointless f-string prefixes: these paths have no placeholders.)\n",
    "os.makedirs(path+'models',exist_ok=True)\n",
    "os.makedirs(path+'feature',exist_ok=True)\n",
    "os.makedirs(path+'feature_importance',exist_ok=True)\n",
    "os.makedirs(path+'submissions',exist_ok=True)\n",
    "os.makedirs(path+'logs',exist_ok=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "5fc2049f-d96e-4866-9cb3-9b509e3eda95",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>apis</th>\n",
       "      <th>user_messages</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>[{'name': 'query_block_info', 'description': '...</td>\n",
       "      <td>[我想查一下钱包地址0x742d35Cc6634C0532925a3b844Bc454e44...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>[{'name': 'extract_quotes_by_theme', 'descript...</td>\n",
       "      <td>[Construct a question and provide a detailed a...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>2</td>\n",
       "      <td>[{'name': 'soil_remediation', 'description': '...</td>\n",
       "      <td>[那为什么其他模型没有做过这种尝试？, 我们最近发现Sunny Acres Resident...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>3</td>\n",
       "      <td>[{'name': 'configure_uart', 'description': '配置...</td>\n",
       "      <td>[How can I change my notification settings on ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>4</td>\n",
       "      <td>[{'name': 'dynasty_archives', 'description': '...</td>\n",
       "      <td>[创业公司智慧园大厦装修预算如何控制？, 我想查看一下2023-10-02的考勤情况。, 关...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2284</th>\n",
       "      <td>2284</td>\n",
       "      <td>[{'name': 'export_tariff_lookup', 'description...</td>\n",
       "      <td>[我想了解一下，如果出口500件商品到中国，关税会是多少？, 这些商品是红酒，单位是瓶，适用...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2285</th>\n",
       "      <td>2285</td>\n",
       "      <td>[{'name': 'dynamic_pricing', 'description': '根...</td>\n",
       "      <td>[北京在2000-2020年期间的城市化进程对环境和居民生活的影响是什么？人口增长率为3%。...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2286</th>\n",
       "      <td>2286</td>\n",
       "      <td>[{'name': 'tax_simulation', 'description': '模拟...</td>\n",
       "      <td>[I need to download a CSV file from a webpage ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2287</th>\n",
       "      <td>2287</td>\n",
       "      <td>[{'name': 'warehouse_inventory', 'description'...</td>\n",
       "      <td>[小王今年30岁，小李今年40岁，问再过多少年小王的年龄能超过小李？, 我想了解一下员工67...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2288</th>\n",
       "      <td>2288</td>\n",
       "      <td>[{'name': 'public_spending_analysis', 'descrip...</td>\n",
       "      <td>[我正在研究中国不同朝代的历史，想获取一些详细的档案资料。对于唐朝，我特别关注政治和文化方面...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>2289 rows × 3 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "        id                                               apis  \\\n",
       "0        0  [{'name': 'query_block_info', 'description': '...   \n",
       "1        1  [{'name': 'extract_quotes_by_theme', 'descript...   \n",
       "2        2  [{'name': 'soil_remediation', 'description': '...   \n",
       "3        3  [{'name': 'configure_uart', 'description': '配置...   \n",
       "4        4  [{'name': 'dynasty_archives', 'description': '...   \n",
       "...    ...                                                ...   \n",
       "2284  2284  [{'name': 'export_tariff_lookup', 'description...   \n",
       "2285  2285  [{'name': 'dynamic_pricing', 'description': '根...   \n",
       "2286  2286  [{'name': 'tax_simulation', 'description': '模拟...   \n",
       "2287  2287  [{'name': 'warehouse_inventory', 'description'...   \n",
       "2288  2288  [{'name': 'public_spending_analysis', 'descrip...   \n",
       "\n",
       "                                          user_messages  \n",
       "0     [我想查一下钱包地址0x742d35Cc6634C0532925a3b844Bc454e44...  \n",
       "1     [Construct a question and provide a detailed a...  \n",
       "2     [那为什么其他模型没有做过这种尝试？, 我们最近发现Sunny Acres Resident...  \n",
       "3     [How can I change my notification settings on ...  \n",
       "4     [创业公司智慧园大厦装修预算如何控制？, 我想查看一下2023-10-02的考勤情况。, 关...  \n",
       "...                                                 ...  \n",
       "2284  [我想了解一下，如果出口500件商品到中国，关税会是多少？, 这些商品是红酒，单位是瓶，适用...  \n",
       "2285  [北京在2000-2020年期间的城市化进程对环境和居民生活的影响是什么？人口增长率为3%。...  \n",
       "2286  [I need to download a CSV file from a webpage ...  \n",
       "2287  [小王今年30岁，小李今年40岁，问再过多少年小王的年龄能超过小李？, 我想了解一下员工67...  \n",
       "2288  [我正在研究中国不同朝代的历史，想获取一些详细的档案资料。对于唐朝，我特别关注政治和文化方面...  \n",
       "\n",
       "[2289 rows x 3 columns]"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the preliminary (A) and final (B) competition test sets,\n",
    "# one JSON record per line.\n",
    "test_df_a=pd.read_json('data/合成数据初赛测试集.jsonl',lines=True)\n",
    "test_df_b=pd.read_json('data/合成数据决赛赛题.jsonl',lines=True)\n",
    "test_df_b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3232bc60-35b1-4c66-a04b-188dc5f6d95b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: every API schema is expected to declare top-level type 'object';\n",
    "# print any spec that deviates.\n",
    "for api_list in test_df_b['apis']:\n",
    "    for spec in api_list:\n",
    "        if spec['parameters']['type'] != 'object':\n",
    "            print(spec)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "3783c6b2-6afe-46bb-89b1-c285ad6457cc",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Sanity check: each parameter schema should contain exactly the keys\n",
    "# 'properties', 'required' and 'type'; print any spec that deviates.\n",
    "expected_keys = ['properties', 'required', 'type']\n",
    "for api_list in test_df_b['apis']:\n",
    "    for spec in api_list:\n",
    "        if sorted(spec['parameters'].keys()) != expected_keys:\n",
    "            print(spec)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "17c3d7dc-dc23-4123-b255-3e73ddcd1815",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Collect the declared type of every parameter across all APIs in test set B.\n",
    "l1 = []\n",
    "for api_list in test_df_b['apis']:\n",
    "    for spec in api_list:\n",
    "        l1.extend(prop['type'] for prop in spec['parameters']['properties'].values())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "c7e3deca-daff-4d1c-86b6-9ac53ee891f3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'array', 'boolean', 'dict', 'float', 'integer', 'number', 'string', 'tuple'}"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "set(l1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "c0259d4c-90d3-44ff-b895-fb140e54481a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def del_type(api):\n",
    "    \"\"\"Flatten an API schema in place.\n",
    "\n",
    "    Drops the top-level 'type' field of api['parameters'] and hoists\n",
    "    'required' and 'properties' up one level. Mutates and returns the\n",
    "    same dict. Uses pop() with a default for every key so a schema\n",
    "    missing 'type' (or the others) no longer raises KeyError.\n",
    "    \"\"\"\n",
    "    api['parameters'].pop('type', None)\n",
    "    api['required']=api['parameters'].pop('required', None)\n",
    "    api['parameters']=api['parameters'].pop('properties', None)\n",
    "    return api"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "5bf0a831-455e-4cd2-93ca-d4d19cc308c4",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "test_df_a['apis']=test_df_a['apis'].apply(lambda x:[del_type(i) for i in x])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "d3ea7b24-4dc1-4440-9cc6-e74d17a8de3e",
   "metadata": {},
   "outputs": [],
   "source": [
    "test_df_b['apis']=test_df_b['apis'].apply(lambda x:[del_type(i) for i in x])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "512e0505-c477-44c8-8ef1-7bd69f677eea",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "user_messages\n",
       "1     613\n",
       "2     470\n",
       "3     376\n",
       "4     326\n",
       "5     268\n",
       "6     161\n",
       "7      58\n",
       "8      15\n",
       "9       1\n",
       "10      1\n",
       "Name: count, dtype: int64"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test_df_b['user_messages'].apply(lambda x:len(x)).value_counts(sort=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "cf4407e8-c914-4e27-bf16-cd177bc3748d",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "apis\n",
       "5     516\n",
       "4     381\n",
       "7     373\n",
       "3     344\n",
       "6     337\n",
       "2     292\n",
       "8      33\n",
       "9       7\n",
       "10      3\n",
       "11      2\n",
       "12      1\n",
       "Name: count, dtype: int64"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test_df_b['apis'].apply(lambda x:len(x)).value_counts(sort=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "9917b342-dd64-4312-8224-435cc3403272",
   "metadata": {},
   "outputs": [],
   "source": [
    "# System-prompt template for the API-orchestration task. The placeholders\n",
    "# {user_messages} and {apis} are substituted later via str.replace,\n",
    "# because the template itself contains literal JSON braces that would\n",
    "# break str.format.\n",
    "prompt = '''你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\n",
    "\n",
    "# 输入数据\n",
    "- 用户消息列表（按时间顺序排列）：{user_messages}\n",
    "- 可用API列表（每个API包含名称、描述、参数和必填项）：{apis}\n",
    "\n",
    "# 处理规则\n",
    "1. **单消息处理**\n",
    "   ✔️ 消息无对应API → 返回[]\n",
    "   ✔️ 消息匹配API但缺少必填参数 → 返回[]\n",
    "   ✔️ 消息匹配API且参数完整 → 返回对应编排\n",
    "\n",
    "2. **跨消息处理**（仅当前消息无匹配时触发）\n",
    "   ✔️ 检查后续消息是否包含当前API所需参数：\n",
    "   - 后续有参数 → 返回[]\n",
    "   - 后续无参数 → 整合最近N条消息参数\n",
    "   ✔️ 参数完整 → 返回对应编排\n",
    "   ✔️ 仍不完整 → 返回[]\n",
    "\n",
    "# 编排要求\n",
    "1. 参数类型处理：\n",
    "   - number类型：123（无引号）\n",
    "   - boolean类型：true/false（小写）\n",
    "   - string类型：\"value\"（需引号）\n",
    "\n",
    "2. 输出格式规范：\n",
    "```json\n",
    "[\n",
    "  {\"name\": \"api1\", \"arguments\": {...}},\n",
    "  [],\n",
    "  {\"name\": \"api2\", \"arguments\": {...}},\n",
    "  ...\n",
    "]\n",
    "```\n",
    "\n",
    "# 处理流程\n",
    "1. 顺序处理每个用户消息\n",
    "2. 对每个消息执行：\n",
    "   a. 匹配可能API\n",
    "   b. 提取消息中的参数\n",
    "   c. 验证必填参数\n",
    "   d. 按规则决定是否跨消息整合\n",
    "3. 生成最终编排列表\n",
    "\n",
    "# 注意事项\n",
    "1. 严格保持输出列表长度与输入消息列表一致\n",
    "2. 每个消息只能对应0-1个API调用\n",
    "3. 参数值必须来自消息原文\n",
    "4. 不确定时优先返回空列表'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "3e07918e-3e9a-4e77-963f-1609d2f00815",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_query(x):\n",
    "    \"\"\"Build the full model prompt for one dataframe row.\"\"\"\n",
    "    # str.replace instead of str.format: the template contains literal braces.\n",
    "    messages_json = json.dumps(x['user_messages'],ensure_ascii=False)\n",
    "    apis_json = json.dumps(x['apis'],ensure_ascii=False)\n",
    "    return prompt.replace('{user_messages}',messages_json).replace('{apis}',apis_json)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "882d2dc7-0858-4f7c-b442-b76f07d2e220",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "test_df_a['query']=test_df_a.apply(get_query,axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "e20c9559-da39-4810-8357-95a32c9d6495",
   "metadata": {},
   "outputs": [],
   "source": [
    "test_df_b['query']=test_df_b.apply(get_query,axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "aacfe2f6-9714-4e68-8566-9a3c953abdad",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_reponse(x):\n",
    "    # NOTE: keeps the original 'reponse' spelling — callers depend on the name.\n",
    "    \"\"\"Validate one row's predicted targets against its API schemas.\n",
    "\n",
    "    Returns a list with one entry per message: either [] or the unwrapped\n",
    "    {'name', 'arguments'} dict when the prediction passes every check —\n",
    "    known API name, all required params present, no unknown params, and\n",
    "    argument values matching the declared boolean/number/string types.\n",
    "    \"\"\"\n",
    "    user_messages=x['user_messages']\n",
    "    apis=x['apis']\n",
    "    apis_name=[i['name'] for i in apis]\n",
    "    response=x['targets']\n",
    "    res=[]\n",
    "    if type(response)==list:\n",
    "        for data in response:\n",
    "            # NOTE(review): non-list entries are skipped without appending,\n",
    "            # which can make res shorter than user_messages — presumably\n",
    "            # filtered out downstream by the length-based check; confirm.\n",
    "            if type(data)==list:\n",
    "                if len(data)==0:\n",
    "                    res.append([])\n",
    "                else:\n",
    "                    # Only the first call of a multi-call prediction is considered.\n",
    "                    data=data[0]\n",
    "                    if type(data)==dict:\n",
    "                        if 'name' in data and 'arguments' in data:\n",
    "                            api_name=data['name']\n",
    "                            arguments=data['arguments']\n",
    "                            arguments_keys=arguments.keys()\n",
    "                            if api_name in apis_name:\n",
    "                                api_id=apis_name.index(api_name)\n",
    "                                api=apis[api_id]\n",
    "                                required=api['required']\n",
    "                                parameters=api['parameters']\n",
    "                                parameters_keys=parameters.keys()\n",
    "                                # Map each parameter name to its declared type.\n",
    "                                parameters_types={}\n",
    "                                for key,value in parameters.items():\n",
    "                                    parameters_types[key]=value['type']\n",
    "                                # Every required parameter must be supplied...\n",
    "                                if (set(required)-set(arguments_keys))==set():\n",
    "                                    # ...and no argument may fall outside the schema.\n",
    "                                    if (set(arguments_keys)-set(parameters_keys))==set():\n",
    "                                        check_type=0\n",
    "                                        for key,value in arguments.items():\n",
    "                                            api_parameter_type=parameters_types[key]\n",
    "                                            if api_parameter_type=='boolean' and type(value)!=bool:\n",
    "                                                print('布尔类型错误',arguments,parameters)\n",
    "                                                check_type+=1\n",
    "                                                break\n",
    "                                            if api_parameter_type=='number' and type(value) not in [int,float]:\n",
    "                                                print('number类型错误',arguments,parameters)\n",
    "                                                check_type+=1\n",
    "                                                break\n",
    "                                            if api_parameter_type=='string' and type(value)!=str:\n",
    "                                                print('string类型错误',arguments,parameters)\n",
    "                                                check_type+=1\n",
    "                                                break\n",
    "                                        if check_type==0:\n",
    "                                            res.append(data)# without the list wrapper\n",
    "                                        else:\n",
    "                                            res.append([])\n",
    "                                    else:\n",
    "                                        print('多参数',set(arguments_keys)-set(parameters_keys),data)\n",
    "                                        res.append([])\n",
    "                                else:\n",
    "                                    print('required缺少',set(required)-set(arguments_keys),data)\n",
    "                                    res.append([])\n",
    "                            else:\n",
    "                                print('api_name不对',data)\n",
    "                                res.append([])\n",
    "                        else:\n",
    "                            print('缺少name和arguments',data)\n",
    "                            res.append([])\n",
    "                    else:\n",
    "                        res.append([])\n",
    "    else:\n",
    "        # Non-list targets (e.g. an upstream parse failure) -> all-empty result.\n",
    "        res=[[]]*len(user_messages)\n",
    "    return res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "4c856606-9b41-461d-9a2b-f0fcc2cf5e53",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>targets</th>\n",
       "      <th>apis</th>\n",
       "      <th>user_messages</th>\n",
       "      <th>query</th>\n",
       "      <th>label</th>\n",
       "      <th>most_common_dict_check2</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>8</td>\n",
       "      <td>[[], [], [{'name': 'supplier_info', 'arguments...</td>\n",
       "      <td>[{'name': 'generate_production_report', 'descr...</td>\n",
       "      <td>[我正在寻找亚洲地区的供应商信息，你能帮我查一下吗？, 对了，我还需要包含他们的财务数据。,...</td>\n",
       "      <td>你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...</td>\n",
       "      <td>```json\\n[[], [], {\"name\": \"supplier_info\", \"a...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>2</td>\n",
       "      <td>[[], [], [], [{'name': 'send_promotional_email...</td>\n",
       "      <td>[{'name': 'embassy_appointment', 'description'...</td>\n",
       "      <td>[Use the following function to calculate the a...</td>\n",
       "      <td>你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...</td>\n",
       "      <td>```json\\n[[], [], [], {\"name\": \"send_promotion...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>5</td>\n",
       "      <td>[[], [], [], [], [{'name': 'generate_marketing...</td>\n",
       "      <td>[{'name': 'log_absence', 'description': '记录员工的...</td>\n",
       "      <td>[为了更好地分析市场活动的效果，我需要生成一份综合报告。请问可以设置报告中包含的指标为销售量...</td>\n",
       "      <td>你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...</td>\n",
       "      <td>```json\\n[[], [], [], [], {\"name\": \"generate_m...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>18</td>\n",
       "      <td>[[], [], [{'name': 'order_fulfillment_tracking...</td>\n",
       "      <td>[{'name': 'order_fulfillment_tracking', 'descr...</td>\n",
       "      <td>[我正在关注我的订单，它目前的状态是处理中。, 对了，我的客户编号是C12345。, 订单编...</td>\n",
       "      <td>你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...</td>\n",
       "      <td>```json\\n[[], [], {\"name\": \"order_fulfillment_...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>17</td>\n",
       "      <td>[[], [], [], [{'name': 'dye_batch_management',...</td>\n",
       "      <td>[{'name': 'dye_batch_management', 'description...</td>\n",
       "      <td>[我需要管理一个新的染色批次，面料类型是棉的。, 这个批次的染色过程将由主管S007负责监督...</td>\n",
       "      <td>你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...</td>\n",
       "      <td>```json\\n[[], [], [], {\"name\": \"dye_batch_mana...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>955</th>\n",
       "      <td>988</td>\n",
       "      <td>[[], [], [{'name': 'apply_discount', 'argument...</td>\n",
       "      <td>[{'name': 'apply_discount', 'description': '对订...</td>\n",
       "      <td>[帮我分析一下这个文档，看看里面的逻辑关系。, 我正在考虑使用一个折扣码来减少我的订单费用，...</td>\n",
       "      <td>你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...</td>\n",
       "      <td>```json\\n[[], [], {\"name\": \"apply_discount\", \"...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>956</th>\n",
       "      <td>400</td>\n",
       "      <td>[[], [], [], [{'name': 'agricultural_subsidy_e...</td>\n",
       "      <td>[{'name': 'monitor_water_quality', 'descriptio...</td>\n",
       "      <td>[我正在考虑扩大我的农场规模，目前是10公顷。, 去年我的年收入大约是50000元，我的农场...</td>\n",
       "      <td>你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...</td>\n",
       "      <td>```json\\n[[], [], [], {\"name\": \"agricultural_s...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>957</th>\n",
       "      <td>200</td>\n",
       "      <td>[[], [], [], [{'name': 'submit_diplomatic_requ...</td>\n",
       "      <td>[{'name': 'submit_diplomatic_request', 'descri...</td>\n",
       "      <td>[Create an algorithm that determines if two st...</td>\n",
       "      <td>你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...</td>\n",
       "      <td>```json\\n[[], [], [], {\"name\": \"submit_diploma...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>958</th>\n",
       "      <td>111</td>\n",
       "      <td>[[], [], [], [], [], [{'name': 'crop_yield_pre...</td>\n",
       "      <td>[{'name': 'calculate_resource_allocation', 'de...</td>\n",
       "      <td>[我正在考虑给小麦田施用300公斤的化肥，你觉得这会影响产量吗？, 同时，我计划将灌溉水平保...</td>\n",
       "      <td>你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...</td>\n",
       "      <td>```json\\n[[], [], [], [], [], {\"name\": \"crop_y...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>959</th>\n",
       "      <td>995</td>\n",
       "      <td>[[], [], [{'name': 'get_user_profile', 'argume...</td>\n",
       "      <td>[{'name': 'get_user_profile', 'description': '...</td>\n",
       "      <td>[我想查看一下我的个人资料，包括支付方式信息和订单历史记录。, 我的用户ID是12345。,...</td>\n",
       "      <td>你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...</td>\n",
       "      <td>```json\\n[[], [], {\"name\": \"get_user_profile\",...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>960 rows × 7 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "      id                                            targets  \\\n",
       "0      8  [[], [], [{'name': 'supplier_info', 'arguments...   \n",
       "1      2  [[], [], [], [{'name': 'send_promotional_email...   \n",
       "2      5  [[], [], [], [], [{'name': 'generate_marketing...   \n",
       "3     18  [[], [], [{'name': 'order_fulfillment_tracking...   \n",
       "4     17  [[], [], [], [{'name': 'dye_batch_management',...   \n",
       "..   ...                                                ...   \n",
       "955  988  [[], [], [{'name': 'apply_discount', 'argument...   \n",
       "956  400  [[], [], [], [{'name': 'agricultural_subsidy_e...   \n",
       "957  200  [[], [], [], [{'name': 'submit_diplomatic_requ...   \n",
       "958  111  [[], [], [], [], [], [{'name': 'crop_yield_pre...   \n",
       "959  995  [[], [], [{'name': 'get_user_profile', 'argume...   \n",
       "\n",
       "                                                  apis  \\\n",
       "0    [{'name': 'generate_production_report', 'descr...   \n",
       "1    [{'name': 'embassy_appointment', 'description'...   \n",
       "2    [{'name': 'log_absence', 'description': '记录员工的...   \n",
       "3    [{'name': 'order_fulfillment_tracking', 'descr...   \n",
       "4    [{'name': 'dye_batch_management', 'description...   \n",
       "..                                                 ...   \n",
       "955  [{'name': 'apply_discount', 'description': '对订...   \n",
       "956  [{'name': 'monitor_water_quality', 'descriptio...   \n",
       "957  [{'name': 'submit_diplomatic_request', 'descri...   \n",
       "958  [{'name': 'calculate_resource_allocation', 'de...   \n",
       "959  [{'name': 'get_user_profile', 'description': '...   \n",
       "\n",
       "                                         user_messages  \\\n",
       "0    [我正在寻找亚洲地区的供应商信息，你能帮我查一下吗？, 对了，我还需要包含他们的财务数据。,...   \n",
       "1    [Use the following function to calculate the a...   \n",
       "2    [为了更好地分析市场活动的效果，我需要生成一份综合报告。请问可以设置报告中包含的指标为销售量...   \n",
       "3    [我正在关注我的订单，它目前的状态是处理中。, 对了，我的客户编号是C12345。, 订单编...   \n",
       "4    [我需要管理一个新的染色批次，面料类型是棉的。, 这个批次的染色过程将由主管S007负责监督...   \n",
       "..                                                 ...   \n",
       "955  [帮我分析一下这个文档，看看里面的逻辑关系。, 我正在考虑使用一个折扣码来减少我的订单费用，...   \n",
       "956  [我正在考虑扩大我的农场规模，目前是10公顷。, 去年我的年收入大约是50000元，我的农场...   \n",
       "957  [Create an algorithm that determines if two st...   \n",
       "958  [我正在考虑给小麦田施用300公斤的化肥，你觉得这会影响产量吗？, 同时，我计划将灌溉水平保...   \n",
       "959  [我想查看一下我的个人资料，包括支付方式信息和订单历史记录。, 我的用户ID是12345。,...   \n",
       "\n",
       "                                                 query  \\\n",
       "0    你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...   \n",
       "1    你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...   \n",
       "2    你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...   \n",
       "3    你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...   \n",
       "4    你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...   \n",
       "..                                                 ...   \n",
       "955  你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...   \n",
       "956  你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...   \n",
       "957  你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...   \n",
       "958  你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...   \n",
       "959  你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\\...   \n",
       "\n",
       "                                                 label  \\\n",
       "0    ```json\\n[[], [], {\"name\": \"supplier_info\", \"a...   \n",
       "1    ```json\\n[[], [], [], {\"name\": \"send_promotion...   \n",
       "2    ```json\\n[[], [], [], [], {\"name\": \"generate_m...   \n",
       "3    ```json\\n[[], [], {\"name\": \"order_fulfillment_...   \n",
       "4    ```json\\n[[], [], [], {\"name\": \"dye_batch_mana...   \n",
       "..                                                 ...   \n",
       "955  ```json\\n[[], [], {\"name\": \"apply_discount\", \"...   \n",
       "956  ```json\\n[[], [], [], {\"name\": \"agricultural_s...   \n",
       "957  ```json\\n[[], [], [], {\"name\": \"submit_diploma...   \n",
       "958  ```json\\n[[], [], [], [], [], {\"name\": \"crop_y...   \n",
       "959  ```json\\n[[], [], {\"name\": \"get_user_profile\",...   \n",
       "\n",
       "     most_common_dict_check2  \n",
       "0                          2  \n",
       "1                          2  \n",
       "2                          2  \n",
       "3                          2  \n",
       "4                          2  \n",
       "..                       ...  \n",
       "955                        2  \n",
       "956                        2  \n",
       "957                        2  \n",
       "958                        2  \n",
       "959                        2  \n",
       "\n",
       "[960 rows x 7 columns]"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build pseudo-labels from an earlier submission's predictions.\n",
    "# (Dropped the pointless f-string prefix: the path has no placeholders.)\n",
    "train_df=pd.read_json('data/submissions/submission_L4090-v006_2025-03-11_23_02_16.jsonl',lines=True)[['id','targets']]\n",
    "train_df=train_df.merge(test_df_a,how='left',on='id')\n",
    "# Validate each predicted target against its API schemas (invalid -> []).\n",
    "train_df['label']=train_df.apply(lambda x:get_reponse(x),axis=1)\n",
    "# Keep rows whose label holds exactly one API call: a single\n",
    "# {'name', 'arguments'} dict contributes length 2 to the sum.\n",
    "train_df['most_common_dict_check2']=train_df['label'].apply(lambda x:sum([len(i) for i in x]))\n",
    "train_df=train_df[train_df['most_common_dict_check2']==2].reset_index(drop=True)\n",
    "# Render labels as fenced JSON — the exact output format the model must emit.\n",
    "train_df['label']=train_df['label'].apply(lambda x:'```json\\n'+json.dumps(x, ensure_ascii=False)+'\\n```')\n",
    "train_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "44f4a6c5-a587-46f8-9905-8ed4c7e070c6",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Persist training pairs as JSONL. Iterate the two columns together with\n",
    "# zip instead of enumerate + positional label lookup, which silently\n",
    "# misaligns if the index is ever not a clean RangeIndex.\n",
    "with open(path+f'feature/train_{version}.jsonl', 'w',  encoding='utf-8') as fw:\n",
    "    for query, response in zip(train_df['query'], train_df['label']):\n",
    "        s = json.dumps({\n",
    "            'query': query,\n",
    "            'response': response\n",
    "        }, ensure_ascii=False)\n",
    "        fw.write(s + '\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "4b0c7c38-5352-4997-9763-03a950cc74bb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist inference queries as JSONL; the response is a fixed placeholder,\n",
    "# so drop the unused enumerate index and the per-iteration reassignment.\n",
    "with open(path+f'feature/test_{version}.jsonl', 'w',  encoding='utf-8') as fw:\n",
    "    for query in test_df_b['query']:\n",
    "        s = json.dumps({\n",
    "            'query': query,\n",
    "            'response': '无'\n",
    "        }, ensure_ascii=False)\n",
    "        fw.write(s + '\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "b033ddc9-99c4-47f3-9b3a-299b2187d6f6",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/sft.py --model ../../public_data/Qwen2-7B-Instruct --output_dir data/models/L4090-v012 --model_revision master --torch_dtype bfloat16 --dataset data/feature/train_L4090-v012.jsonl --train_type lora --lora_rank 512 --lora_alpha 1024 --target_modules all-linear --num_train_epochs 5 --max_length 8192 --gradient_checkpointing true --per_device_train_batch_size 1 --learning_rate 1e-5 --gradient_accumulation_steps 1 --max_grad_norm 1.0 --warmup_ratio 0.03 --eval_steps 1000000 --save_steps 1000000 --save_total_limit 1 --attn_impl flash_attn --save_only_model true --freeze_llm false --freeze_vit true --freeze_aligner true --split_dataset_ratio 0 --acc_strategy seq --logging_steps 10`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Setting args.lazy_tokenize: False\n",
      "/home/ubuntu/anaconda3/lib/python3.10/site-packages/transformers/training_args.py:1594: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead\n",
      "  warnings.warn(\n",
      "[INFO:swift] output_dir: data/models/L4090-v012/v0-20250318-112919\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: TrainArguments(\n",
      "_n_gpu=-1,\n",
      "acc_steps=1,\n",
      "acc_strategy=seq,\n",
      "accelerator_config={'dispatch_batches': False},\n",
      "adafactor=False,\n",
      "adalora_beta1=0.85,\n",
      "adalora_beta2=0.85,\n",
      "adalora_deltaT=1,\n",
      "adalora_init_r=12,\n",
      "adalora_orth_reg_weight=0.5,\n",
      "adalora_target_r=8,\n",
      "adalora_tfinal=0,\n",
      "adalora_tinit=0,\n",
      "adam_beta1=0.9,\n",
      "adam_beta2=0.999,\n",
      "adam_epsilon=1e-08,\n",
      "adapter_act=gelu,\n",
      "adapter_length=128,\n",
      "adapters=[],\n",
      "add_version=True,\n",
      "attn_impl=flash_attn,\n",
      "auto_find_batch_size=False,\n",
      "average_tokens_across_devices=False,\n",
      "batch_eval_metrics=False,\n",
      "bf16=True,\n",
      "bf16_full_eval=False,\n",
      "bnb_4bit_compute_dtype=torch.bfloat16,\n",
      "bnb_4bit_quant_storage=None,\n",
      "bnb_4bit_quant_type=nf4,\n",
      "bnb_4bit_use_double_quant=True,\n",
      "boft_block_num=0,\n",
      "boft_block_size=4,\n",
      "boft_dropout=0.0,\n",
      "boft_n_butterfly_factor=1,\n",
      "check_model=True,\n",
      "ckpt_dir=None,\n",
      "columns={},\n",
      "create_checkpoint_symlink=False,\n",
      "custom_dataset_info=[],\n",
      "custom_register_path=[],\n",
      "data_seed=42,\n",
      "dataloader_drop_last=False,\n",
      "dataloader_num_workers=0,\n",
      "dataloader_persistent_workers=False,\n",
      "dataloader_pin_memory=True,\n",
      "dataloader_prefetch_factor=None,\n",
      "dataset=['data/feature/train_L4090-v012.jsonl'],\n",
      "dataset_num_proc=1,\n",
      "ddp_backend=None,\n",
      "ddp_broadcast_buffers=None,\n",
      "ddp_bucket_cap_mb=None,\n",
      "ddp_find_unused_parameters=None,\n",
      "ddp_timeout=1800,\n",
      "debug=None,\n",
      "deepspeed=None,\n",
      "device_map=None,\n",
      "disable_tqdm=None,\n",
      "dispatch_batches=None,\n",
      "do_eval=False,\n",
      "do_predict=False,\n",
      "do_train=False,\n",
      "download_mode=reuse_dataset_if_exists,\n",
      "enable_cache=False,\n",
      "eval_accumulation_steps=None,\n",
      "eval_delay=0,\n",
      "eval_do_concat_batches=True,\n",
      "eval_on_start=False,\n",
      "eval_steps=1000000.0,\n",
      "eval_strategy=steps,\n",
      "eval_use_gather_object=False,\n",
      "evaluation_strategy=steps,\n",
      "external_plugins=[],\n",
      "fourier_n_frequency=2000,\n",
      "fourier_scaling=300.0,\n",
      "fp16=False,\n",
      "fp16_backend=auto,\n",
      "fp16_full_eval=False,\n",
      "fp16_opt_level=O1,\n",
      "freeze_aligner=True,\n",
      "freeze_llm=False,\n",
      "freeze_parameters=[],\n",
      "freeze_parameters_ratio=0.0,\n",
      "freeze_vit=True,\n",
      "fsdp=,\n",
      "fsdp_config=None,\n",
      "fsdp_min_num_params=0,\n",
      "fsdp_num=1,\n",
      "fsdp_transformer_layer_cls_to_wrap=None,\n",
      "full_determinism=False,\n",
      "galore_cos_threshold=0.4,\n",
      "galore_gamma_proj=2,\n",
      "galore_optim_per_parameter=False,\n",
      "galore_proj_bits=4,\n",
      "galore_proj_group_size=256,\n",
      "galore_proj_quant=False,\n",
      "galore_proj_type=std,\n",
      "galore_quantization=False,\n",
      "galore_queue_size=5,\n",
      "galore_rank=128,\n",
      "galore_scale=1.0,\n",
      "galore_target_modules=None,\n",
      "galore_update_proj_gap=50,\n",
      "galore_with_embedding=False,\n",
      "generation_config=None,\n",
      "generation_max_length=None,\n",
      "generation_num_beams=None,\n",
      "gradient_accumulation_steps=1,\n",
      "gradient_checkpointing=True,\n",
      "gradient_checkpointing_kwargs=None,\n",
      "greater_is_better=False,\n",
      "group_by_length=False,\n",
      "half_precision_backend=auto,\n",
      "hqq_axis=None,\n",
      "hub_always_push=False,\n",
      "hub_model_id=None,\n",
      "hub_private_repo=None,\n",
      "hub_strategy=every_save,\n",
      "hub_token=<HUB_TOKEN>,\n",
      "ignore_args_error=False,\n",
      "ignore_data_skip=False,\n",
      "include_for_metrics=[],\n",
      "include_inputs_for_metrics=False,\n",
      "include_num_input_tokens_seen=False,\n",
      "include_tokens_per_second=False,\n",
      "init_weights=True,\n",
      "jit_mode_eval=False,\n",
      "label_names=None,\n",
      "label_smoothing_factor=0.0,\n",
      "lazy_tokenize=False,\n",
      "learning_rate=1e-05,\n",
      "length_column_name=length,\n",
      "lisa_activated_layers=0,\n",
      "lisa_step_interval=20,\n",
      "llamapro_num_groups=None,\n",
      "llamapro_num_new_blocks=4,\n",
      "load_args=False,\n",
      "load_best_model_at_end=False,\n",
      "load_data_args=False,\n",
      "load_dataset_config=None,\n",
      "local_rank=-1,\n",
      "local_repo_path=None,\n",
      "log_level=passive,\n",
      "log_level_replica=warning,\n",
      "log_on_each_node=True,\n",
      "logging_dir=/data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/runs,\n",
      "logging_first_step=True,\n",
      "logging_nan_inf_filter=True,\n",
      "logging_steps=10,\n",
      "logging_strategy=steps,\n",
      "logprobs=False,\n",
      "lora_alpha=1024,\n",
      "lora_bias=none,\n",
      "lora_dropout=0.05,\n",
      "lora_dtype=None,\n",
      "lora_ga_batch_size=2,\n",
      "lora_ga_direction=ArB2r,\n",
      "lora_ga_iters=2,\n",
      "lora_ga_max_length=1024,\n",
      "lora_ga_scale=stable,\n",
      "lora_ga_stable_gamma=16,\n",
      "lora_modules=[],\n",
      "lora_rank=512,\n",
      "lorap_lr_ratio=None,\n",
      "loss_scale=default,\n",
      "loss_type=None,\n",
      "lr_scheduler_kwargs=None,\n",
      "lr_scheduler_type=cosine,\n",
      "max_grad_norm=1.0,\n",
      "max_length=8192,\n",
      "max_memory={},\n",
      "max_new_tokens=64,\n",
      "max_pixels=None,\n",
      "max_steps=-1,\n",
      "metric=None,\n",
      "metric_for_best_model=loss,\n",
      "metric_warmup_step=0,\n",
      "model=../../public_data/Qwen2-7B-Instruct,\n",
      "model_author=[None, None],\n",
      "model_kwargs={},\n",
      "model_layer_cls_name=None,\n",
      "model_name=[None, None],\n",
      "model_revision=master,\n",
      "model_type=qwen2,\n",
      "modules_to_save=[],\n",
      "mp_parameters=,\n",
      "neftune_noise_alpha=None,\n",
      "no_cuda=False,\n",
      "norm_bbox=None,\n",
      "num_beams=1,\n",
      "num_labels=None,\n",
      "num_train_epochs=5.0,\n",
      "optim=adamw_torch,\n",
      "optim_args=None,\n",
      "optim_target_modules=None,\n",
      "optimizer=None,\n",
      "output_dir=/data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919,\n",
      "overwrite_output_dir=False,\n",
      "packing=False,\n",
      "padding_side=right,\n",
      "past_index=-1,\n",
      "per_device_eval_batch_size=1,\n",
      "per_device_train_batch_size=1,\n",
      "predict_with_generate=False,\n",
      "prediction_loss_only=False,\n",
      "push_to_hub=False,\n",
      "push_to_hub_model_id=None,\n",
      "push_to_hub_organization=None,\n",
      "push_to_hub_token=<PUSH_TO_HUB_TOKEN>,\n",
      "quant_bits=None,\n",
      "quant_method=None,\n",
      "ray_scope=last,\n",
      "reft_args=None,\n",
      "reft_intervention_type=LoreftIntervention,\n",
      "reft_layer_key=None,\n",
      "reft_layers=None,\n",
      "reft_rank=4,\n",
      "remove_unused_columns=True,\n",
      "repetition_penalty=None,\n",
      "report_to=['tensorboard'],\n",
      "response_prefix=None,\n",
      "restore_callback_states_from_checkpoint=False,\n",
      "resume_from_checkpoint=None,\n",
      "resume_only_model=False,\n",
      "rope_scaling=None,\n",
      "run_name=None,\n",
      "save_on_each_node=False,\n",
      "save_only_model=True,\n",
      "save_safetensors=True,\n",
      "save_steps=1000000.0,\n",
      "save_strategy=steps,\n",
      "save_total_limit=1,\n",
      "seed=42,\n",
      "sequence_parallel_size=1,\n",
      "skip_memory_metrics=True,\n",
      "sortish_sampler=False,\n",
      "split_batches=None,\n",
      "split_dataset_ratio=0.0,\n",
      "stop_words=[],\n",
      "stream=False,\n",
      "streaming=False,\n",
      "strict=False,\n",
      "swanlab_exp_name=None,\n",
      "swanlab_mode=cloud,\n",
      "swanlab_project=None,\n",
      "swanlab_token=<SWANLAB_TOKEN>,\n",
      "swanlab_workspace=None,\n",
      "system=None,\n",
      "target_modules=['all-linear'],\n",
      "target_regex=None,\n",
      "task_type=causal_lm,\n",
      "temperature=0.0,\n",
      "template=qwen,\n",
      "template_backend=swift,\n",
      "tf32=None,\n",
      "tools_prompt=react_en,\n",
      "top_k=None,\n",
      "top_logprobs=None,\n",
      "top_p=None,\n",
      "torch_compile=False,\n",
      "torch_compile_backend=None,\n",
      "torch_compile_mode=None,\n",
      "torch_dtype=torch.bfloat16,\n",
      "torch_empty_cache_steps=None,\n",
      "torchdynamo=None,\n",
      "tpu_metrics_debug=False,\n",
      "tpu_num_cores=None,\n",
      "train_type=lora,\n",
      "trainable_parameters=[],\n",
      "truncation_strategy=delete,\n",
      "tuner_backend=peft,\n",
      "use_chat_template=True,\n",
      "use_cpu=False,\n",
      "use_dora=False,\n",
      "use_galore=False,\n",
      "use_hf=False,\n",
      "use_ipex=False,\n",
      "use_legacy_prediction_loop=False,\n",
      "use_liger=False,\n",
      "use_liger_kernel=False,\n",
      "use_mps_device=False,\n",
      "use_rslora=False,\n",
      "use_swift_lora=False,\n",
      "val_dataset=[],\n",
      "vera_d_initial=0.1,\n",
      "vera_dropout=0.0,\n",
      "vera_projection_prng_key=0,\n",
      "vera_rank=256,\n",
      "warmup_ratio=0.03,\n",
      "warmup_steps=0,\n",
      "weight_decay=0.1,\n",
      "zero_hpz_partition_size=None,\n",
      ")\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] model_kwargs: {'device_map': 'auto'}\n",
      "Loading checkpoint shards: 100%|██████████████████| 4/4 [01:14<00:00, 18.71s/it]\n",
      "[INFO:swift] model.hf_device_map: {'model.embed_tokens': 0, 'model.layers.0': 0, 'model.layers.1': 0, 'model.layers.2': 0, 'model.layers.3': 0, 'model.layers.4': 1, 'model.layers.5': 1, 'model.layers.6': 1, 'model.layers.7': 1, 'model.layers.8': 1, 'model.layers.9': 1, 'model.layers.10': 1, 'model.layers.11': 1, 'model.layers.12': 1, 'model.layers.13': 2, 'model.layers.14': 2, 'model.layers.15': 2, 'model.layers.16': 2, 'model.layers.17': 2, 'model.layers.18': 2, 'model.layers.19': 2, 'model.layers.20': 2, 'model.layers.21': 2, 'model.layers.22': 3, 'model.layers.23': 3, 'model.layers.24': 3, 'model.layers.25': 3, 'model.layers.26': 3, 'model.layers.27': 3, 'model.norm': 3, 'model.rotary_emb': 3, 'lm_head': 3}\n",
      "[INFO:swift] model_info: ModelInfo(model_type='qwen2', model_dir='/data/xiaosa/public_data/Qwen2-7B-Instruct', torch_dtype=torch.bfloat16, max_model_len=32768, quant_method=None, quant_bits=None, rope_scaling=None, config=Qwen2Config {\n",
      "  \"_name_or_path\": \"/data/xiaosa/public_data/Qwen2-7B-Instruct\",\n",
      "  \"architectures\": [\n",
      "    \"Qwen2ForCausalLM\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"bos_token_id\": 151643,\n",
      "  \"eos_token_id\": 151645,\n",
      "  \"hidden_act\": \"silu\",\n",
      "  \"hidden_size\": 3584,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 18944,\n",
      "  \"max_position_embeddings\": 32768,\n",
      "  \"max_window_layers\": 28,\n",
      "  \"model_type\": \"qwen2\",\n",
      "  \"num_attention_heads\": 28,\n",
      "  \"num_hidden_layers\": 28,\n",
      "  \"num_key_value_heads\": 4,\n",
      "  \"rms_norm_eps\": 1e-06,\n",
      "  \"rope_scaling\": null,\n",
      "  \"rope_theta\": 1000000.0,\n",
      "  \"sliding_window\": 131072,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"bfloat16\",\n",
      "  \"transformers_version\": \"4.49.0\",\n",
      "  \"use_cache\": true,\n",
      "  \"use_sliding_window\": false,\n",
      "  \"vocab_size\": 152064\n",
      "}\n",
      ", task_type='causal_lm', num_labels=None)\n",
      "[INFO:swift] model.generation_config: GenerationConfig {\n",
      "  \"bos_token_id\": 151643,\n",
      "  \"eos_token_id\": [\n",
      "    151645,\n",
      "    151643\n",
      "  ],\n",
      "  \"max_new_tokens\": 64,\n",
      "  \"pad_token_id\": 151643,\n",
      "  \"repetition_penalty\": 1.05\n",
      "}\n",
      "\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] The TrainArguments will be saved in: /data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/args.json\n",
      "[INFO:swift] Start time of running main: 2025-03-18 11:30:36.054753\n",
      "Generating train split: 960 examples [00:00, 24909.72 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-in1fqwpb\n",
      "Map: 100%|██████████████████████████| 960/960 [00:00<00:00, 19131.23 examples/s]\n",
      "[INFO:swift] train_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 960\n",
      "})\n",
      "[INFO:swift] val_dataset: None\n",
      "Map: 100%|████████████████████████████| 960/960 [00:04<00:00, 194.76 examples/s]\n",
      "[INFO:swift] [INPUT_IDS] [151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 56568, 101909, 104715, 7082, 30868, 59956, 110498, 3837, 85106, 100345, 20002, 64205, 100168, 106188, 102179, 7082, 1773, 14880, 110439, 87752, 104190, 54542, 48443, 2, 69058, 20074, 198, 12, 50042, 64205, 44177, 9909, 59879, 20450, 107684, 108467, 7552, 5122, 1183, 35946, 96555, 104243, 104320, 105638, 104933, 27369, 3837, 107809, 108965, 32876, 100158, 101037, 11319, 497, 330, 32664, 34187, 3837, 35946, 106750, 102298, 104056, 102090, 20074, 1773, 497, 330, 104933, 9370, 915, 20412, 16, 17, 18, 19, 20, 3837, 100632, 113643, 99794, 104056, 82700, 27369, 1773, 7026, 12, 26853, 107, 11622, 7082, 44177, 9909, 103991, 7082, 102298, 29991, 5373, 53481, 5373, 32665, 33108, 58514, 68756, 47882, 7552, 5122, 58, 4913, 606, 788, 330, 19366, 91980, 14813, 497, 330, 4684, 788, 330, 43959, 100382, 100184, 9370, 99426, 100370, 497, 330, 13786, 788, 5212, 11736, 1819, 788, 5212, 1313, 788, 330, 917, 497, 330, 4684, 788, 330, 100370, 31905, 3837, 29524, 102443, 3837, 9754, 26381, 57191, 103986, 14345, 330, 2468, 4164, 788, 5212, 1313, 788, 330, 917, 497, 330, 4684, 788, 330, 100370, 9370, 55286, 45785, 7, 20450, 68805, 17714, 28189, 18506, 40175, 9940, 2137, 330, 408, 4164, 788, 5212, 1313, 788, 330, 917, 497, 330, 4684, 788, 330, 100370, 9370, 80565, 45785, 7, 20450, 68805, 17714, 28189, 18506, 40175, 9940, 2137, 330, 997, 35060, 29622, 788, 5212, 1313, 788, 330, 6117, 497, 330, 4684, 788, 330, 64471, 100630, 22045, 104310, 14345, 330, 3006, 8955, 788, 5212, 1313, 788, 330, 917, 497, 330, 4684, 788, 330, 100370, 9370, 66017, 68805, 3837, 29524, 23424, 57191, 20055, 9207, 2137, 330, 6279, 788, 4383, 11736, 1819, 497, 330, 2468, 4164, 497, 330, 408, 4164, 497, 330, 3006, 8955, 1341, 2137, 5212, 606, 788, 330, 27122, 84833, 38661, 497, 330, 4684, 788, 330, 65577, 101903, 9370, 107039, 102086, 3837, 100630, 103964, 17340, 5373, 110683, 33108, 63379, 72881, 1773, 497, 
330, 13786, 788, 5212, 27122, 842, 788, 5212, 1313, 788, 330, 917, 497, 330, 4684, 788, 330, 101903, 9370, 102157, 106918, 38304, 14345, 330, 19417, 261, 788, 5212, 1313, 788, 330, 917, 497, 330, 4684, 788, 330, 102086, 42223, 9370, 66187, 14345, 330, 12338, 788, 5212, 1313, 788, 330, 4082, 497, 330, 4684, 788, 330, 110683, 3837, 102119, 18493, 16, 26939, 20, 101920, 14345, 330, 14727, 788, 5212, 1313, 788, 330, 917, 497, 330, 4684, 788, 330, 102086, 42223, 9370, 63379, 72881, 14345, 330, 1028, 788, 5212, 1313, 788, 330, 917, 497, 330, 4684, 788, 330, 102086, 45785, 3837, 68805, 17714, 28189, 18506, 40175, 14345, 330, 18559, 8237, 57426, 788, 5212, 1313, 788, 330, 6117, 497, 330, 4684, 788, 330, 64471, 85106, 71817, 105463, 111933, 9207, 2137, 330, 6279, 788, 4383, 27122, 842, 497, 330, 19417, 261, 497, 330, 12338, 497, 330, 14727, 497, 330, 1028, 497, 330, 18559, 8237, 57426, 1341, 2137, 5212, 606, 788, 330, 39830, 3109, 497, 330, 4684, 788, 330, 45912, 104933, 27369, 497, 330, 13786, 788, 5212, 39830, 842, 788, 5212, 1313, 788, 330, 917, 497, 330, 4684, 788, 330, 104933, 9370, 102157, 106918, 14345, 330, 997, 39737, 4898, 1769, 788, 5212, 1313, 788, 330, 6117, 497, 330, 4684, 788, 330, 64471, 102298, 102090, 20074, 497, 330, 2258, 788, 895, 2137, 330, 997, 9840, 1769, 788, 5212, 1313, 788, 330, 6117, 497, 330, 4684, 788, 330, 64471, 102298, 101229, 82700, 27369, 497, 330, 2258, 788, 830, 2137, 330, 3943, 788, 5212, 1313, 788, 330, 917, 497, 330, 4684, 788, 330, 104933, 108339, 101065, 9207, 2137, 330, 6279, 788, 4383, 39830, 842, 497, 330, 3943, 92446, 2533, 2, 40666, 226, 21887, 104190, 198, 16, 13, 3070, 23990, 64205, 54542, 1019, 256, 57156, 30543, 6567, 27757, 22226, 42192, 103124, 7082, 11397, 55616, 19536, 256, 57156, 30543, 6567, 27757, 22226, 106188, 7082, 77288, 102822, 58514, 68756, 32665, 11397, 55616, 19536, 256, 57156, 30543, 6567, 27757, 22226, 106188, 7082, 100136, 32665, 100873, 11397, 55616, 103124, 30868, 59956, 271, 17, 13, 3070, 99960, 64205, 
54542, 334, 9909, 99373, 67949, 64205, 42192, 106188, 13343, 111916, 23083, 256, 57156, 30543, 6567, 96, 222, 32876, 105463, 64205, 64471, 102298, 67949, 7082, 102974, 32665, 28311, 256, 481, 38433, 236, 99448, 18830, 32665, 11397, 55616, 19536, 256, 481, 38433, 236, 99448, 42192, 32665, 11397, 43614, 112, 39762, 104044, 45, 38989, 64205, 32665, 198, 256, 57156, 30543, 70969, 100873, 11397, 55616, 103124, 30868, 59956, 198, 256, 57156, 30543, 220, 99754, 16530, 100873, 11397, 55616, 1294, 271, 2, 84238, 244, 59956, 101882, 198, 16, 13, 70969, 31905, 54542, 28311, 256, 481, 1372, 31905, 5122, 16, 17, 18, 9909, 42192, 72586, 17992, 23083, 256, 481, 2710, 31905, 5122, 1866, 91233, 9909, 30709, 61443, 23083, 256, 481, 914, 31905, 40727, 957, 1, 9909, 58362, 72586, 17992, 27866, 17, 13, 70568, 68805, 101931, 28311, 73594, 2236, 198, 9640, 220, 5212, 606, 788, 330, 2068, 16, 497, 330, 16370, 788, 14916, 22050, 220, 10239, 220, 5212, 606, 788, 330, 2068, 17, 497, 330, 16370, 788, 14916, 22050, 220, 12236, 921, 13874, 19324, 2, 40666, 226, 21887, 102054, 198, 16, 13, 220, 107684, 54542, 103991, 20002, 64205, 198, 17, 13, 69162, 103991, 64205, 75117, 28311, 256, 264, 13, 94305, 117, 54387, 87267, 7082, 198, 256, 293, 13, 93685, 50350, 22226, 101047, 32665, 198, 256, 272, 13, 18137, 36677, 33477, 58514, 68756, 32665, 198, 256, 294, 13, 6567, 234, 231, 104190, 103930, 64471, 99960, 64205, 102102, 198, 18, 13, 220, 43959, 103941, 30868, 59956, 44177, 271, 2, 97161, 102195, 198, 16, 13, 220, 100470, 100662, 66017, 44177, 98402, 57218, 31196, 64205, 44177, 101266, 198, 17, 13, 6567, 107, 237, 18947, 64205, 101097, 103124, 15, 12, 16, 18947, 7082, 47872, 11622, 198, 18, 13, 70969, 25511, 100645, 101919, 64205, 103283, 198, 19, 13, 86009, 60610, 13343, 104747, 31526, 34794, 44177, 151645, 198, 151644, 77091, 198, 73594, 2236, 198, 58, 12995, 10071, 5212, 606, 788, 330, 39830, 3109, 497, 330, 16370, 788, 5212, 39830, 842, 788, 330, 16, 17, 18, 19, 20, 497, 330, 3943, 788, 330, 
104320, 497, 330, 997, 39737, 4898, 1769, 788, 830, 11, 330, 997, 9840, 1769, 788, 895, 3417, 921, 73594, 151645]\n",
      "[INFO:swift] [INPUT] <|im_start|>system\n",
      "You are a helpful assistant.<|im_end|>\n",
      "<|im_start|>user\n",
      "你是一个专业的API编排助手，需要根据用户消息智能匹配最佳API。请严格按照以下规则处理：\n",
      "\n",
      "# 输入数据\n",
      "- 用户消息列表（按时间顺序排列）：[\"我正在寻找亚洲地区的供应商信息，你能帮我查一下吗？\", \"对了，我还需要包含他们的财务数据。\", \"供应商的ID是12345，不过我不想了解他们的产品信息。\"]\n",
      "- 可用API列表（每个API包含名称、描述、参数和必填项）：[{\"name\": \"generate_production_report\", \"description\": \"生成电子制造的生产报告\", \"parameters\": {\"report_type\": {\"type\": \"string\", \"description\": \"报告类型，如每日，月度或年度\"}, \"start_date\": {\"type\": \"string\", \"description\": \"报告的开始日期(时间格式为YYYY-MM-DD)\"}, \"end_date\": {\"type\": \"string\", \"description\": \"报告的结束日期(时间格式为YYYY-MM-DD)\"}, \"include_failed_products\": {\"type\": \"boolean\", \"description\": \"是否包括失败的产品\"}, \"output_format\": {\"type\": \"string\", \"description\": \"报告的输出格式，如PDF或Excel\"}}, \"required\": [\"report_type\", \"start_date\", \"end_date\", \"output_format\"]}, {\"name\": \"staff_performance_review\", \"description\": \"记录员工的绩效评估，包括评价人、评分和评语。\", \"parameters\": {\"staff_id\": {\"type\": \"string\", \"description\": \"员工的唯一标识符\"}, \"reviewer\": {\"type\": \"string\", \"description\": \"评估员的姓名\"}, \"score\": {\"type\": \"number\", \"description\": \"评分，通常在1到5之间\"}, \"comments\": {\"type\": \"string\", \"description\": \"评估员的评语\"}, \"date\": {\"type\": \"string\", \"description\": \"评估日期，格式为YYYY-MM-DD\"}, \"follow_up_needed\": {\"type\": \"boolean\", \"description\": \"是否需要进行后续跟进\"}}, \"required\": [\"staff_id\", \"reviewer\", \"score\", \"comments\", \"date\", \"follow_up_needed\"]}, {\"name\": \"supplier_info\", \"description\": \"获取供应商信息\", \"parameters\": {\"supplier_id\": {\"type\": \"string\", \"description\": \"供应商的唯一标识\"}, \"include_financial_data\": {\"type\": \"boolean\", \"description\": \"是否包含财务数据\", \"default\": false}, \"include_product_data\": {\"type\": \"boolean\", \"description\": \"是否包含供应产品信息\", \"default\": true}, \"region\": {\"type\": \"string\", \"description\": \"供应商所在的区域\"}}, \"required\": [\"supplier_id\", \"region\"]}]\n",
      "\n",
      "# 处理规则\n",
      "1. **单消息处理**\n",
      "   ✔️ 消息无对应API → 返回[]\n",
      "   ✔️ 消息匹配API但缺少必填参数 → 返回[]\n",
      "   ✔️ 消息匹配API且参数完整 → 返回对应编排\n",
      "\n",
      "2. **跨消息处理**（仅当前消息无匹配时触发）\n",
      "   ✔️ 检查后续消息是否包含当前API所需参数：\n",
      "   - 后续有参数 → 返回[]\n",
      "   - 后续无参数 → 整合最近N条消息参数\n",
      "   ✔️ 参数完整 → 返回对应编排\n",
      "   ✔️ 仍不完整 → 返回[]\n",
      "\n",
      "# 编排要求\n",
      "1. 参数类型处理：\n",
      "   - number类型：123（无引号）\n",
      "   - boolean类型：true/false（小写）\n",
      "   - string类型：\"value\"（需引号）\n",
      "\n",
      "2. 输出格式规范：\n",
      "```json\n",
      "[\n",
      "  {\"name\": \"api1\", \"arguments\": {...}},\n",
      "  [],\n",
      "  {\"name\": \"api2\", \"arguments\": {...}},\n",
      "  ...\n",
      "]\n",
      "```\n",
      "\n",
      "# 处理流程\n",
      "1. 顺序处理每个用户消息\n",
      "2. 对每个消息执行：\n",
      "   a. 匹配可能API\n",
      "   b. 提取消息中的参数\n",
      "   c. 验证必填参数\n",
      "   d. 按规则决定是否跨消息整合\n",
      "3. 生成最终编排列表\n",
      "\n",
      "# 注意事项\n",
      "1. 严格保持输出列表长度与输入消息列表一致\n",
      "2. 每个消息只能对应0-1个API调用\n",
      "3. 参数值必须来自消息原文\n",
      "4. 不确定时优先返回空列表<|im_end|>\n",
      "<|im_start|>assistant\n",
      "```json\n",
      "[[], [], {\"name\": \"supplier_info\", \"arguments\": {\"supplier_id\": \"12345\", \"region\": \"亚洲\", \"include_financial_data\": true, \"include_product_data\": false}}]\n",
      "```<|im_end|>\n",
      "[INFO:swift] [LABELS_IDS] [-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 
-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 
-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 73594, 2236, 198, 58, 12995, 10071, 5212, 606, 788, 330, 39830, 3109, 497, 330, 16370, 788, 5212, 39830, 842, 788, 330, 16, 17, 18, 19, 20, 497, 330, 3943, 788, 330, 104320, 497, 330, 997, 39737, 4898, 1769, 788, 830, 11, 330, 997, 9840, 1769, 788, 895, 3417, 921, 73594, 151645]\n",
      "[INFO:swift] [LABELS] [-100 * 946]```json\n",
      "[[], [], {\"name\": \"supplier_info\", \"arguments\": {\"supplier_id\": \"12345\", \"region\": \"亚洲\", \"include_financial_data\": true, \"include_product_data\": false}}]\n",
      "```<|im_end|>\n",
      "Map: 100%|███████████████████████████| 960/960 [00:00<00:00, 1252.92 examples/s]\n",
      "[INFO:swift] Dataset Token Length: 1196.507292±200.806115, min=779.000000, max=2032.000000, size=960\n",
      "[INFO:swift] lora_config: LoraConfig(task_type='CAUSAL_LM', peft_type=<PeftType.LORA: 'LORA'>, auto_mapping=None, base_model_name_or_path='/data/xiaosa/public_data/Qwen2-7B-Instruct', revision=None, inference_mode=False, r=512, target_modules={'o_proj', 'v_proj', 'q_proj', 'up_proj', 'k_proj', 'down_proj', 'gate_proj'}, exclude_modules=None, lora_alpha=1024, lora_dropout=0.05, fan_in_fan_out=False, bias='none', use_rslora=False, modules_to_save=[], init_lora_weights=True, layers_to_transform=None, layers_pattern=None, rank_pattern={}, alpha_pattern={}, megatron_config=None, megatron_core='megatron.core', loftq_config={}, eva_config=None, use_dora=False, layer_replication=None, runtime_config=LoraRuntimeConfig(ephemeral_gpu_offload=False), lora_bias=False, lora_dtype=None, lorap_lr_ratio=None, lorap_emb_lr=1e-06)\n",
      "[INFO:swift] model: PeftModelForCausalLM(\n",
      "  (base_model): LoraModel(\n",
      "    (model): Qwen2ForCausalLM(\n",
      "      (model): Qwen2Model(\n",
      "        (embed_tokens): Embedding(152064, 3584)\n",
      "        (layers): ModuleList(\n",
      "          (0-27): 28 x Qwen2DecoderLayer(\n",
      "            (self_attn): Qwen2Attention(\n",
      "              (q_proj): lora.Linear(\n",
      "                (base_layer): Linear(in_features=3584, out_features=3584, bias=True)\n",
      "                (lora_dropout): ModuleDict(\n",
      "                  (default): Dropout(p=0.05, inplace=False)\n",
      "                )\n",
      "                (lora_A): ModuleDict(\n",
      "                  (default): Linear(in_features=3584, out_features=512, bias=False)\n",
      "                )\n",
      "                (lora_B): ModuleDict(\n",
      "                  (default): Linear(in_features=512, out_features=3584, bias=False)\n",
      "                )\n",
      "                (lora_embedding_A): ParameterDict()\n",
      "                (lora_embedding_B): ParameterDict()\n",
      "                (lora_magnitude_vector): ModuleDict()\n",
      "              )\n",
      "              (k_proj): lora.Linear(\n",
      "                (base_layer): Linear(in_features=3584, out_features=512, bias=True)\n",
      "                (lora_dropout): ModuleDict(\n",
      "                  (default): Dropout(p=0.05, inplace=False)\n",
      "                )\n",
      "                (lora_A): ModuleDict(\n",
      "                  (default): Linear(in_features=3584, out_features=512, bias=False)\n",
      "                )\n",
      "                (lora_B): ModuleDict(\n",
      "                  (default): Linear(in_features=512, out_features=512, bias=False)\n",
      "                )\n",
      "                (lora_embedding_A): ParameterDict()\n",
      "                (lora_embedding_B): ParameterDict()\n",
      "                (lora_magnitude_vector): ModuleDict()\n",
      "              )\n",
      "              (v_proj): lora.Linear(\n",
      "                (base_layer): Linear(in_features=3584, out_features=512, bias=True)\n",
      "                (lora_dropout): ModuleDict(\n",
      "                  (default): Dropout(p=0.05, inplace=False)\n",
      "                )\n",
      "                (lora_A): ModuleDict(\n",
      "                  (default): Linear(in_features=3584, out_features=512, bias=False)\n",
      "                )\n",
      "                (lora_B): ModuleDict(\n",
      "                  (default): Linear(in_features=512, out_features=512, bias=False)\n",
      "                )\n",
      "                (lora_embedding_A): ParameterDict()\n",
      "                (lora_embedding_B): ParameterDict()\n",
      "                (lora_magnitude_vector): ModuleDict()\n",
      "              )\n",
      "              (o_proj): lora.Linear(\n",
      "                (base_layer): Linear(in_features=3584, out_features=3584, bias=False)\n",
      "                (lora_dropout): ModuleDict(\n",
      "                  (default): Dropout(p=0.05, inplace=False)\n",
      "                )\n",
      "                (lora_A): ModuleDict(\n",
      "                  (default): Linear(in_features=3584, out_features=512, bias=False)\n",
      "                )\n",
      "                (lora_B): ModuleDict(\n",
      "                  (default): Linear(in_features=512, out_features=3584, bias=False)\n",
      "                )\n",
      "                (lora_embedding_A): ParameterDict()\n",
      "                (lora_embedding_B): ParameterDict()\n",
      "                (lora_magnitude_vector): ModuleDict()\n",
      "              )\n",
      "            )\n",
      "            (mlp): Qwen2MLP(\n",
      "              (gate_proj): lora.Linear(\n",
      "                (base_layer): Linear(in_features=3584, out_features=18944, bias=False)\n",
      "                (lora_dropout): ModuleDict(\n",
      "                  (default): Dropout(p=0.05, inplace=False)\n",
      "                )\n",
      "                (lora_A): ModuleDict(\n",
      "                  (default): Linear(in_features=3584, out_features=512, bias=False)\n",
      "                )\n",
      "                (lora_B): ModuleDict(\n",
      "                  (default): Linear(in_features=512, out_features=18944, bias=False)\n",
      "                )\n",
      "                (lora_embedding_A): ParameterDict()\n",
      "                (lora_embedding_B): ParameterDict()\n",
      "                (lora_magnitude_vector): ModuleDict()\n",
      "              )\n",
      "              (up_proj): lora.Linear(\n",
      "                (base_layer): Linear(in_features=3584, out_features=18944, bias=False)\n",
      "                (lora_dropout): ModuleDict(\n",
      "                  (default): Dropout(p=0.05, inplace=False)\n",
      "                )\n",
      "                (lora_A): ModuleDict(\n",
      "                  (default): Linear(in_features=3584, out_features=512, bias=False)\n",
      "                )\n",
      "                (lora_B): ModuleDict(\n",
      "                  (default): Linear(in_features=512, out_features=18944, bias=False)\n",
      "                )\n",
      "                (lora_embedding_A): ParameterDict()\n",
      "                (lora_embedding_B): ParameterDict()\n",
      "                (lora_magnitude_vector): ModuleDict()\n",
      "              )\n",
      "              (down_proj): lora.Linear(\n",
      "                (base_layer): Linear(in_features=18944, out_features=3584, bias=False)\n",
      "                (lora_dropout): ModuleDict(\n",
      "                  (default): Dropout(p=0.05, inplace=False)\n",
      "                )\n",
      "                (lora_A): ModuleDict(\n",
      "                  (default): Linear(in_features=18944, out_features=512, bias=False)\n",
      "                )\n",
      "                (lora_B): ModuleDict(\n",
      "                  (default): Linear(in_features=512, out_features=3584, bias=False)\n",
      "                )\n",
      "                (lora_embedding_A): ParameterDict()\n",
      "                (lora_embedding_B): ParameterDict()\n",
      "                (lora_magnitude_vector): ModuleDict()\n",
      "              )\n",
      "              (act_fn): SiLU()\n",
      "            )\n",
      "            (input_layernorm): Qwen2RMSNorm((3584,), eps=1e-06)\n",
      "            (post_attention_layernorm): Qwen2RMSNorm((3584,), eps=1e-06)\n",
      "          )\n",
      "        )\n",
      "        (norm): Qwen2RMSNorm((3584,), eps=1e-06)\n",
      "        (rotary_emb): Qwen2RotaryEmbedding()\n",
      "      )\n",
      "      (lm_head): Linear(in_features=3584, out_features=152064, bias=False)\n",
      "    )\n",
      "  )\n",
      ")\n",
      "[INFO:swift] model_parameter_info: PeftModelForCausalLM: 8907.4621M Params (1291.8456M Trainable [14.5030%]), 0.0001M Buffers.\n",
      "/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/trainers/mixin.py:81: FutureWarning: `tokenizer` is deprecated and will be removed in version 5.0.0 for `Seq2SeqTrainer.__init__`. Use `processing_class` instead.\n",
      "  super().__init__(\n",
      "No label_names provided for model class `PeftModelForCausalLM`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.\n",
      "[INFO:swift] The logging file will be saved in: /data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/logging.jsonl\n",
      "{'loss': 0.13851231, 'seq_acc': 0.0, 'grad_norm': 8.25205803, 'learning_rate': 7e-08, 'memory(GiB)': 41.51, 'train_speed(iter/s)': 0.55615, 'epoch': 0.0, 'global_step/max_steps': '1/4800', 'percentage': '0.02%', 'elapsed_time': '1s', 'remaining_time': '2h 0m 5s'}\n",
      "{'loss': 0.21066489, 'seq_acc': 0.0, 'grad_norm': 4.71586084, 'learning_rate': 6.9e-07, 'memory(GiB)': 50.81, 'train_speed(iter/s)': 1.273666, 'epoch': 0.01, 'global_step/max_steps': '10/4800', 'percentage': '0.21%', 'elapsed_time': '7s', 'remaining_time': '1h 0m 18s'}\n",
      "{'loss': 0.09964452, 'seq_acc': 0.2, 'grad_norm': 2.92233658, 'learning_rate': 1.39e-06, 'memory(GiB)': 53.92, 'train_speed(iter/s)': 1.338259, 'epoch': 0.02, 'global_step/max_steps': '20/4800', 'percentage': '0.42%', 'elapsed_time': '14s', 'remaining_time': '58m 20s'}\n",
      "{'loss': 0.07370846, 'seq_acc': 0.1, 'grad_norm': 7.80563545, 'learning_rate': 2.08e-06, 'memory(GiB)': 57.68, 'train_speed(iter/s)': 1.329858, 'epoch': 0.03, 'global_step/max_steps': '30/4800', 'percentage': '0.62%', 'elapsed_time': '22s', 'remaining_time': '58m 59s'}\n",
      "{'loss': 0.04053537, 'seq_acc': 0.5, 'grad_norm': 4.83569431, 'learning_rate': 2.78e-06, 'memory(GiB)': 61.56, 'train_speed(iter/s)': 1.348949, 'epoch': 0.04, 'global_step/max_steps': '40/4800', 'percentage': '0.83%', 'elapsed_time': '29s', 'remaining_time': '58m 13s'}\n",
      "{'loss': 0.06132866, 'seq_acc': 0.3, 'grad_norm': 5.59369612, 'learning_rate': 3.47e-06, 'memory(GiB)': 61.56, 'train_speed(iter/s)': 1.350364, 'epoch': 0.05, 'global_step/max_steps': '50/4800', 'percentage': '1.04%', 'elapsed_time': '36s', 'remaining_time': '58m 9s'}\n",
      "{'loss': 0.03345063, 'seq_acc': 0.6, 'grad_norm': 14.38028908, 'learning_rate': 4.17e-06, 'memory(GiB)': 61.56, 'train_speed(iter/s)': 1.357195, 'epoch': 0.06, 'global_step/max_steps': '60/4800', 'percentage': '1.25%', 'elapsed_time': '43s', 'remaining_time': '57m 49s'}\n",
      "{'loss': 0.05450813, 'seq_acc': 0.2, 'grad_norm': 8.65906143, 'learning_rate': 4.86e-06, 'memory(GiB)': 62.87, 'train_speed(iter/s)': 1.3479, 'epoch': 0.07, 'global_step/max_steps': '70/4800', 'percentage': '1.46%', 'elapsed_time': '51s', 'remaining_time': '58m 9s'}\n",
      "{'loss': 0.03203806, 'seq_acc': 0.3, 'grad_norm': 2.77922988, 'learning_rate': 5.56e-06, 'memory(GiB)': 62.87, 'train_speed(iter/s)': 1.338862, 'epoch': 0.08, 'global_step/max_steps': '80/4800', 'percentage': '1.67%', 'elapsed_time': '59s', 'remaining_time': '58m 27s'}\n",
      "{'loss': 0.04953095, 'seq_acc': 0.3, 'grad_norm': 10.02822399, 'learning_rate': 6.25e-06, 'memory(GiB)': 62.87, 'train_speed(iter/s)': 1.3492, 'epoch': 0.09, 'global_step/max_steps': '90/4800', 'percentage': '1.88%', 'elapsed_time': '1m 6s', 'remaining_time': '57m 55s'}\n",
      "{'loss': 0.02943398, 'seq_acc': 0.1, 'grad_norm': 7.16859818, 'learning_rate': 6.94e-06, 'memory(GiB)': 62.87, 'train_speed(iter/s)': 1.352952, 'epoch': 0.1, 'global_step/max_steps': '100/4800', 'percentage': '2.08%', 'elapsed_time': '1m 13s', 'remaining_time': '57m 39s'}\n",
      "{'loss': 0.03323607, 'seq_acc': 0.5, 'grad_norm': 0.04038914, 'learning_rate': 7.64e-06, 'memory(GiB)': 62.87, 'train_speed(iter/s)': 1.355504, 'epoch': 0.11, 'global_step/max_steps': '110/4800', 'percentage': '2.29%', 'elapsed_time': '1m 20s', 'remaining_time': '57m 27s'}\n",
      "{'loss': 0.03277011, 'seq_acc': 0.4, 'grad_norm': 0.22027147, 'learning_rate': 8.33e-06, 'memory(GiB)': 62.87, 'train_speed(iter/s)': 1.352765, 'epoch': 0.12, 'global_step/max_steps': '120/4800', 'percentage': '2.50%', 'elapsed_time': '1m 28s', 'remaining_time': '57m 28s'}\n",
      "{'loss': 0.03220743, 'seq_acc': 0.4, 'grad_norm': 0.2886326, 'learning_rate': 9.03e-06, 'memory(GiB)': 64.2, 'train_speed(iter/s)': 1.345852, 'epoch': 0.14, 'global_step/max_steps': '130/4800', 'percentage': '2.71%', 'elapsed_time': '1m 36s', 'remaining_time': '57m 39s'}\n",
      "{'loss': 0.05577358, 'seq_acc': 0.2, 'grad_norm': 5.14797974, 'learning_rate': 9.72e-06, 'memory(GiB)': 64.2, 'train_speed(iter/s)': 1.346314, 'epoch': 0.15, 'global_step/max_steps': '140/4800', 'percentage': '2.92%', 'elapsed_time': '1m 43s', 'remaining_time': '57m 31s'}\n",
      "{'loss': 0.02287129, 'seq_acc': 0.6, 'grad_norm': 18.81019783, 'learning_rate': 1e-05, 'memory(GiB)': 65.56, 'train_speed(iter/s)': 1.339413, 'epoch': 0.16, 'global_step/max_steps': '150/4800', 'percentage': '3.12%', 'elapsed_time': '1m 51s', 'remaining_time': '57m 42s'}\n",
      "{'loss': 0.02974528, 'seq_acc': 0.7, 'grad_norm': 5.2142601, 'learning_rate': 1e-05, 'memory(GiB)': 65.56, 'train_speed(iter/s)': 1.336429, 'epoch': 0.17, 'global_step/max_steps': '160/4800', 'percentage': '3.33%', 'elapsed_time': '1m 59s', 'remaining_time': '57m 43s'}\n",
      "{'loss': 0.04017694, 'seq_acc': 0.5, 'grad_norm': 0.03247174, 'learning_rate': 1e-05, 'memory(GiB)': 65.56, 'train_speed(iter/s)': 1.339149, 'epoch': 0.18, 'global_step/max_steps': '170/4800', 'percentage': '3.54%', 'elapsed_time': '2m 6s', 'remaining_time': '57m 29s'}\n",
      "{'loss': 0.04876598, 'seq_acc': 0.6, 'grad_norm': 1.4674387, 'learning_rate': 1e-05, 'memory(GiB)': 67.24, 'train_speed(iter/s)': 1.334314, 'epoch': 0.19, 'global_step/max_steps': '180/4800', 'percentage': '3.75%', 'elapsed_time': '2m 14s', 'remaining_time': '57m 34s'}\n",
      "{'loss': 0.01695102, 'seq_acc': 0.6, 'grad_norm': 0.41947028, 'learning_rate': 1e-05, 'memory(GiB)': 67.24, 'train_speed(iter/s)': 1.332603, 'epoch': 0.2, 'global_step/max_steps': '190/4800', 'percentage': '3.96%', 'elapsed_time': '2m 22s', 'remaining_time': '57m 32s'}\n",
      "{'loss': 0.00988903, 'seq_acc': 0.7, 'grad_norm': 0.37597269, 'learning_rate': 1e-05, 'memory(GiB)': 67.24, 'train_speed(iter/s)': 1.335794, 'epoch': 0.21, 'global_step/max_steps': '200/4800', 'percentage': '4.17%', 'elapsed_time': '2m 29s', 'remaining_time': '57m 16s'}\n",
      "{'loss': 0.02195488, 'seq_acc': 0.6, 'grad_norm': 0.01744288, 'learning_rate': 1e-05, 'memory(GiB)': 67.24, 'train_speed(iter/s)': 1.335655, 'epoch': 0.22, 'global_step/max_steps': '210/4800', 'percentage': '4.38%', 'elapsed_time': '2m 36s', 'remaining_time': '57m 10s'}\n",
      "{'loss': 0.0290387, 'seq_acc': 0.7, 'grad_norm': 0.03661367, 'learning_rate': 9.99e-06, 'memory(GiB)': 67.24, 'train_speed(iter/s)': 1.334686, 'epoch': 0.23, 'global_step/max_steps': '220/4800', 'percentage': '4.58%', 'elapsed_time': '2m 44s', 'remaining_time': '57m 5s'}\n",
      "{'loss': 0.03187534, 'seq_acc': 0.7, 'grad_norm': 0.02959662, 'learning_rate': 9.99e-06, 'memory(GiB)': 67.24, 'train_speed(iter/s)': 1.335188, 'epoch': 0.24, 'global_step/max_steps': '230/4800', 'percentage': '4.79%', 'elapsed_time': '2m 51s', 'remaining_time': '56m 56s'}\n",
      "{'loss': 0.03294066, 'seq_acc': 0.3, 'grad_norm': 5.5279479, 'learning_rate': 9.99e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334617, 'epoch': 0.25, 'global_step/max_steps': '240/4800', 'percentage': '5.00%', 'elapsed_time': '2m 59s', 'remaining_time': '56m 51s'}\n",
      "{'loss': 0.06807249, 'seq_acc': 0.3, 'grad_norm': 8.25252342, 'learning_rate': 9.99e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.337829, 'epoch': 0.26, 'global_step/max_steps': '250/4800', 'percentage': '5.21%', 'elapsed_time': '3m 6s', 'remaining_time': '56m 35s'}\n",
      "{'loss': 0.02329856, 'seq_acc': 0.6, 'grad_norm': 0.04649577, 'learning_rate': 9.98e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.3394, 'epoch': 0.27, 'global_step/max_steps': '260/4800', 'percentage': '5.42%', 'elapsed_time': '3m 13s', 'remaining_time': '56m 24s'}\n",
      "{'loss': 0.01824031, 'seq_acc': 0.7, 'grad_norm': 0.04162632, 'learning_rate': 9.98e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.341773, 'epoch': 0.28, 'global_step/max_steps': '270/4800', 'percentage': '5.62%', 'elapsed_time': '3m 20s', 'remaining_time': '56m 11s'}\n",
      "{'loss': 0.03085034, 'seq_acc': 0.6, 'grad_norm': 0.76393449, 'learning_rate': 9.98e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.343436, 'epoch': 0.29, 'global_step/max_steps': '280/4800', 'percentage': '5.83%', 'elapsed_time': '3m 28s', 'remaining_time': '55m 59s'}\n",
      "{'loss': 0.0127307, 'seq_acc': 0.8, 'grad_norm': 1.68822312, 'learning_rate': 9.98e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.342932, 'epoch': 0.3, 'global_step/max_steps': '290/4800', 'percentage': '6.04%', 'elapsed_time': '3m 35s', 'remaining_time': '55m 53s'}\n",
      "{'loss': 0.0587652, 'seq_acc': 0.3, 'grad_norm': 7.98548889, 'learning_rate': 9.97e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.344386, 'epoch': 0.31, 'global_step/max_steps': '300/4800', 'percentage': '6.25%', 'elapsed_time': '3m 42s', 'remaining_time': '55m 42s'}\n",
      "{'loss': 0.02730957, 'seq_acc': 0.5, 'grad_norm': 5.48783588, 'learning_rate': 9.97e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.343185, 'epoch': 0.32, 'global_step/max_steps': '310/4800', 'percentage': '6.46%', 'elapsed_time': '3m 50s', 'remaining_time': '55m 38s'}\n",
      "{'loss': 0.03138548, 'seq_acc': 0.6, 'grad_norm': 0.52085578, 'learning_rate': 9.96e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.340972, 'epoch': 0.33, 'global_step/max_steps': '320/4800', 'percentage': '6.67%', 'elapsed_time': '3m 58s', 'remaining_time': '55m 36s'}\n",
      "{'loss': 0.02443886, 'seq_acc': 0.5, 'grad_norm': 1.24232435, 'learning_rate': 9.96e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.339968, 'epoch': 0.34, 'global_step/max_steps': '330/4800', 'percentage': '6.88%', 'elapsed_time': '4m 5s', 'remaining_time': '55m 31s'}\n",
      "{'loss': 0.04254716, 'seq_acc': 0.4, 'grad_norm': 0.82365513, 'learning_rate': 9.96e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.338505, 'epoch': 0.35, 'global_step/max_steps': '340/4800', 'percentage': '7.08%', 'elapsed_time': '4m 13s', 'remaining_time': '55m 28s'}\n",
      "{'loss': 0.07795136, 'seq_acc': 0.3, 'grad_norm': 2.13328576, 'learning_rate': 9.95e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.341553, 'epoch': 0.36, 'global_step/max_steps': '350/4800', 'percentage': '7.29%', 'elapsed_time': '4m 20s', 'remaining_time': '55m 13s'}\n",
      "{'loss': 0.02389767, 'seq_acc': 0.5, 'grad_norm': 0.12891077, 'learning_rate': 9.95e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.337543, 'epoch': 0.38, 'global_step/max_steps': '360/4800', 'percentage': '7.50%', 'elapsed_time': '4m 28s', 'remaining_time': '55m 15s'}\n",
      "{'loss': 0.03902974, 'seq_acc': 0.5, 'grad_norm': 0.04732016, 'learning_rate': 9.94e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.338205, 'epoch': 0.39, 'global_step/max_steps': '370/4800', 'percentage': '7.71%', 'elapsed_time': '4m 36s', 'remaining_time': '55m 6s'}\n",
      "{'loss': 0.01612347, 'seq_acc': 0.7, 'grad_norm': 0.00895497, 'learning_rate': 9.94e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.336282, 'epoch': 0.4, 'global_step/max_steps': '380/4800', 'percentage': '7.92%', 'elapsed_time': '4m 44s', 'remaining_time': '55m 4s'}\n",
      "{'loss': 0.02605846, 'seq_acc': 0.6, 'grad_norm': 5.48562241, 'learning_rate': 9.93e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.336404, 'epoch': 0.41, 'global_step/max_steps': '390/4800', 'percentage': '8.12%', 'elapsed_time': '4m 51s', 'remaining_time': '54m 56s'}\n",
      "{'loss': 0.02549206, 'seq_acc': 0.7, 'grad_norm': 0.04414688, 'learning_rate': 9.93e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.335412, 'epoch': 0.42, 'global_step/max_steps': '400/4800', 'percentage': '8.33%', 'elapsed_time': '4m 59s', 'remaining_time': '54m 51s'}\n",
      "{'loss': 0.04282818, 'seq_acc': 0.6, 'grad_norm': 7.89067602, 'learning_rate': 9.92e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.335975, 'epoch': 0.43, 'global_step/max_steps': '410/4800', 'percentage': '8.54%', 'elapsed_time': '5m 6s', 'remaining_time': '54m 42s'}\n",
      "{'loss': 0.03945529, 'seq_acc': 0.5, 'grad_norm': 1.83278239, 'learning_rate': 9.91e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334598, 'epoch': 0.44, 'global_step/max_steps': '420/4800', 'percentage': '8.75%', 'elapsed_time': '5m 14s', 'remaining_time': '54m 38s'}\n",
      "{'loss': 0.02849221, 'seq_acc': 0.3, 'grad_norm': 6.34631157, 'learning_rate': 9.91e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332804, 'epoch': 0.45, 'global_step/max_steps': '430/4800', 'percentage': '8.96%', 'elapsed_time': '5m 22s', 'remaining_time': '54m 35s'}\n",
      "{'loss': 0.02645998, 'seq_acc': 0.5, 'grad_norm': 2.78466082, 'learning_rate': 9.9e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331215, 'epoch': 0.46, 'global_step/max_steps': '440/4800', 'percentage': '9.17%', 'elapsed_time': '5m 30s', 'remaining_time': '54m 32s'}\n",
      "{'loss': 0.05570911, 'seq_acc': 0.4, 'grad_norm': 3.56945491, 'learning_rate': 9.89e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.330646, 'epoch': 0.47, 'global_step/max_steps': '450/4800', 'percentage': '9.38%', 'elapsed_time': '5m 37s', 'remaining_time': '54m 26s'}\n",
      "{'loss': 0.01386064, 'seq_acc': 0.7, 'grad_norm': 4.81068897, 'learning_rate': 9.89e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333044, 'epoch': 0.48, 'global_step/max_steps': '460/4800', 'percentage': '9.58%', 'elapsed_time': '5m 44s', 'remaining_time': '54m 12s'}\n",
      "{'loss': 0.02778095, 'seq_acc': 0.4, 'grad_norm': 0.27604458, 'learning_rate': 9.88e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331858, 'epoch': 0.49, 'global_step/max_steps': '470/4800', 'percentage': '9.79%', 'elapsed_time': '5m 52s', 'remaining_time': '54m 8s'}\n",
      "{'loss': 0.01424511, 'seq_acc': 0.6, 'grad_norm': 0.47568274, 'learning_rate': 9.87e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331987, 'epoch': 0.5, 'global_step/max_steps': '480/4800', 'percentage': '10.00%', 'elapsed_time': '6m 0s', 'remaining_time': '54m 0s'}\n",
      "{'loss': 0.04625554, 'seq_acc': 0.4, 'grad_norm': 3.505404, 'learning_rate': 9.86e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334204, 'epoch': 0.51, 'global_step/max_steps': '490/4800', 'percentage': '10.21%', 'elapsed_time': '6m 6s', 'remaining_time': '53m 47s'}\n",
      "{'loss': 0.041688, 'seq_acc': 0.6, 'grad_norm': 0.17797256, 'learning_rate': 9.86e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.335824, 'epoch': 0.52, 'global_step/max_steps': '500/4800', 'percentage': '10.42%', 'elapsed_time': '6m 14s', 'remaining_time': '53m 36s'}\n",
      "{'loss': 0.03044471, 'seq_acc': 0.3, 'grad_norm': 0.96440715, 'learning_rate': 9.85e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333997, 'epoch': 0.53, 'global_step/max_steps': '510/4800', 'percentage': '10.62%', 'elapsed_time': '6m 22s', 'remaining_time': '53m 33s'}\n",
      "{'loss': 0.02916578, 'seq_acc': 0.7, 'grad_norm': 0.09229706, 'learning_rate': 9.84e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331808, 'epoch': 0.54, 'global_step/max_steps': '520/4800', 'percentage': '10.83%', 'elapsed_time': '6m 30s', 'remaining_time': '53m 31s'}\n",
      "{'loss': 0.03690173, 'seq_acc': 0.5, 'grad_norm': 1.41476989, 'learning_rate': 9.83e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331524, 'epoch': 0.55, 'global_step/max_steps': '530/4800', 'percentage': '11.04%', 'elapsed_time': '6m 37s', 'remaining_time': '53m 24s'}\n",
      "{'loss': 0.0241086, 'seq_acc': 0.4, 'grad_norm': 3.03450584, 'learning_rate': 9.82e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331644, 'epoch': 0.56, 'global_step/max_steps': '540/4800', 'percentage': '11.25%', 'elapsed_time': '6m 45s', 'remaining_time': '53m 16s'}\n",
      "{'loss': 0.03993582, 'seq_acc': 0.3, 'grad_norm': 2.97750521, 'learning_rate': 9.81e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.330996, 'epoch': 0.57, 'global_step/max_steps': '550/4800', 'percentage': '11.46%', 'elapsed_time': '6m 52s', 'remaining_time': '53m 10s'}\n",
      "{'loss': 0.02613271, 'seq_acc': 0.5, 'grad_norm': 0.0580596, 'learning_rate': 9.8e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331469, 'epoch': 0.58, 'global_step/max_steps': '560/4800', 'percentage': '11.67%', 'elapsed_time': '7m 0s', 'remaining_time': '53m 2s'}\n",
      "{'loss': 0.03006436, 'seq_acc': 0.5, 'grad_norm': 1.09173131, 'learning_rate': 9.79e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332792, 'epoch': 0.59, 'global_step/max_steps': '570/4800', 'percentage': '11.88%', 'elapsed_time': '7m 7s', 'remaining_time': '52m 51s'}\n",
      "{'loss': 0.04094568, 'seq_acc': 0.2, 'grad_norm': 1.70686781, 'learning_rate': 9.79e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331057, 'epoch': 0.6, 'global_step/max_steps': '580/4800', 'percentage': '12.08%', 'elapsed_time': '7m 15s', 'remaining_time': '52m 48s'}\n",
      "{'loss': 0.0075952, 'seq_acc': 0.8, 'grad_norm': 0.04589126, 'learning_rate': 9.78e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.330504, 'epoch': 0.61, 'global_step/max_steps': '590/4800', 'percentage': '12.29%', 'elapsed_time': '7m 23s', 'remaining_time': '52m 42s'}\n",
      "{'loss': 0.05508445, 'seq_acc': 0.6, 'grad_norm': 0.55666852, 'learning_rate': 9.77e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.329507, 'epoch': 0.62, 'global_step/max_steps': '600/4800', 'percentage': '12.50%', 'elapsed_time': '7m 30s', 'remaining_time': '52m 36s'}\n",
      "{'loss': 0.08166543, 'seq_acc': 0.1, 'grad_norm': 1.89577758, 'learning_rate': 9.75e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.324813, 'epoch': 0.64, 'global_step/max_steps': '610/4800', 'percentage': '12.71%', 'elapsed_time': '7m 40s', 'remaining_time': '52m 40s'}\n",
      "{'loss': 0.01941505, 'seq_acc': 0.6, 'grad_norm': 3.16400814, 'learning_rate': 9.74e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.324914, 'epoch': 0.65, 'global_step/max_steps': '620/4800', 'percentage': '12.92%', 'elapsed_time': '7m 47s', 'remaining_time': '52m 32s'}\n",
      "{'loss': 0.0125035, 'seq_acc': 0.7, 'grad_norm': 1.33929014, 'learning_rate': 9.73e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.325396, 'epoch': 0.66, 'global_step/max_steps': '630/4800', 'percentage': '13.12%', 'elapsed_time': '7m 55s', 'remaining_time': '52m 24s'}\n",
      "{'loss': 0.01728059, 'seq_acc': 0.6, 'grad_norm': 0.00653594, 'learning_rate': 9.72e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.326272, 'epoch': 0.67, 'global_step/max_steps': '640/4800', 'percentage': '13.33%', 'elapsed_time': '8m 2s', 'remaining_time': '52m 14s'}\n",
      "{'loss': 0.01075326, 'seq_acc': 0.8, 'grad_norm': 0.08249565, 'learning_rate': 9.71e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.328537, 'epoch': 0.68, 'global_step/max_steps': '650/4800', 'percentage': '13.54%', 'elapsed_time': '8m 8s', 'remaining_time': '52m 1s'}\n",
      "{'loss': 0.0515322, 'seq_acc': 0.5, 'grad_norm': 0.0011454, 'learning_rate': 9.7e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.328807, 'epoch': 0.69, 'global_step/max_steps': '660/4800', 'percentage': '13.75%', 'elapsed_time': '8m 16s', 'remaining_time': '51m 53s'}\n",
      "{'loss': 0.03798415, 'seq_acc': 0.4, 'grad_norm': 0.00497681, 'learning_rate': 9.69e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.328522, 'epoch': 0.7, 'global_step/max_steps': '670/4800', 'percentage': '13.96%', 'elapsed_time': '8m 24s', 'remaining_time': '51m 46s'}\n",
      "{'loss': 0.03421568, 'seq_acc': 0.5, 'grad_norm': 0.57801312, 'learning_rate': 9.68e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.329089, 'epoch': 0.71, 'global_step/max_steps': '680/4800', 'percentage': '14.17%', 'elapsed_time': '8m 31s', 'remaining_time': '51m 38s'}\n",
      "{'loss': 0.03127367, 'seq_acc': 0.6, 'grad_norm': 2.16611099, 'learning_rate': 9.66e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.329236, 'epoch': 0.72, 'global_step/max_steps': '690/4800', 'percentage': '14.37%', 'elapsed_time': '8m 38s', 'remaining_time': '51m 30s'}\n",
      "{'loss': 0.03161341, 'seq_acc': 0.6, 'grad_norm': 0.0286566, 'learning_rate': 9.65e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.329769, 'epoch': 0.73, 'global_step/max_steps': '700/4800', 'percentage': '14.58%', 'elapsed_time': '8m 46s', 'remaining_time': '51m 21s'}\n",
      "{'loss': 0.02000933, 'seq_acc': 0.4, 'grad_norm': 0.57309103, 'learning_rate': 9.64e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.329358, 'epoch': 0.74, 'global_step/max_steps': '710/4800', 'percentage': '14.79%', 'elapsed_time': '8m 53s', 'remaining_time': '51m 14s'}\n",
      "{'loss': 0.02352664, 'seq_acc': 0.6, 'grad_norm': 0.06051181, 'learning_rate': 9.63e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331121, 'epoch': 0.75, 'global_step/max_steps': '720/4800', 'percentage': '15.00%', 'elapsed_time': '9m 0s', 'remaining_time': '51m 3s'}\n",
      "{'loss': 0.00970568, 'seq_acc': 0.7, 'grad_norm': 1.84249043, 'learning_rate': 9.61e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.330815, 'epoch': 0.76, 'global_step/max_steps': '730/4800', 'percentage': '15.21%', 'elapsed_time': '9m 8s', 'remaining_time': '50m 56s'}\n",
      "{'loss': 0.0297083, 'seq_acc': 0.4, 'grad_norm': 0.02052817, 'learning_rate': 9.6e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.330319, 'epoch': 0.77, 'global_step/max_steps': '740/4800', 'percentage': '15.42%', 'elapsed_time': '9m 15s', 'remaining_time': '50m 50s'}\n",
      "{'loss': 0.04442059, 'seq_acc': 0.4, 'grad_norm': 3.23380756, 'learning_rate': 9.59e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33082, 'epoch': 0.78, 'global_step/max_steps': '750/4800', 'percentage': '15.62%', 'elapsed_time': '9m 23s', 'remaining_time': '50m 41s'}\n",
      "{'loss': 0.03807801, 'seq_acc': 0.4, 'grad_norm': 0.14472666, 'learning_rate': 9.57e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.329309, 'epoch': 0.79, 'global_step/max_steps': '760/4800', 'percentage': '15.83%', 'elapsed_time': '9m 31s', 'remaining_time': '50m 37s'}\n",
      "{'loss': 0.04707785, 'seq_acc': 0.5, 'grad_norm': 2.7189064, 'learning_rate': 9.56e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.328739, 'epoch': 0.8, 'global_step/max_steps': '770/4800', 'percentage': '16.04%', 'elapsed_time': '9m 39s', 'remaining_time': '50m 31s'}\n",
      "{'loss': 0.02963981, 'seq_acc': 0.6, 'grad_norm': 0.03679496, 'learning_rate': 9.55e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.328791, 'epoch': 0.81, 'global_step/max_steps': '780/4800', 'percentage': '16.25%', 'elapsed_time': '9m 46s', 'remaining_time': '50m 23s'}\n",
      "{'loss': 0.02537045, 'seq_acc': 0.5, 'grad_norm': 0.0196518, 'learning_rate': 9.53e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.329106, 'epoch': 0.82, 'global_step/max_steps': '790/4800', 'percentage': '16.46%', 'elapsed_time': '9m 54s', 'remaining_time': '50m 15s'}\n",
      "{'loss': 0.02888902, 'seq_acc': 0.6, 'grad_norm': 0.06667507, 'learning_rate': 9.52e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.329882, 'epoch': 0.83, 'global_step/max_steps': '800/4800', 'percentage': '16.67%', 'elapsed_time': '10m 1s', 'remaining_time': '50m 6s'}\n",
      "{'loss': 0.03098594, 'seq_acc': 0.4, 'grad_norm': 1.38894165, 'learning_rate': 9.5e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.330615, 'epoch': 0.84, 'global_step/max_steps': '810/4800', 'percentage': '16.88%', 'elapsed_time': '10m 8s', 'remaining_time': '49m 57s'}\n",
      "{'loss': 0.02459187, 'seq_acc': 0.6, 'grad_norm': 0.57578444, 'learning_rate': 9.49e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.330857, 'epoch': 0.85, 'global_step/max_steps': '820/4800', 'percentage': '17.08%', 'elapsed_time': '10m 15s', 'remaining_time': '49m 49s'}\n",
      "{'loss': 0.05080284, 'seq_acc': 0.6, 'grad_norm': 1.48861682, 'learning_rate': 9.47e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331349, 'epoch': 0.86, 'global_step/max_steps': '830/4800', 'percentage': '17.29%', 'elapsed_time': '10m 23s', 'remaining_time': '49m 40s'}\n",
      "{'loss': 0.01368525, 'seq_acc': 0.6, 'grad_norm': 0.17953888, 'learning_rate': 9.46e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331003, 'epoch': 0.88, 'global_step/max_steps': '840/4800', 'percentage': '17.50%', 'elapsed_time': '10m 30s', 'remaining_time': '49m 33s'}\n",
      "{'loss': 0.03296535, 'seq_acc': 0.5, 'grad_norm': 1.97897375, 'learning_rate': 9.44e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332574, 'epoch': 0.89, 'global_step/max_steps': '850/4800', 'percentage': '17.71%', 'elapsed_time': '10m 37s', 'remaining_time': '49m 22s'}\n",
      "{'loss': 0.00925832, 'seq_acc': 0.7, 'grad_norm': 0.01806834, 'learning_rate': 9.43e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332532, 'epoch': 0.9, 'global_step/max_steps': '860/4800', 'percentage': '17.92%', 'elapsed_time': '10m 45s', 'remaining_time': '49m 15s'}\n",
      "{'loss': 0.03043525, 'seq_acc': 0.6, 'grad_norm': 1.44053888, 'learning_rate': 9.41e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331905, 'epoch': 0.91, 'global_step/max_steps': '870/4800', 'percentage': '18.12%', 'elapsed_time': '10m 52s', 'remaining_time': '49m 9s'}\n",
      "{'loss': 0.04570938, 'seq_acc': 0.2, 'grad_norm': 0.280159, 'learning_rate': 9.4e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33187, 'epoch': 0.92, 'global_step/max_steps': '880/4800', 'percentage': '18.33%', 'elapsed_time': '11m 0s', 'remaining_time': '49m 1s'}\n",
      "{'loss': 0.03636339, 'seq_acc': 0.6, 'grad_norm': 0.14453785, 'learning_rate': 9.38e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331629, 'epoch': 0.93, 'global_step/max_steps': '890/4800', 'percentage': '18.54%', 'elapsed_time': '11m 8s', 'remaining_time': '48m 54s'}\n",
      "{'loss': 0.02967282, 'seq_acc': 0.5, 'grad_norm': 3.8299942, 'learning_rate': 9.36e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332306, 'epoch': 0.94, 'global_step/max_steps': '900/4800', 'percentage': '18.75%', 'elapsed_time': '11m 15s', 'remaining_time': '48m 45s'}\n",
      "{'loss': 0.00726164, 'seq_acc': 0.7, 'grad_norm': 0.80799264, 'learning_rate': 9.35e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333139, 'epoch': 0.95, 'global_step/max_steps': '910/4800', 'percentage': '18.96%', 'elapsed_time': '11m 22s', 'remaining_time': '48m 36s'}\n",
      "{'loss': 0.00716926, 'seq_acc': 0.7, 'grad_norm': 1.68474245, 'learning_rate': 9.33e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332417, 'epoch': 0.96, 'global_step/max_steps': '920/4800', 'percentage': '19.17%', 'elapsed_time': '11m 30s', 'remaining_time': '48m 30s'}\n",
      "{'loss': 0.02461932, 'seq_acc': 0.5, 'grad_norm': 0.67233199, 'learning_rate': 9.31e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332613, 'epoch': 0.97, 'global_step/max_steps': '930/4800', 'percentage': '19.38%', 'elapsed_time': '11m 37s', 'remaining_time': '48m 22s'}\n",
      "{'loss': 0.02578438, 'seq_acc': 0.7, 'grad_norm': 0.09420002, 'learning_rate': 9.3e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331855, 'epoch': 0.98, 'global_step/max_steps': '940/4800', 'percentage': '19.58%', 'elapsed_time': '11m 45s', 'remaining_time': '48m 16s'}\n",
      "{'loss': 0.01437061, 'seq_acc': 0.9, 'grad_norm': 0.11458838, 'learning_rate': 9.28e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.331871, 'epoch': 0.99, 'global_step/max_steps': '950/4800', 'percentage': '19.79%', 'elapsed_time': '11m 52s', 'remaining_time': '48m 9s'}\n",
      "{'loss': 0.02786216, 'seq_acc': 0.5, 'grad_norm': 1.51829386, 'learning_rate': 9.26e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333044, 'epoch': 1.0, 'global_step/max_steps': '960/4800', 'percentage': '20.00%', 'elapsed_time': '11m 59s', 'remaining_time': '47m 59s'}\n",
      "{'loss': 0.00786663, 'seq_acc': 0.9, 'grad_norm': 0.01985824, 'learning_rate': 9.24e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334262, 'epoch': 1.01, 'global_step/max_steps': '970/4800', 'percentage': '20.21%', 'elapsed_time': '12m 6s', 'remaining_time': '47m 49s'}\n",
      "{'loss': 0.01032631, 'seq_acc': 0.7, 'grad_norm': 0.06284459, 'learning_rate': 9.23e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334114, 'epoch': 1.02, 'global_step/max_steps': '980/4800', 'percentage': '20.42%', 'elapsed_time': '12m 14s', 'remaining_time': '47m 42s'}\n",
      "{'loss': 0.00430686, 'seq_acc': 0.9, 'grad_norm': 2.34538889, 'learning_rate': 9.21e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334536, 'epoch': 1.03, 'global_step/max_steps': '990/4800', 'percentage': '20.62%', 'elapsed_time': '12m 21s', 'remaining_time': '47m 33s'}\n",
      "{'loss': 0.00596801, 'seq_acc': 0.9, 'grad_norm': 0.52561897, 'learning_rate': 9.19e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.336092, 'epoch': 1.04, 'global_step/max_steps': '1000/4800', 'percentage': '20.83%', 'elapsed_time': '12m 28s', 'remaining_time': '47m 22s'}\n",
      "{'loss': 0.00223883, 'seq_acc': 1.0, 'grad_norm': 0.20871863, 'learning_rate': 9.17e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.336498, 'epoch': 1.05, 'global_step/max_steps': '1010/4800', 'percentage': '21.04%', 'elapsed_time': '12m 35s', 'remaining_time': '47m 14s'}\n",
      "{'loss': 0.00913911, 'seq_acc': 0.7, 'grad_norm': 0.00225056, 'learning_rate': 9.15e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.336213, 'epoch': 1.06, 'global_step/max_steps': '1020/4800', 'percentage': '21.25%', 'elapsed_time': '12m 43s', 'remaining_time': '47m 7s'}\n",
      "{'loss': 0.00736912, 'seq_acc': 0.9, 'grad_norm': 0.01555302, 'learning_rate': 9.13e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.336528, 'epoch': 1.07, 'global_step/max_steps': '1030/4800', 'percentage': '21.46%', 'elapsed_time': '12m 50s', 'remaining_time': '46m 59s'}\n",
      "{'loss': 0.00503776, 'seq_acc': 0.8, 'grad_norm': 0.01548446, 'learning_rate': 9.11e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.336494, 'epoch': 1.08, 'global_step/max_steps': '1040/4800', 'percentage': '21.67%', 'elapsed_time': '12m 57s', 'remaining_time': '46m 52s'}\n",
      "{'loss': 0.02049403, 'seq_acc': 0.6, 'grad_norm': 0.03407635, 'learning_rate': 9.09e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.336216, 'epoch': 1.09, 'global_step/max_steps': '1050/4800', 'percentage': '21.88%', 'elapsed_time': '13m 5s', 'remaining_time': '46m 45s'}\n",
      "{'loss': 0.01338912, 'seq_acc': 0.6, 'grad_norm': 0.07327966, 'learning_rate': 9.08e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.335961, 'epoch': 1.1, 'global_step/max_steps': '1060/4800', 'percentage': '22.08%', 'elapsed_time': '13m 13s', 'remaining_time': '46m 38s'}\n",
      "{'loss': 0.01324822, 'seq_acc': 0.8, 'grad_norm': 0.37730059, 'learning_rate': 9.06e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.336314, 'epoch': 1.11, 'global_step/max_steps': '1070/4800', 'percentage': '22.29%', 'elapsed_time': '13m 20s', 'remaining_time': '46m 30s'}\n",
      "{'loss': 0.01099841, 'seq_acc': 0.9, 'grad_norm': 0.03607528, 'learning_rate': 9.04e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.336349, 'epoch': 1.12, 'global_step/max_steps': '1080/4800', 'percentage': '22.50%', 'elapsed_time': '13m 27s', 'remaining_time': '46m 22s'}\n",
      "{'loss': 0.00719292, 'seq_acc': 0.9, 'grad_norm': 1.52072346, 'learning_rate': 9.02e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.335358, 'epoch': 1.14, 'global_step/max_steps': '1090/4800', 'percentage': '22.71%', 'elapsed_time': '13m 35s', 'remaining_time': '46m 17s'}\n",
      "{'loss': 0.01371808, 'seq_acc': 0.5, 'grad_norm': 0.00478603, 'learning_rate': 9e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33478, 'epoch': 1.15, 'global_step/max_steps': '1100/4800', 'percentage': '22.92%', 'elapsed_time': '13m 43s', 'remaining_time': '46m 10s'}\n",
      "{'loss': 0.02292278, 'seq_acc': 0.7, 'grad_norm': 0.21656685, 'learning_rate': 8.97e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334174, 'epoch': 1.16, 'global_step/max_steps': '1110/4800', 'percentage': '23.12%', 'elapsed_time': '13m 51s', 'remaining_time': '46m 4s'}\n",
      "{'loss': 0.01202621, 'seq_acc': 0.8, 'grad_norm': 0.2160324, 'learning_rate': 8.95e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333994, 'epoch': 1.17, 'global_step/max_steps': '1120/4800', 'percentage': '23.33%', 'elapsed_time': '13m 59s', 'remaining_time': '45m 57s'}\n",
      "{'loss': 0.01380719, 'seq_acc': 0.7, 'grad_norm': 0.04581814, 'learning_rate': 8.93e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333716, 'epoch': 1.18, 'global_step/max_steps': '1130/4800', 'percentage': '23.54%', 'elapsed_time': '14m 6s', 'remaining_time': '45m 50s'}\n",
      "{'loss': 0.01701617, 'seq_acc': 0.5, 'grad_norm': 3.47573686, 'learning_rate': 8.91e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333811, 'epoch': 1.19, 'global_step/max_steps': '1140/4800', 'percentage': '23.75%', 'elapsed_time': '14m 14s', 'remaining_time': '45m 43s'}\n",
      "{'loss': 0.01271339, 'seq_acc': 0.7, 'grad_norm': 0.1046243, 'learning_rate': 8.89e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334876, 'epoch': 1.2, 'global_step/max_steps': '1150/4800', 'percentage': '23.96%', 'elapsed_time': '14m 21s', 'remaining_time': '45m 33s'}\n",
      "{'loss': 0.01567621, 'seq_acc': 0.7, 'grad_norm': 0.51330489, 'learning_rate': 8.87e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334332, 'epoch': 1.21, 'global_step/max_steps': '1160/4800', 'percentage': '24.17%', 'elapsed_time': '14m 29s', 'remaining_time': '45m 27s'}\n",
      "{'loss': 0.01245185, 'seq_acc': 0.7, 'grad_norm': 0.03007896, 'learning_rate': 8.85e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33411, 'epoch': 1.22, 'global_step/max_steps': '1170/4800', 'percentage': '24.38%', 'elapsed_time': '14m 36s', 'remaining_time': '45m 19s'}\n",
      "{'loss': 0.00996226, 'seq_acc': 0.8, 'grad_norm': 0.02845656, 'learning_rate': 8.83e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333945, 'epoch': 1.23, 'global_step/max_steps': '1180/4800', 'percentage': '24.58%', 'elapsed_time': '14m 44s', 'remaining_time': '45m 12s'}\n",
      "{'loss': 0.00797571, 'seq_acc': 0.8, 'grad_norm': 3.75334048, 'learning_rate': 8.81e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333596, 'epoch': 1.24, 'global_step/max_steps': '1190/4800', 'percentage': '24.79%', 'elapsed_time': '14m 52s', 'remaining_time': '45m 6s'}\n",
      "{'loss': 0.01600976, 'seq_acc': 0.7, 'grad_norm': 0.00408909, 'learning_rate': 8.78e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33369, 'epoch': 1.25, 'global_step/max_steps': '1200/4800', 'percentage': '25.00%', 'elapsed_time': '14m 59s', 'remaining_time': '44m 58s'}\n",
      "{'loss': 0.01638497, 'seq_acc': 0.6, 'grad_norm': 1.42376685, 'learning_rate': 8.76e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333956, 'epoch': 1.26, 'global_step/max_steps': '1210/4800', 'percentage': '25.21%', 'elapsed_time': '15m 6s', 'remaining_time': '44m 50s'}\n",
      "{'loss': 0.00544891, 'seq_acc': 0.9, 'grad_norm': 0.09059757, 'learning_rate': 8.74e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334162, 'epoch': 1.27, 'global_step/max_steps': '1220/4800', 'percentage': '25.42%', 'elapsed_time': '15m 14s', 'remaining_time': '44m 42s'}\n",
      "{'loss': 0.00339367, 'seq_acc': 0.8, 'grad_norm': 0.00388845, 'learning_rate': 8.72e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333966, 'epoch': 1.28, 'global_step/max_steps': '1230/4800', 'percentage': '25.62%', 'elapsed_time': '15m 21s', 'remaining_time': '44m 35s'}\n",
      "{'loss': 0.01001098, 'seq_acc': 0.8, 'grad_norm': 0.00729503, 'learning_rate': 8.69e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333221, 'epoch': 1.29, 'global_step/max_steps': '1240/4800', 'percentage': '25.83%', 'elapsed_time': '15m 29s', 'remaining_time': '44m 29s'}\n",
      "{'loss': 0.01609384, 'seq_acc': 0.9, 'grad_norm': 0.97637701, 'learning_rate': 8.67e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333287, 'epoch': 1.3, 'global_step/max_steps': '1250/4800', 'percentage': '26.04%', 'elapsed_time': '15m 37s', 'remaining_time': '44m 21s'}\n",
      "{'loss': 0.00161627, 'seq_acc': 0.9, 'grad_norm': 0.04313088, 'learning_rate': 8.65e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334441, 'epoch': 1.31, 'global_step/max_steps': '1260/4800', 'percentage': '26.25%', 'elapsed_time': '15m 43s', 'remaining_time': '44m 11s'}\n",
      "{'loss': 0.02361262, 'seq_acc': 0.5, 'grad_norm': 0.41753778, 'learning_rate': 8.63e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.335222, 'epoch': 1.32, 'global_step/max_steps': '1270/4800', 'percentage': '26.46%', 'elapsed_time': '15m 50s', 'remaining_time': '44m 2s'}\n",
      "{'loss': 0.01517592, 'seq_acc': 0.8, 'grad_norm': 0.00248958, 'learning_rate': 8.6e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.335047, 'epoch': 1.33, 'global_step/max_steps': '1280/4800', 'percentage': '26.67%', 'elapsed_time': '15m 58s', 'remaining_time': '43m 55s'}\n",
      "{'loss': 0.01432458, 'seq_acc': 0.6, 'grad_norm': 0.19442914, 'learning_rate': 8.58e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.335564, 'epoch': 1.34, 'global_step/max_steps': '1290/4800', 'percentage': '26.88%', 'elapsed_time': '16m 5s', 'remaining_time': '43m 47s'}\n",
      "{'loss': 0.00819963, 'seq_acc': 0.7, 'grad_norm': 0.04912398, 'learning_rate': 8.55e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334422, 'epoch': 1.35, 'global_step/max_steps': '1300/4800', 'percentage': '27.08%', 'elapsed_time': '16m 13s', 'remaining_time': '43m 42s'}\n",
      "{'loss': 0.01164308, 'seq_acc': 0.8, 'grad_norm': 0.04862244, 'learning_rate': 8.53e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334145, 'epoch': 1.36, 'global_step/max_steps': '1310/4800', 'percentage': '27.29%', 'elapsed_time': '16m 21s', 'remaining_time': '43m 35s'}\n",
      "{'loss': 0.01493717, 'seq_acc': 0.9, 'grad_norm': 0.8239339, 'learning_rate': 8.51e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333582, 'epoch': 1.38, 'global_step/max_steps': '1320/4800', 'percentage': '27.50%', 'elapsed_time': '16m 29s', 'remaining_time': '43m 28s'}\n",
      "{'loss': 0.01074001, 'seq_acc': 0.9, 'grad_norm': 0.01764892, 'learning_rate': 8.48e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333331, 'epoch': 1.39, 'global_step/max_steps': '1330/4800', 'percentage': '27.71%', 'elapsed_time': '16m 37s', 'remaining_time': '43m 21s'}\n",
      "{'loss': 0.00556065, 'seq_acc': 0.9, 'grad_norm': 0.05355107, 'learning_rate': 8.46e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333471, 'epoch': 1.4, 'global_step/max_steps': '1340/4800', 'percentage': '27.92%', 'elapsed_time': '16m 44s', 'remaining_time': '43m 13s'}\n",
      "{'loss': 0.00842307, 'seq_acc': 0.7, 'grad_norm': 1.29064429, 'learning_rate': 8.43e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333809, 'epoch': 1.41, 'global_step/max_steps': '1350/4800', 'percentage': '28.12%', 'elapsed_time': '16m 51s', 'remaining_time': '43m 5s'}\n",
      "{'loss': 0.00897695, 'seq_acc': 0.7, 'grad_norm': 0.06965269, 'learning_rate': 8.41e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334128, 'epoch': 1.42, 'global_step/max_steps': '1360/4800', 'percentage': '28.33%', 'elapsed_time': '16m 59s', 'remaining_time': '42m 57s'}\n",
      "{'loss': 0.00838232, 'seq_acc': 0.8, 'grad_norm': 2.13991141, 'learning_rate': 8.38e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333965, 'epoch': 1.43, 'global_step/max_steps': '1370/4800', 'percentage': '28.54%', 'elapsed_time': '17m 6s', 'remaining_time': '42m 50s'}\n",
      "{'loss': 0.03767136, 'seq_acc': 0.6, 'grad_norm': 1.78262568, 'learning_rate': 8.36e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334363, 'epoch': 1.44, 'global_step/max_steps': '1380/4800', 'percentage': '28.75%', 'elapsed_time': '17m 13s', 'remaining_time': '42m 42s'}\n",
      "{'loss': 0.01805388, 'seq_acc': 0.5, 'grad_norm': 1.75663435, 'learning_rate': 8.33e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333718, 'epoch': 1.45, 'global_step/max_steps': '1390/4800', 'percentage': '28.96%', 'elapsed_time': '17m 21s', 'remaining_time': '42m 36s'}\n",
      "{'loss': 0.00075179, 'seq_acc': 1.0, 'grad_norm': 0.14039023, 'learning_rate': 8.31e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333502, 'epoch': 1.46, 'global_step/max_steps': '1400/4800', 'percentage': '29.17%', 'elapsed_time': '17m 29s', 'remaining_time': '42m 28s'}\n",
      "{'loss': 0.00682939, 'seq_acc': 0.8, 'grad_norm': 0.01691866, 'learning_rate': 8.28e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333837, 'epoch': 1.47, 'global_step/max_steps': '1410/4800', 'percentage': '29.38%', 'elapsed_time': '17m 36s', 'remaining_time': '42m 20s'}\n",
      "{'loss': 0.00979389, 'seq_acc': 0.9, 'grad_norm': 1.22115624, 'learning_rate': 8.26e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333498, 'epoch': 1.48, 'global_step/max_steps': '1420/4800', 'percentage': '29.58%', 'elapsed_time': '17m 44s', 'remaining_time': '42m 13s'}\n",
      "{'loss': 0.02906007, 'seq_acc': 0.7, 'grad_norm': 0.00258688, 'learning_rate': 8.23e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333305, 'epoch': 1.49, 'global_step/max_steps': '1430/4800', 'percentage': '29.79%', 'elapsed_time': '17m 52s', 'remaining_time': '42m 6s'}\n",
      "{'loss': 0.00751362, 'seq_acc': 0.9, 'grad_norm': 0.28036806, 'learning_rate': 8.21e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332867, 'epoch': 1.5, 'global_step/max_steps': '1440/4800', 'percentage': '30.00%', 'elapsed_time': '18m 0s', 'remaining_time': '42m 0s'}\n",
      "{'loss': 0.00704231, 'seq_acc': 0.9, 'grad_norm': 0.00627968, 'learning_rate': 8.18e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333078, 'epoch': 1.51, 'global_step/max_steps': '1450/4800', 'percentage': '30.21%', 'elapsed_time': '18m 7s', 'remaining_time': '41m 52s'}\n",
      "{'loss': 0.00056947, 'seq_acc': 1.0, 'grad_norm': 0.20979273, 'learning_rate': 8.15e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333548, 'epoch': 1.52, 'global_step/max_steps': '1460/4800', 'percentage': '30.42%', 'elapsed_time': '18m 14s', 'remaining_time': '41m 43s'}\n",
      "{'loss': 0.01073357, 'seq_acc': 0.9, 'grad_norm': 0.15032071, 'learning_rate': 8.13e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332989, 'epoch': 1.53, 'global_step/max_steps': '1470/4800', 'percentage': '30.63%', 'elapsed_time': '18m 22s', 'remaining_time': '41m 37s'}\n",
      "{'loss': 0.00711658, 'seq_acc': 0.9, 'grad_norm': 0.00303521, 'learning_rate': 8.1e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332933, 'epoch': 1.54, 'global_step/max_steps': '1480/4800', 'percentage': '30.83%', 'elapsed_time': '18m 30s', 'remaining_time': '41m 30s'}\n",
      "{'loss': 0.00766093, 'seq_acc': 0.9, 'grad_norm': 2.49549651, 'learning_rate': 8.08e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333142, 'epoch': 1.55, 'global_step/max_steps': '1490/4800', 'percentage': '31.04%', 'elapsed_time': '18m 37s', 'remaining_time': '41m 22s'}\n",
      "{'loss': 0.02063577, 'seq_acc': 0.7, 'grad_norm': 0.03791086, 'learning_rate': 8.05e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332661, 'epoch': 1.56, 'global_step/max_steps': '1500/4800', 'percentage': '31.25%', 'elapsed_time': '18m 45s', 'remaining_time': '41m 15s'}\n",
      "{'loss': 0.00990452, 'seq_acc': 0.9, 'grad_norm': 0.02337831, 'learning_rate': 8.02e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332515, 'epoch': 1.57, 'global_step/max_steps': '1510/4800', 'percentage': '31.46%', 'elapsed_time': '18m 52s', 'remaining_time': '41m 8s'}\n",
      "{'loss': 0.03354998, 'seq_acc': 0.4, 'grad_norm': 4.95585108, 'learning_rate': 8e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332744, 'epoch': 1.58, 'global_step/max_steps': '1520/4800', 'percentage': '31.67%', 'elapsed_time': '19m 0s', 'remaining_time': '41m 0s'}\n",
      "{'loss': 0.00429697, 'seq_acc': 0.9, 'grad_norm': 0.03671197, 'learning_rate': 7.97e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332373, 'epoch': 1.59, 'global_step/max_steps': '1530/4800', 'percentage': '31.87%', 'elapsed_time': '19m 8s', 'remaining_time': '40m 53s'}\n",
      "{'loss': 0.01799807, 'seq_acc': 0.7, 'grad_norm': 0.03211433, 'learning_rate': 7.94e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332591, 'epoch': 1.6, 'global_step/max_steps': '1540/4800', 'percentage': '32.08%', 'elapsed_time': '19m 15s', 'remaining_time': '40m 45s'}\n",
      "{'loss': 0.00986952, 'seq_acc': 0.8, 'grad_norm': 3.08793545, 'learning_rate': 7.91e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332465, 'epoch': 1.61, 'global_step/max_steps': '1550/4800', 'percentage': '32.29%', 'elapsed_time': '19m 22s', 'remaining_time': '40m 38s'}\n",
      "{'loss': 0.00825971, 'seq_acc': 0.8, 'grad_norm': 0.0784663, 'learning_rate': 7.89e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332793, 'epoch': 1.62, 'global_step/max_steps': '1560/4800', 'percentage': '32.50%', 'elapsed_time': '19m 30s', 'remaining_time': '40m 30s'}\n",
      "{'loss': 0.02294418, 'seq_acc': 0.5, 'grad_norm': 3.00016046, 'learning_rate': 7.86e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332973, 'epoch': 1.64, 'global_step/max_steps': '1570/4800', 'percentage': '32.71%', 'elapsed_time': '19m 37s', 'remaining_time': '40m 22s'}\n",
      "{'loss': 0.00888042, 'seq_acc': 0.9, 'grad_norm': 0.07372076, 'learning_rate': 7.83e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333563, 'epoch': 1.65, 'global_step/max_steps': '1580/4800', 'percentage': '32.92%', 'elapsed_time': '19m 44s', 'remaining_time': '40m 13s'}\n",
      "{'loss': 0.01662317, 'seq_acc': 0.6, 'grad_norm': 0.01093685, 'learning_rate': 7.8e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334002, 'epoch': 1.66, 'global_step/max_steps': '1590/4800', 'percentage': '33.12%', 'elapsed_time': '19m 51s', 'remaining_time': '40m 5s'}\n",
      "{'loss': 0.00923714, 'seq_acc': 0.8, 'grad_norm': 0.06183292, 'learning_rate': 7.78e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333479, 'epoch': 1.67, 'global_step/max_steps': '1600/4800', 'percentage': '33.33%', 'elapsed_time': '19m 59s', 'remaining_time': '39m 59s'}\n",
      "{'loss': 0.01310008, 'seq_acc': 0.6, 'grad_norm': 0.174908, 'learning_rate': 7.75e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333367, 'epoch': 1.68, 'global_step/max_steps': '1610/4800', 'percentage': '33.54%', 'elapsed_time': '20m 7s', 'remaining_time': '39m 51s'}\n",
      "{'loss': 0.01895685, 'seq_acc': 0.6, 'grad_norm': 0.02143413, 'learning_rate': 7.72e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333207, 'epoch': 1.69, 'global_step/max_steps': '1620/4800', 'percentage': '33.75%', 'elapsed_time': '20m 14s', 'remaining_time': '39m 44s'}\n",
      "{'loss': 0.0249435, 'seq_acc': 0.7, 'grad_norm': 0.0038899, 'learning_rate': 7.69e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333361, 'epoch': 1.7, 'global_step/max_steps': '1630/4800', 'percentage': '33.96%', 'elapsed_time': '20m 22s', 'remaining_time': '39m 36s'}\n",
      "{'loss': 0.01121371, 'seq_acc': 0.7, 'grad_norm': 0.91784084, 'learning_rate': 7.66e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333747, 'epoch': 1.71, 'global_step/max_steps': '1640/4800', 'percentage': '34.17%', 'elapsed_time': '20m 29s', 'remaining_time': '39m 28s'}\n",
      "{'loss': 0.0081757, 'seq_acc': 0.7, 'grad_norm': 4.70658064, 'learning_rate': 7.63e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333578, 'epoch': 1.72, 'global_step/max_steps': '1650/4800', 'percentage': '34.38%', 'elapsed_time': '20m 36s', 'remaining_time': '39m 21s'}\n",
      "{'loss': 0.00771608, 'seq_acc': 0.9, 'grad_norm': 0.95677769, 'learning_rate': 7.6e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333913, 'epoch': 1.73, 'global_step/max_steps': '1660/4800', 'percentage': '34.58%', 'elapsed_time': '20m 44s', 'remaining_time': '39m 13s'}\n",
      "{'loss': 0.05794131, 'seq_acc': 0.4, 'grad_norm': 5.0460248, 'learning_rate': 7.58e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333893, 'epoch': 1.74, 'global_step/max_steps': '1670/4800', 'percentage': '34.79%', 'elapsed_time': '20m 51s', 'remaining_time': '39m 5s'}\n",
      "{'loss': 0.00992896, 'seq_acc': 0.8, 'grad_norm': 0.02970991, 'learning_rate': 7.55e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334205, 'epoch': 1.75, 'global_step/max_steps': '1680/4800', 'percentage': '35.00%', 'elapsed_time': '20m 58s', 'remaining_time': '38m 57s'}\n",
      "{'loss': 0.01700466, 'seq_acc': 0.8, 'grad_norm': 0.09406403, 'learning_rate': 7.52e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333937, 'epoch': 1.76, 'global_step/max_steps': '1690/4800', 'percentage': '35.21%', 'elapsed_time': '21m 6s', 'remaining_time': '38m 50s'}\n",
      "{'loss': 0.00698384, 'seq_acc': 0.6, 'grad_norm': 0.81197017, 'learning_rate': 7.49e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334207, 'epoch': 1.77, 'global_step/max_steps': '1700/4800', 'percentage': '35.42%', 'elapsed_time': '21m 13s', 'remaining_time': '38m 42s'}\n",
      "{'loss': 0.01397932, 'seq_acc': 0.8, 'grad_norm': 0.66527861, 'learning_rate': 7.46e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334147, 'epoch': 1.78, 'global_step/max_steps': '1710/4800', 'percentage': '35.62%', 'elapsed_time': '21m 21s', 'remaining_time': '38m 35s'}\n",
      "{'loss': 0.0175025, 'seq_acc': 0.6, 'grad_norm': 1.013183, 'learning_rate': 7.43e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334032, 'epoch': 1.79, 'global_step/max_steps': '1720/4800', 'percentage': '35.83%', 'elapsed_time': '21m 29s', 'remaining_time': '38m 28s'}\n",
      "{'loss': 0.0111351, 'seq_acc': 0.8, 'grad_norm': 0.00967298, 'learning_rate': 7.4e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333955, 'epoch': 1.8, 'global_step/max_steps': '1730/4800', 'percentage': '36.04%', 'elapsed_time': '21m 36s', 'remaining_time': '38m 20s'}\n",
      "{'loss': 0.01286105, 'seq_acc': 0.8, 'grad_norm': 0.07365089, 'learning_rate': 7.37e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334178, 'epoch': 1.81, 'global_step/max_steps': '1740/4800', 'percentage': '36.25%', 'elapsed_time': '21m 43s', 'remaining_time': '38m 13s'}\n",
      "{'loss': 0.01556541, 'seq_acc': 0.6, 'grad_norm': 0.06797791, 'learning_rate': 7.34e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334549, 'epoch': 1.82, 'global_step/max_steps': '1750/4800', 'percentage': '36.46%', 'elapsed_time': '21m 51s', 'remaining_time': '38m 4s'}\n",
      "{'loss': 0.00912604, 'seq_acc': 0.8, 'grad_norm': 0.03343865, 'learning_rate': 7.31e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333498, 'epoch': 1.83, 'global_step/max_steps': '1760/4800', 'percentage': '36.67%', 'elapsed_time': '21m 59s', 'remaining_time': '37m 59s'}\n",
      "{'loss': 0.01695758, 'seq_acc': 0.6, 'grad_norm': 2.25467896, 'learning_rate': 7.28e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333734, 'epoch': 1.84, 'global_step/max_steps': '1770/4800', 'percentage': '36.88%', 'elapsed_time': '22m 6s', 'remaining_time': '37m 51s'}\n",
      "{'loss': 0.00222989, 'seq_acc': 1.0, 'grad_norm': 0.18023242, 'learning_rate': 7.25e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333576, 'epoch': 1.85, 'global_step/max_steps': '1780/4800', 'percentage': '37.08%', 'elapsed_time': '22m 14s', 'remaining_time': '37m 44s'}\n",
      "{'loss': 0.00114786, 'seq_acc': 1.0, 'grad_norm': 0.02582613, 'learning_rate': 7.22e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333923, 'epoch': 1.86, 'global_step/max_steps': '1790/4800', 'percentage': '37.29%', 'elapsed_time': '22m 21s', 'remaining_time': '37m 36s'}\n",
      "{'loss': 0.00512473, 'seq_acc': 0.8, 'grad_norm': 0.04075259, 'learning_rate': 7.19e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334016, 'epoch': 1.88, 'global_step/max_steps': '1800/4800', 'percentage': '37.50%', 'elapsed_time': '22m 29s', 'remaining_time': '37m 28s'}\n",
      "{'loss': 0.00154019, 'seq_acc': 0.9, 'grad_norm': 0.06563053, 'learning_rate': 7.16e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33437, 'epoch': 1.89, 'global_step/max_steps': '1810/4800', 'percentage': '37.71%', 'elapsed_time': '22m 36s', 'remaining_time': '37m 20s'}\n",
      "{'loss': 0.0109732, 'seq_acc': 0.9, 'grad_norm': 0.00508199, 'learning_rate': 7.13e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333689, 'epoch': 1.9, 'global_step/max_steps': '1820/4800', 'percentage': '37.92%', 'elapsed_time': '22m 44s', 'remaining_time': '37m 13s'}\n",
      "{'loss': 0.00435727, 'seq_acc': 0.8, 'grad_norm': 0.12393236, 'learning_rate': 7.1e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333566, 'epoch': 1.91, 'global_step/max_steps': '1830/4800', 'percentage': '38.12%', 'elapsed_time': '22m 51s', 'remaining_time': '37m 6s'}\n",
      "{'loss': 0.00063046, 'seq_acc': 1.0, 'grad_norm': 0.00207578, 'learning_rate': 7.07e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333503, 'epoch': 1.92, 'global_step/max_steps': '1840/4800', 'percentage': '38.33%', 'elapsed_time': '22m 59s', 'remaining_time': '36m 59s'}\n",
      "{'loss': 0.00616075, 'seq_acc': 0.9, 'grad_norm': 0.13853508, 'learning_rate': 7.04e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333466, 'epoch': 1.93, 'global_step/max_steps': '1850/4800', 'percentage': '38.54%', 'elapsed_time': '23m 7s', 'remaining_time': '36m 51s'}\n",
      "{'loss': 0.000368, 'seq_acc': 1.0, 'grad_norm': 0.14242347, 'learning_rate': 7.01e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333558, 'epoch': 1.94, 'global_step/max_steps': '1860/4800', 'percentage': '38.75%', 'elapsed_time': '23m 14s', 'remaining_time': '36m 44s'}\n",
      "{'loss': 0.01503523, 'seq_acc': 0.7, 'grad_norm': 0.02139017, 'learning_rate': 6.98e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333174, 'epoch': 1.95, 'global_step/max_steps': '1870/4800', 'percentage': '38.96%', 'elapsed_time': '23m 22s', 'remaining_time': '36m 37s'}\n",
      "{'loss': 0.01484895, 'seq_acc': 0.8, 'grad_norm': 0.51374078, 'learning_rate': 6.94e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333095, 'epoch': 1.96, 'global_step/max_steps': '1880/4800', 'percentage': '39.17%', 'elapsed_time': '23m 29s', 'remaining_time': '36m 29s'}\n",
      "{'loss': 0.0309199, 'seq_acc': 0.3, 'grad_norm': 0.04373766, 'learning_rate': 6.91e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333332, 'epoch': 1.97, 'global_step/max_steps': '1890/4800', 'percentage': '39.38%', 'elapsed_time': '23m 37s', 'remaining_time': '36m 22s'}\n",
      "{'loss': 0.01181955, 'seq_acc': 0.8, 'grad_norm': 1.92048323, 'learning_rate': 6.88e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333843, 'epoch': 1.98, 'global_step/max_steps': '1900/4800', 'percentage': '39.58%', 'elapsed_time': '23m 44s', 'remaining_time': '36m 13s'}\n",
      "{'loss': 0.00913765, 'seq_acc': 0.8, 'grad_norm': 0.04840055, 'learning_rate': 6.85e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333929, 'epoch': 1.99, 'global_step/max_steps': '1910/4800', 'percentage': '39.79%', 'elapsed_time': '23m 51s', 'remaining_time': '36m 6s'}\n",
      "{'loss': 0.01413209, 'seq_acc': 0.7, 'grad_norm': 0.57429951, 'learning_rate': 6.82e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333816, 'epoch': 2.0, 'global_step/max_steps': '1920/4800', 'percentage': '40.00%', 'elapsed_time': '23m 59s', 'remaining_time': '35m 58s'}\n",
      "{'loss': 0.00261367, 'seq_acc': 0.9, 'grad_norm': 0.05416379, 'learning_rate': 6.79e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334193, 'epoch': 2.01, 'global_step/max_steps': '1930/4800', 'percentage': '40.21%', 'elapsed_time': '24m 6s', 'remaining_time': '35m 50s'}\n",
      "{'loss': 0.00610632, 'seq_acc': 0.9, 'grad_norm': 0.25855011, 'learning_rate': 6.76e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33427, 'epoch': 2.02, 'global_step/max_steps': '1940/4800', 'percentage': '40.42%', 'elapsed_time': '24m 13s', 'remaining_time': '35m 43s'}\n",
      "{'loss': 0.01375914, 'seq_acc': 0.8, 'grad_norm': 0.02779734, 'learning_rate': 6.72e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334486, 'epoch': 2.03, 'global_step/max_steps': '1950/4800', 'percentage': '40.62%', 'elapsed_time': '24m 20s', 'remaining_time': '35m 35s'}\n",
      "{'loss': 0.00182185, 'seq_acc': 1.0, 'grad_norm': 0.00232332, 'learning_rate': 6.69e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334102, 'epoch': 2.04, 'global_step/max_steps': '1960/4800', 'percentage': '40.83%', 'elapsed_time': '24m 28s', 'remaining_time': '35m 28s'}\n",
      "{'loss': 0.00775951, 'seq_acc': 0.8, 'grad_norm': 0.01048602, 'learning_rate': 6.66e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33398, 'epoch': 2.05, 'global_step/max_steps': '1970/4800', 'percentage': '41.04%', 'elapsed_time': '24m 36s', 'remaining_time': '35m 21s'}\n",
      "{'loss': 0.00905905, 'seq_acc': 0.8, 'grad_norm': 0.12764129, 'learning_rate': 6.63e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334146, 'epoch': 2.06, 'global_step/max_steps': '1980/4800', 'percentage': '41.25%', 'elapsed_time': '24m 43s', 'remaining_time': '35m 13s'}\n",
      "{'loss': 0.00616766, 'seq_acc': 0.9, 'grad_norm': 1.80237591, 'learning_rate': 6.6e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333939, 'epoch': 2.07, 'global_step/max_steps': '1990/4800', 'percentage': '41.46%', 'elapsed_time': '24m 51s', 'remaining_time': '35m 6s'}\n",
      "{'loss': 0.02132885, 'seq_acc': 0.6, 'grad_norm': 0.04695696, 'learning_rate': 6.57e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333721, 'epoch': 2.08, 'global_step/max_steps': '2000/4800', 'percentage': '41.67%', 'elapsed_time': '24m 59s', 'remaining_time': '34m 58s'}\n",
      "{'loss': 0.00161256, 'seq_acc': 1.0, 'grad_norm': 0.02805734, 'learning_rate': 6.53e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333857, 'epoch': 2.09, 'global_step/max_steps': '2010/4800', 'percentage': '41.88%', 'elapsed_time': '25m 6s', 'remaining_time': '34m 51s'}\n",
      "{'loss': 0.00283799, 'seq_acc': 1.0, 'grad_norm': 0.0599721, 'learning_rate': 6.5e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334271, 'epoch': 2.1, 'global_step/max_steps': '2020/4800', 'percentage': '42.08%', 'elapsed_time': '25m 13s', 'remaining_time': '34m 43s'}\n",
      "{'loss': 0.00630893, 'seq_acc': 0.9, 'grad_norm': 0.02754976, 'learning_rate': 6.47e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334659, 'epoch': 2.11, 'global_step/max_steps': '2030/4800', 'percentage': '42.29%', 'elapsed_time': '25m 20s', 'remaining_time': '34m 35s'}\n",
      "{'loss': 0.00643964, 'seq_acc': 0.9, 'grad_norm': 0.46497318, 'learning_rate': 6.44e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33501, 'epoch': 2.12, 'global_step/max_steps': '2040/4800', 'percentage': '42.50%', 'elapsed_time': '25m 27s', 'remaining_time': '34m 26s'}\n",
      "{'loss': 0.00486048, 'seq_acc': 0.9, 'grad_norm': 0.36330289, 'learning_rate': 6.4e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.335065, 'epoch': 2.14, 'global_step/max_steps': '2050/4800', 'percentage': '42.71%', 'elapsed_time': '25m 35s', 'remaining_time': '34m 19s'}\n",
      "{'loss': 0.00314393, 'seq_acc': 0.9, 'grad_norm': 0.01011963, 'learning_rate': 6.37e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334814, 'epoch': 2.15, 'global_step/max_steps': '2060/4800', 'percentage': '42.92%', 'elapsed_time': '25m 42s', 'remaining_time': '34m 12s'}\n",
      "{'loss': 0.00656551, 'seq_acc': 0.8, 'grad_norm': 0.01215753, 'learning_rate': 6.34e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334488, 'epoch': 2.16, 'global_step/max_steps': '2070/4800', 'percentage': '43.12%', 'elapsed_time': '25m 50s', 'remaining_time': '34m 5s'}\n",
      "{'loss': 0.00163357, 'seq_acc': 0.9, 'grad_norm': 0.63695782, 'learning_rate': 6.31e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334125, 'epoch': 2.17, 'global_step/max_steps': '2080/4800', 'percentage': '43.33%', 'elapsed_time': '25m 58s', 'remaining_time': '33m 58s'}\n",
      "{'loss': 0.00102152, 'seq_acc': 1.0, 'grad_norm': 0.00112294, 'learning_rate': 6.27e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334306, 'epoch': 2.18, 'global_step/max_steps': '2090/4800', 'percentage': '43.54%', 'elapsed_time': '26m 6s', 'remaining_time': '33m 50s'}\n",
      "{'loss': 0.00163984, 'seq_acc': 0.9, 'grad_norm': 1.64576983, 'learning_rate': 6.24e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334383, 'epoch': 2.19, 'global_step/max_steps': '2100/4800', 'percentage': '43.75%', 'elapsed_time': '26m 13s', 'remaining_time': '33m 43s'}\n",
      "{'loss': 0.00028441, 'seq_acc': 1.0, 'grad_norm': 0.00393472, 'learning_rate': 6.21e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333897, 'epoch': 2.2, 'global_step/max_steps': '2110/4800', 'percentage': '43.96%', 'elapsed_time': '26m 21s', 'remaining_time': '33m 36s'}\n",
      "{'loss': 3.081e-05, 'seq_acc': 1.0, 'grad_norm': 0.00102915, 'learning_rate': 6.18e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.3344, 'epoch': 2.21, 'global_step/max_steps': '2120/4800', 'percentage': '44.17%', 'elapsed_time': '26m 28s', 'remaining_time': '33m 28s'}\n",
      "{'loss': 0.00038336, 'seq_acc': 1.0, 'grad_norm': 0.00060696, 'learning_rate': 6.14e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333906, 'epoch': 2.22, 'global_step/max_steps': '2130/4800', 'percentage': '44.38%', 'elapsed_time': '26m 36s', 'remaining_time': '33m 21s'}\n",
      "{'loss': 0.00481252, 'seq_acc': 0.9, 'grad_norm': 0.00176963, 'learning_rate': 6.11e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333743, 'epoch': 2.23, 'global_step/max_steps': '2140/4800', 'percentage': '44.58%', 'elapsed_time': '26m 44s', 'remaining_time': '33m 14s'}\n",
      "{'loss': 0.00493976, 'seq_acc': 0.8, 'grad_norm': 0.06012369, 'learning_rate': 6.08e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333489, 'epoch': 2.24, 'global_step/max_steps': '2150/4800', 'percentage': '44.79%', 'elapsed_time': '26m 52s', 'remaining_time': '33m 6s'}\n",
      "{'loss': 0.00321348, 'seq_acc': 0.9, 'grad_norm': 0.15576059, 'learning_rate': 6.04e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.3332, 'epoch': 2.25, 'global_step/max_steps': '2160/4800', 'percentage': '45.00%', 'elapsed_time': '26m 59s', 'remaining_time': '32m 59s'}\n",
      "{'loss': 0.00405484, 'seq_acc': 0.9, 'grad_norm': 0.00224704, 'learning_rate': 6.01e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332914, 'epoch': 2.26, 'global_step/max_steps': '2170/4800', 'percentage': '45.21%', 'elapsed_time': '27m 7s', 'remaining_time': '32m 52s'}\n",
      "{'loss': 0.00403219, 'seq_acc': 0.9, 'grad_norm': 0.04717705, 'learning_rate': 5.98e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333198, 'epoch': 2.27, 'global_step/max_steps': '2180/4800', 'percentage': '45.42%', 'elapsed_time': '27m 14s', 'remaining_time': '32m 44s'}\n",
      "{'loss': 0.00030455, 'seq_acc': 1.0, 'grad_norm': 0.04641817, 'learning_rate': 5.95e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33308, 'epoch': 2.28, 'global_step/max_steps': '2190/4800', 'percentage': '45.62%', 'elapsed_time': '27m 22s', 'remaining_time': '32m 37s'}\n",
      "{'loss': 0.00921366, 'seq_acc': 0.9, 'grad_norm': 0.01112221, 'learning_rate': 5.91e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33308, 'epoch': 2.29, 'global_step/max_steps': '2200/4800', 'percentage': '45.83%', 'elapsed_time': '27m 30s', 'remaining_time': '32m 30s'}\n",
      "{'loss': 0.00701433, 'seq_acc': 0.8, 'grad_norm': 0.0020798, 'learning_rate': 5.88e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332683, 'epoch': 2.3, 'global_step/max_steps': '2210/4800', 'percentage': '46.04%', 'elapsed_time': '27m 38s', 'remaining_time': '32m 23s'}\n",
      "{'loss': 0.00038457, 'seq_acc': 1.0, 'grad_norm': 0.31574038, 'learning_rate': 5.85e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332685, 'epoch': 2.31, 'global_step/max_steps': '2220/4800', 'percentage': '46.25%', 'elapsed_time': '27m 45s', 'remaining_time': '32m 15s'}\n",
      "{'loss': 0.00446897, 'seq_acc': 0.9, 'grad_norm': 0.03294382, 'learning_rate': 5.81e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332682, 'epoch': 2.32, 'global_step/max_steps': '2230/4800', 'percentage': '46.46%', 'elapsed_time': '27m 53s', 'remaining_time': '32m 8s'}\n",
      "{'loss': 0.00028491, 'seq_acc': 1.0, 'grad_norm': 0.00162205, 'learning_rate': 5.78e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332799, 'epoch': 2.33, 'global_step/max_steps': '2240/4800', 'percentage': '46.67%', 'elapsed_time': '28m 0s', 'remaining_time': '32m 0s'}\n",
      "{'loss': 0.00482023, 'seq_acc': 0.9, 'grad_norm': 0.02417833, 'learning_rate': 5.75e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333012, 'epoch': 2.34, 'global_step/max_steps': '2250/4800', 'percentage': '46.88%', 'elapsed_time': '28m 7s', 'remaining_time': '31m 52s'}\n",
      "{'loss': 0.0084147, 'seq_acc': 0.9, 'grad_norm': 0.0253248, 'learning_rate': 5.71e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332947, 'epoch': 2.35, 'global_step/max_steps': '2260/4800', 'percentage': '47.08%', 'elapsed_time': '28m 15s', 'remaining_time': '31m 45s'}\n",
      "{'loss': 0.00041714, 'seq_acc': 1.0, 'grad_norm': 0.0308494, 'learning_rate': 5.68e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332899, 'epoch': 2.36, 'global_step/max_steps': '2270/4800', 'percentage': '47.29%', 'elapsed_time': '28m 22s', 'remaining_time': '31m 37s'}\n",
      "{'loss': 0.00520574, 'seq_acc': 0.9, 'grad_norm': 0.00573807, 'learning_rate': 5.65e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333055, 'epoch': 2.38, 'global_step/max_steps': '2280/4800', 'percentage': '47.50%', 'elapsed_time': '28m 30s', 'remaining_time': '31m 30s'}\n",
      "{'loss': 0.00494169, 'seq_acc': 0.9, 'grad_norm': 1.68754482, 'learning_rate': 5.61e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333105, 'epoch': 2.39, 'global_step/max_steps': '2290/4800', 'percentage': '47.71%', 'elapsed_time': '28m 37s', 'remaining_time': '31m 22s'}\n",
      "{'loss': 0.00069971, 'seq_acc': 1.0, 'grad_norm': 0.04943395, 'learning_rate': 5.58e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332671, 'epoch': 2.4, 'global_step/max_steps': '2300/4800', 'percentage': '47.92%', 'elapsed_time': '28m 45s', 'remaining_time': '31m 15s'}\n",
      "{'loss': 0.00943581, 'seq_acc': 0.6, 'grad_norm': 0.61458004, 'learning_rate': 5.55e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332555, 'epoch': 2.41, 'global_step/max_steps': '2310/4800', 'percentage': '48.12%', 'elapsed_time': '28m 53s', 'remaining_time': '31m 8s'}\n",
      "{'loss': 0.00198043, 'seq_acc': 0.9, 'grad_norm': 0.8070119, 'learning_rate': 5.51e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332577, 'epoch': 2.42, 'global_step/max_steps': '2320/4800', 'percentage': '48.33%', 'elapsed_time': '29m 0s', 'remaining_time': '31m 0s'}\n",
      "{'loss': 0.00681503, 'seq_acc': 0.9, 'grad_norm': 0.0019548, 'learning_rate': 5.48e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332893, 'epoch': 2.43, 'global_step/max_steps': '2330/4800', 'percentage': '48.54%', 'elapsed_time': '29m 7s', 'remaining_time': '30m 52s'}\n",
      "{'loss': 0.00412024, 'seq_acc': 0.9, 'grad_norm': 0.00027001, 'learning_rate': 5.44e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333179, 'epoch': 2.44, 'global_step/max_steps': '2340/4800', 'percentage': '48.75%', 'elapsed_time': '29m 14s', 'remaining_time': '30m 44s'}\n",
      "{'loss': 0.00218365, 'seq_acc': 0.9, 'grad_norm': 0.00059245, 'learning_rate': 5.41e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333688, 'epoch': 2.45, 'global_step/max_steps': '2350/4800', 'percentage': '48.96%', 'elapsed_time': '29m 21s', 'remaining_time': '30m 36s'}\n",
      "{'loss': 0.00405627, 'seq_acc': 0.8, 'grad_norm': 2.79344368, 'learning_rate': 5.38e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333617, 'epoch': 2.46, 'global_step/max_steps': '2360/4800', 'percentage': '49.17%', 'elapsed_time': '29m 29s', 'remaining_time': '30m 29s'}\n",
      "{'loss': 0.00025811, 'seq_acc': 1.0, 'grad_norm': 0.00146348, 'learning_rate': 5.34e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333003, 'epoch': 2.47, 'global_step/max_steps': '2370/4800', 'percentage': '49.38%', 'elapsed_time': '29m 37s', 'remaining_time': '30m 22s'}\n",
      "{'loss': 0.00321787, 'seq_acc': 0.9, 'grad_norm': 0.00212968, 'learning_rate': 5.31e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333087, 'epoch': 2.48, 'global_step/max_steps': '2380/4800', 'percentage': '49.58%', 'elapsed_time': '29m 45s', 'remaining_time': '30m 15s'}\n",
      "{'loss': 0.00759884, 'seq_acc': 0.8, 'grad_norm': 10.69770813, 'learning_rate': 5.28e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332753, 'epoch': 2.49, 'global_step/max_steps': '2390/4800', 'percentage': '49.79%', 'elapsed_time': '29m 52s', 'remaining_time': '30m 7s'}\n",
      "{'loss': 0.00169763, 'seq_acc': 0.9, 'grad_norm': 0.00170689, 'learning_rate': 5.24e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332407, 'epoch': 2.5, 'global_step/max_steps': '2400/4800', 'percentage': '50.00%', 'elapsed_time': '30m 0s', 'remaining_time': '30m 0s'}\n",
      "{'loss': 0.0040786, 'seq_acc': 0.9, 'grad_norm': 0.0516006, 'learning_rate': 5.21e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332484, 'epoch': 2.51, 'global_step/max_steps': '2410/4800', 'percentage': '50.21%', 'elapsed_time': '30m 8s', 'remaining_time': '29m 53s'}\n",
      "{'loss': 0.00448643, 'seq_acc': 0.9, 'grad_norm': 0.11557295, 'learning_rate': 5.18e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332824, 'epoch': 2.52, 'global_step/max_steps': '2420/4800', 'percentage': '50.42%', 'elapsed_time': '30m 15s', 'remaining_time': '29m 45s'}\n",
      "{'loss': 0.00931913, 'seq_acc': 0.9, 'grad_norm': 0.02855335, 'learning_rate': 5.14e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332403, 'epoch': 2.53, 'global_step/max_steps': '2430/4800', 'percentage': '50.62%', 'elapsed_time': '30m 23s', 'remaining_time': '29m 38s'}\n",
      "{'loss': 0.01183069, 'seq_acc': 0.8, 'grad_norm': 1.36563218, 'learning_rate': 5.11e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332085, 'epoch': 2.54, 'global_step/max_steps': '2440/4800', 'percentage': '50.83%', 'elapsed_time': '30m 31s', 'remaining_time': '29m 31s'}\n",
      "{'loss': 0.00059884, 'seq_acc': 1.0, 'grad_norm': 0.005287, 'learning_rate': 5.07e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332227, 'epoch': 2.55, 'global_step/max_steps': '2450/4800', 'percentage': '51.04%', 'elapsed_time': '30m 38s', 'remaining_time': '29m 23s'}\n",
      "{'loss': 0.0063373, 'seq_acc': 0.8, 'grad_norm': 0.08920856, 'learning_rate': 5.04e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332319, 'epoch': 2.56, 'global_step/max_steps': '2460/4800', 'percentage': '51.25%', 'elapsed_time': '30m 46s', 'remaining_time': '29m 16s'}\n",
      "{'loss': 0.00232914, 'seq_acc': 0.9, 'grad_norm': 0.05450953, 'learning_rate': 5.01e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332222, 'epoch': 2.57, 'global_step/max_steps': '2470/4800', 'percentage': '51.46%', 'elapsed_time': '30m 53s', 'remaining_time': '29m 8s'}\n",
      "{'loss': 0.00290294, 'seq_acc': 0.9, 'grad_norm': 0.12432586, 'learning_rate': 4.97e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33234, 'epoch': 2.58, 'global_step/max_steps': '2480/4800', 'percentage': '51.67%', 'elapsed_time': '31m 1s', 'remaining_time': '29m 1s'}\n",
      "{'loss': 0.0019623, 'seq_acc': 0.9, 'grad_norm': 1.60124195, 'learning_rate': 4.94e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332781, 'epoch': 2.59, 'global_step/max_steps': '2490/4800', 'percentage': '51.88%', 'elapsed_time': '31m 7s', 'remaining_time': '28m 52s'}\n",
      "{'loss': 0.01254279, 'seq_acc': 0.8, 'grad_norm': 0.01727591, 'learning_rate': 4.91e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332787, 'epoch': 2.6, 'global_step/max_steps': '2500/4800', 'percentage': '52.08%', 'elapsed_time': '31m 15s', 'remaining_time': '28m 45s'}\n",
      "{'loss': 0.00810785, 'seq_acc': 0.8, 'grad_norm': 3.59062958, 'learning_rate': 4.87e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332864, 'epoch': 2.61, 'global_step/max_steps': '2510/4800', 'percentage': '52.29%', 'elapsed_time': '31m 22s', 'remaining_time': '28m 37s'}\n",
      "{'loss': 0.03395926, 'seq_acc': 0.6, 'grad_norm': 0.01829918, 'learning_rate': 4.84e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332909, 'epoch': 2.62, 'global_step/max_steps': '2520/4800', 'percentage': '52.50%', 'elapsed_time': '31m 30s', 'remaining_time': '28m 30s'}\n",
      "{'loss': 0.0378288, 'seq_acc': 0.6, 'grad_norm': 0.63222951, 'learning_rate': 4.8e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33267, 'epoch': 2.64, 'global_step/max_steps': '2530/4800', 'percentage': '52.71%', 'elapsed_time': '31m 38s', 'remaining_time': '28m 23s'}\n",
      "{'loss': 0.00174471, 'seq_acc': 1.0, 'grad_norm': 0.11902541, 'learning_rate': 4.77e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33248, 'epoch': 2.65, 'global_step/max_steps': '2540/4800', 'percentage': '52.92%', 'elapsed_time': '31m 45s', 'remaining_time': '28m 15s'}\n",
      "{'loss': 0.00197864, 'seq_acc': 0.9, 'grad_norm': 0.04519429, 'learning_rate': 4.74e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332574, 'epoch': 2.66, 'global_step/max_steps': '2550/4800', 'percentage': '53.12%', 'elapsed_time': '31m 53s', 'remaining_time': '28m 8s'}\n",
      "{'loss': 0.00470456, 'seq_acc': 0.8, 'grad_norm': 0.02187181, 'learning_rate': 4.7e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332927, 'epoch': 2.67, 'global_step/max_steps': '2560/4800', 'percentage': '53.33%', 'elapsed_time': '32m 0s', 'remaining_time': '28m 0s'}\n",
      "{'loss': 0.00674873, 'seq_acc': 0.8, 'grad_norm': 0.1042617, 'learning_rate': 4.67e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332632, 'epoch': 2.68, 'global_step/max_steps': '2570/4800', 'percentage': '53.54%', 'elapsed_time': '32m 8s', 'remaining_time': '27m 53s'}\n",
      "{'loss': 0.00089846, 'seq_acc': 1.0, 'grad_norm': 0.00368985, 'learning_rate': 4.64e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33253, 'epoch': 2.69, 'global_step/max_steps': '2580/4800', 'percentage': '53.75%', 'elapsed_time': '32m 15s', 'remaining_time': '27m 45s'}\n",
      "{'loss': 0.00486077, 'seq_acc': 0.8, 'grad_norm': 0.08462505, 'learning_rate': 4.6e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332505, 'epoch': 2.7, 'global_step/max_steps': '2590/4800', 'percentage': '53.96%', 'elapsed_time': '32m 23s', 'remaining_time': '27m 38s'}\n",
      "{'loss': 0.00420707, 'seq_acc': 0.9, 'grad_norm': 0.96383446, 'learning_rate': 4.57e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332503, 'epoch': 2.71, 'global_step/max_steps': '2600/4800', 'percentage': '54.17%', 'elapsed_time': '32m 30s', 'remaining_time': '27m 30s'}\n",
      "{'loss': 0.00013007, 'seq_acc': 1.0, 'grad_norm': 0.01080559, 'learning_rate': 4.54e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332466, 'epoch': 2.72, 'global_step/max_steps': '2610/4800', 'percentage': '54.37%', 'elapsed_time': '32m 38s', 'remaining_time': '27m 23s'}\n",
      "{'loss': 0.00052982, 'seq_acc': 1.0, 'grad_norm': 0.00262806, 'learning_rate': 4.5e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332817, 'epoch': 2.73, 'global_step/max_steps': '2620/4800', 'percentage': '54.58%', 'elapsed_time': '32m 45s', 'remaining_time': '27m 15s'}\n",
      "{'loss': 0.00564577, 'seq_acc': 0.9, 'grad_norm': 3.08003068, 'learning_rate': 4.47e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332766, 'epoch': 2.74, 'global_step/max_steps': '2630/4800', 'percentage': '54.79%', 'elapsed_time': '32m 53s', 'remaining_time': '27m 7s'}\n",
      "{'loss': 0.00052206, 'seq_acc': 1.0, 'grad_norm': 0.55546784, 'learning_rate': 4.43e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332876, 'epoch': 2.75, 'global_step/max_steps': '2640/4800', 'percentage': '55.00%', 'elapsed_time': '33m 0s', 'remaining_time': '27m 0s'}\n",
      "{'loss': 0.00148865, 'seq_acc': 0.9, 'grad_norm': 0.0009753, 'learning_rate': 4.4e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332676, 'epoch': 2.76, 'global_step/max_steps': '2650/4800', 'percentage': '55.21%', 'elapsed_time': '33m 8s', 'remaining_time': '26m 53s'}\n",
      "{'loss': 0.00030174, 'seq_acc': 1.0, 'grad_norm': 0.00084795, 'learning_rate': 4.37e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332764, 'epoch': 2.77, 'global_step/max_steps': '2660/4800', 'percentage': '55.42%', 'elapsed_time': '33m 15s', 'remaining_time': '26m 45s'}\n",
      "{'loss': 0.00161228, 'seq_acc': 0.9, 'grad_norm': 0.00626982, 'learning_rate': 4.33e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332769, 'epoch': 2.78, 'global_step/max_steps': '2670/4800', 'percentage': '55.62%', 'elapsed_time': '33m 23s', 'remaining_time': '26m 37s'}\n",
      "{'loss': 0.00236458, 'seq_acc': 0.8, 'grad_norm': 0.00199614, 'learning_rate': 4.3e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333118, 'epoch': 2.79, 'global_step/max_steps': '2680/4800', 'percentage': '55.83%', 'elapsed_time': '33m 30s', 'remaining_time': '26m 30s'}\n",
      "{'loss': 0.00613988, 'seq_acc': 0.8, 'grad_norm': 1.6483835, 'learning_rate': 4.27e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332766, 'epoch': 2.8, 'global_step/max_steps': '2690/4800', 'percentage': '56.04%', 'elapsed_time': '33m 38s', 'remaining_time': '26m 22s'}\n",
      "{'loss': 0.00686451, 'seq_acc': 0.9, 'grad_norm': 0.01816936, 'learning_rate': 4.23e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333182, 'epoch': 2.81, 'global_step/max_steps': '2700/4800', 'percentage': '56.25%', 'elapsed_time': '33m 44s', 'remaining_time': '26m 14s'}\n",
      "{'loss': 0.000825, 'seq_acc': 1.0, 'grad_norm': 0.01236905, 'learning_rate': 4.2e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333357, 'epoch': 2.82, 'global_step/max_steps': '2710/4800', 'percentage': '56.46%', 'elapsed_time': '33m 52s', 'remaining_time': '26m 7s'}\n",
      "{'loss': 0.00048641, 'seq_acc': 1.0, 'grad_norm': 0.03230627, 'learning_rate': 4.17e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333394, 'epoch': 2.83, 'global_step/max_steps': '2720/4800', 'percentage': '56.67%', 'elapsed_time': '33m 59s', 'remaining_time': '25m 59s'}\n",
      "{'loss': 0.00849356, 'seq_acc': 0.9, 'grad_norm': 0.00882941, 'learning_rate': 4.13e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333686, 'epoch': 2.84, 'global_step/max_steps': '2730/4800', 'percentage': '56.88%', 'elapsed_time': '34m 6s', 'remaining_time': '25m 51s'}\n",
      "{'loss': 0.00296931, 'seq_acc': 0.9, 'grad_norm': 0.02436201, 'learning_rate': 4.1e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33354, 'epoch': 2.85, 'global_step/max_steps': '2740/4800', 'percentage': '57.08%', 'elapsed_time': '34m 14s', 'remaining_time': '25m 44s'}\n",
      "{'loss': 0.00394507, 'seq_acc': 0.9, 'grad_norm': 0.0439024, 'learning_rate': 4.07e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333368, 'epoch': 2.86, 'global_step/max_steps': '2750/4800', 'percentage': '57.29%', 'elapsed_time': '34m 22s', 'remaining_time': '25m 37s'}\n",
      "{'loss': 0.00534802, 'seq_acc': 0.9, 'grad_norm': 0.02653576, 'learning_rate': 4.03e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333387, 'epoch': 2.88, 'global_step/max_steps': '2760/4800', 'percentage': '57.50%', 'elapsed_time': '34m 29s', 'remaining_time': '25m 29s'}\n",
      "{'loss': 0.00567453, 'seq_acc': 0.9, 'grad_norm': 0.0180897, 'learning_rate': 4e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333527, 'epoch': 2.89, 'global_step/max_steps': '2770/4800', 'percentage': '57.71%', 'elapsed_time': '34m 36s', 'remaining_time': '25m 22s'}\n",
      "{'loss': 0.00322624, 'seq_acc': 0.8, 'grad_norm': 0.05214748, 'learning_rate': 3.97e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333288, 'epoch': 2.9, 'global_step/max_steps': '2780/4800', 'percentage': '57.92%', 'elapsed_time': '34m 44s', 'remaining_time': '25m 14s'}\n",
      "{'loss': 0.00437959, 'seq_acc': 0.8, 'grad_norm': 0.06594276, 'learning_rate': 3.94e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333012, 'epoch': 2.91, 'global_step/max_steps': '2790/4800', 'percentage': '58.13%', 'elapsed_time': '34m 52s', 'remaining_time': '25m 7s'}\n",
      "{'loss': 0.00085846, 'seq_acc': 1.0, 'grad_norm': 0.00470087, 'learning_rate': 3.9e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333099, 'epoch': 2.92, 'global_step/max_steps': '2800/4800', 'percentage': '58.33%', 'elapsed_time': '35m 0s', 'remaining_time': '25m 0s'}\n",
      "{'loss': 0.00559185, 'seq_acc': 0.9, 'grad_norm': 1.79048026, 'learning_rate': 3.87e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.332974, 'epoch': 2.93, 'global_step/max_steps': '2810/4800', 'percentage': '58.54%', 'elapsed_time': '35m 7s', 'remaining_time': '24m 52s'}\n",
      "{'loss': 0.00616868, 'seq_acc': 0.9, 'grad_norm': 0.00870928, 'learning_rate': 3.84e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333018, 'epoch': 2.94, 'global_step/max_steps': '2820/4800', 'percentage': '58.75%', 'elapsed_time': '35m 15s', 'remaining_time': '24m 45s'}\n",
      "{'loss': 0.00024029, 'seq_acc': 1.0, 'grad_norm': 0.01093879, 'learning_rate': 3.8e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333363, 'epoch': 2.95, 'global_step/max_steps': '2830/4800', 'percentage': '58.96%', 'elapsed_time': '35m 22s', 'remaining_time': '24m 37s'}\n",
      "{'loss': 0.00041176, 'seq_acc': 1.0, 'grad_norm': 0.01539097, 'learning_rate': 3.77e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333589, 'epoch': 2.96, 'global_step/max_steps': '2840/4800', 'percentage': '59.17%', 'elapsed_time': '35m 29s', 'remaining_time': '24m 29s'}\n",
      "{'loss': 0.00117909, 'seq_acc': 0.9, 'grad_norm': 0.01116415, 'learning_rate': 3.74e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333645, 'epoch': 2.97, 'global_step/max_steps': '2850/4800', 'percentage': '59.38%', 'elapsed_time': '35m 36s', 'remaining_time': '24m 21s'}\n",
      "{'loss': 0.00351333, 'seq_acc': 0.9, 'grad_norm': 0.03896874, 'learning_rate': 3.71e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333474, 'epoch': 2.98, 'global_step/max_steps': '2860/4800', 'percentage': '59.58%', 'elapsed_time': '35m 44s', 'remaining_time': '24m 14s'}\n",
      "{'loss': 0.00687555, 'seq_acc': 0.8, 'grad_norm': 1.89818192, 'learning_rate': 3.67e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333539, 'epoch': 2.99, 'global_step/max_steps': '2870/4800', 'percentage': '59.79%', 'elapsed_time': '35m 51s', 'remaining_time': '24m 7s'}\n",
      "{'loss': 0.00028766, 'seq_acc': 1.0, 'grad_norm': 0.03707266, 'learning_rate': 3.64e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333471, 'epoch': 3.0, 'global_step/max_steps': '2880/4800', 'percentage': '60.00%', 'elapsed_time': '35m 59s', 'remaining_time': '23m 59s'}\n",
      "{'loss': 0.00019591, 'seq_acc': 1.0, 'grad_norm': 0.02011579, 'learning_rate': 3.61e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333669, 'epoch': 3.01, 'global_step/max_steps': '2890/4800', 'percentage': '60.21%', 'elapsed_time': '36m 6s', 'remaining_time': '23m 51s'}\n",
      "{'loss': 0.00033351, 'seq_acc': 1.0, 'grad_norm': 0.00106854, 'learning_rate': 3.58e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333259, 'epoch': 3.02, 'global_step/max_steps': '2900/4800', 'percentage': '60.42%', 'elapsed_time': '36m 14s', 'remaining_time': '23m 44s'}\n",
      "{'loss': 0.00176704, 'seq_acc': 0.9, 'grad_norm': 0.00052339, 'learning_rate': 3.54e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333156, 'epoch': 3.03, 'global_step/max_steps': '2910/4800', 'percentage': '60.62%', 'elapsed_time': '36m 22s', 'remaining_time': '23m 37s'}\n",
      "{'loss': 0.00016171, 'seq_acc': 1.0, 'grad_norm': 0.15483874, 'learning_rate': 3.51e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33335, 'epoch': 3.04, 'global_step/max_steps': '2920/4800', 'percentage': '60.83%', 'elapsed_time': '36m 29s', 'remaining_time': '23m 29s'}\n",
      "{'loss': 6.93e-05, 'seq_acc': 1.0, 'grad_norm': 0.01390804, 'learning_rate': 3.48e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333456, 'epoch': 3.05, 'global_step/max_steps': '2930/4800', 'percentage': '61.04%', 'elapsed_time': '36m 37s', 'remaining_time': '23m 22s'}\n",
      "{'loss': 5.172e-05, 'seq_acc': 1.0, 'grad_norm': 0.00216222, 'learning_rate': 3.45e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333752, 'epoch': 3.06, 'global_step/max_steps': '2940/4800', 'percentage': '61.25%', 'elapsed_time': '36m 44s', 'remaining_time': '23m 14s'}\n",
      "{'loss': 0.00108536, 'seq_acc': 0.9, 'grad_norm': 0.00226894, 'learning_rate': 3.42e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333806, 'epoch': 3.07, 'global_step/max_steps': '2950/4800', 'percentage': '61.46%', 'elapsed_time': '36m 51s', 'remaining_time': '23m 6s'}\n",
      "{'loss': 5.804e-05, 'seq_acc': 1.0, 'grad_norm': 0.00073748, 'learning_rate': 3.38e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334012, 'epoch': 3.08, 'global_step/max_steps': '2960/4800', 'percentage': '61.67%', 'elapsed_time': '36m 58s', 'remaining_time': '22m 59s'}\n",
      "{'loss': 0.00297085, 'seq_acc': 0.9, 'grad_norm': 0.00040433, 'learning_rate': 3.35e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334319, 'epoch': 3.09, 'global_step/max_steps': '2970/4800', 'percentage': '61.88%', 'elapsed_time': '37m 5s', 'remaining_time': '22m 51s'}\n",
      "{'loss': 0.00182101, 'seq_acc': 0.9, 'grad_norm': 0.00051643, 'learning_rate': 3.32e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334526, 'epoch': 3.1, 'global_step/max_steps': '2980/4800', 'percentage': '62.08%', 'elapsed_time': '37m 12s', 'remaining_time': '22m 43s'}\n",
      "{'loss': 0.00025713, 'seq_acc': 1.0, 'grad_norm': 0.00096431, 'learning_rate': 3.29e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33453, 'epoch': 3.11, 'global_step/max_steps': '2990/4800', 'percentage': '62.29%', 'elapsed_time': '37m 20s', 'remaining_time': '22m 36s'}\n",
      "{'loss': 0.00037503, 'seq_acc': 1.0, 'grad_norm': 0.00036011, 'learning_rate': 3.26e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334812, 'epoch': 3.12, 'global_step/max_steps': '3000/4800', 'percentage': '62.50%', 'elapsed_time': '37m 27s', 'remaining_time': '22m 28s'}\n",
      "{'loss': 0.00292524, 'seq_acc': 0.9, 'grad_norm': 0.00350328, 'learning_rate': 3.22e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334813, 'epoch': 3.14, 'global_step/max_steps': '3010/4800', 'percentage': '62.71%', 'elapsed_time': '37m 34s', 'remaining_time': '22m 20s'}\n",
      "{'loss': 0.00102882, 'seq_acc': 0.9, 'grad_norm': 0.10222854, 'learning_rate': 3.19e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334912, 'epoch': 3.15, 'global_step/max_steps': '3020/4800', 'percentage': '62.92%', 'elapsed_time': '37m 42s', 'remaining_time': '22m 13s'}\n",
      "{'loss': 0.00820399, 'seq_acc': 0.8, 'grad_norm': 0.00234958, 'learning_rate': 3.16e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334666, 'epoch': 3.16, 'global_step/max_steps': '3030/4800', 'percentage': '63.12%', 'elapsed_time': '37m 49s', 'remaining_time': '22m 6s'}\n",
      "{'loss': 0.01335425, 'seq_acc': 0.9, 'grad_norm': 0.00094605, 'learning_rate': 3.13e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334832, 'epoch': 3.17, 'global_step/max_steps': '3040/4800', 'percentage': '63.33%', 'elapsed_time': '37m 57s', 'remaining_time': '21m 58s'}\n",
      "{'loss': 0.00065681, 'seq_acc': 1.0, 'grad_norm': 0.07657261, 'learning_rate': 3.1e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33444, 'epoch': 3.18, 'global_step/max_steps': '3050/4800', 'percentage': '63.54%', 'elapsed_time': '38m 5s', 'remaining_time': '21m 51s'}\n",
      "{'loss': 0.00336975, 'seq_acc': 0.9, 'grad_norm': 0.58584672, 'learning_rate': 3.07e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33429, 'epoch': 3.19, 'global_step/max_steps': '3060/4800', 'percentage': '63.75%', 'elapsed_time': '38m 13s', 'remaining_time': '21m 43s'}\n",
      "{'loss': 0.00012074, 'seq_acc': 1.0, 'grad_norm': 0.00511658, 'learning_rate': 3.04e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334215, 'epoch': 3.2, 'global_step/max_steps': '3070/4800', 'percentage': '63.96%', 'elapsed_time': '38m 20s', 'remaining_time': '21m 36s'}\n",
      "{'loss': 5.91e-05, 'seq_acc': 1.0, 'grad_norm': 0.00077133, 'learning_rate': 3.01e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334169, 'epoch': 3.21, 'global_step/max_steps': '3080/4800', 'percentage': '64.17%', 'elapsed_time': '38m 28s', 'remaining_time': '21m 29s'}\n",
      "{'loss': 0.00028507, 'seq_acc': 1.0, 'grad_norm': 0.00125011, 'learning_rate': 2.97e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333949, 'epoch': 3.22, 'global_step/max_steps': '3090/4800', 'percentage': '64.38%', 'elapsed_time': '38m 36s', 'remaining_time': '21m 21s'}\n",
      "{'loss': 0.00020172, 'seq_acc': 1.0, 'grad_norm': 0.03839581, 'learning_rate': 2.94e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334068, 'epoch': 3.23, 'global_step/max_steps': '3100/4800', 'percentage': '64.58%', 'elapsed_time': '38m 43s', 'remaining_time': '21m 14s'}\n",
      "{'loss': 6.294e-05, 'seq_acc': 1.0, 'grad_norm': 0.00565653, 'learning_rate': 2.91e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334097, 'epoch': 3.24, 'global_step/max_steps': '3110/4800', 'percentage': '64.79%', 'elapsed_time': '38m 50s', 'remaining_time': '21m 6s'}\n",
      "{'loss': 0.00303924, 'seq_acc': 0.9, 'grad_norm': 0.03032373, 'learning_rate': 2.88e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334169, 'epoch': 3.25, 'global_step/max_steps': '3120/4800', 'percentage': '65.00%', 'elapsed_time': '38m 58s', 'remaining_time': '20m 59s'}\n",
      "{'loss': 0.00158999, 'seq_acc': 0.9, 'grad_norm': 0.00096598, 'learning_rate': 2.85e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334467, 'epoch': 3.26, 'global_step/max_steps': '3130/4800', 'percentage': '65.21%', 'elapsed_time': '39m 5s', 'remaining_time': '20m 51s'}\n",
      "{'loss': 7.792e-05, 'seq_acc': 1.0, 'grad_norm': 0.00320874, 'learning_rate': 2.82e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.3343, 'epoch': 3.27, 'global_step/max_steps': '3140/4800', 'percentage': '65.42%', 'elapsed_time': '39m 12s', 'remaining_time': '20m 43s'}\n",
      "{'loss': 0.00565057, 'seq_acc': 0.8, 'grad_norm': 0.00087187, 'learning_rate': 2.79e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334549, 'epoch': 3.28, 'global_step/max_steps': '3150/4800', 'percentage': '65.62%', 'elapsed_time': '39m 20s', 'remaining_time': '20m 36s'}\n",
      "{'loss': 0.00503114, 'seq_acc': 0.9, 'grad_norm': 0.01061581, 'learning_rate': 2.76e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334401, 'epoch': 3.29, 'global_step/max_steps': '3160/4800', 'percentage': '65.83%', 'elapsed_time': '39m 27s', 'remaining_time': '20m 28s'}\n",
      "{'loss': 0.00038943, 'seq_acc': 1.0, 'grad_norm': 0.33090889, 'learning_rate': 2.73e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334281, 'epoch': 3.3, 'global_step/max_steps': '3170/4800', 'percentage': '66.04%', 'elapsed_time': '39m 35s', 'remaining_time': '20m 21s'}\n",
      "{'loss': 0.00168291, 'seq_acc': 0.9, 'grad_norm': 0.00622041, 'learning_rate': 2.7e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334095, 'epoch': 3.31, 'global_step/max_steps': '3180/4800', 'percentage': '66.25%', 'elapsed_time': '39m 43s', 'remaining_time': '20m 14s'}\n",
      "{'loss': 4.245e-05, 'seq_acc': 1.0, 'grad_norm': 0.00819648, 'learning_rate': 2.67e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334094, 'epoch': 3.32, 'global_step/max_steps': '3190/4800', 'percentage': '66.46%', 'elapsed_time': '39m 50s', 'remaining_time': '20m 6s'}\n",
      "{'loss': 0.00015117, 'seq_acc': 1.0, 'grad_norm': 0.00851753, 'learning_rate': 2.64e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334134, 'epoch': 3.33, 'global_step/max_steps': '3200/4800', 'percentage': '66.67%', 'elapsed_time': '39m 58s', 'remaining_time': '19m 59s'}\n",
      "{'loss': 0.00011061, 'seq_acc': 1.0, 'grad_norm': 0.00048169, 'learning_rate': 2.61e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334286, 'epoch': 3.34, 'global_step/max_steps': '3210/4800', 'percentage': '66.88%', 'elapsed_time': '40m 5s', 'remaining_time': '19m 51s'}\n",
      "{'loss': 6.537e-05, 'seq_acc': 1.0, 'grad_norm': 0.00294311, 'learning_rate': 2.58e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333851, 'epoch': 3.35, 'global_step/max_steps': '3220/4800', 'percentage': '67.08%', 'elapsed_time': '40m 13s', 'remaining_time': '19m 44s'}\n",
      "{'loss': 0.00025911, 'seq_acc': 1.0, 'grad_norm': 0.00506494, 'learning_rate': 2.55e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333766, 'epoch': 3.36, 'global_step/max_steps': '3230/4800', 'percentage': '67.29%', 'elapsed_time': '40m 21s', 'remaining_time': '19m 36s'}\n",
      "{'loss': 0.00049336, 'seq_acc': 1.0, 'grad_norm': 0.00242219, 'learning_rate': 2.52e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334151, 'epoch': 3.38, 'global_step/max_steps': '3240/4800', 'percentage': '67.50%', 'elapsed_time': '40m 28s', 'remaining_time': '19m 29s'}\n",
      "{'loss': 0.01432883, 'seq_acc': 0.8, 'grad_norm': 0.00043575, 'learning_rate': 2.49e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333823, 'epoch': 3.39, 'global_step/max_steps': '3250/4800', 'percentage': '67.71%', 'elapsed_time': '40m 36s', 'remaining_time': '19m 21s'}\n",
      "{'loss': 0.00417959, 'seq_acc': 0.9, 'grad_norm': 0.0341485, 'learning_rate': 2.47e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333945, 'epoch': 3.4, 'global_step/max_steps': '3260/4800', 'percentage': '67.92%', 'elapsed_time': '40m 43s', 'remaining_time': '19m 14s'}\n",
      "{'loss': 0.00305201, 'seq_acc': 0.9, 'grad_norm': 0.69436264, 'learning_rate': 2.44e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333718, 'epoch': 3.41, 'global_step/max_steps': '3270/4800', 'percentage': '68.12%', 'elapsed_time': '40m 51s', 'remaining_time': '19m 7s'}\n",
      "{'loss': 0.0064587, 'seq_acc': 0.8, 'grad_norm': 0.00305404, 'learning_rate': 2.41e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333456, 'epoch': 3.42, 'global_step/max_steps': '3280/4800', 'percentage': '68.33%', 'elapsed_time': '40m 59s', 'remaining_time': '18m 59s'}\n",
      "{'loss': 0.00033316, 'seq_acc': 1.0, 'grad_norm': 0.00405493, 'learning_rate': 2.38e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333263, 'epoch': 3.43, 'global_step/max_steps': '3290/4800', 'percentage': '68.54%', 'elapsed_time': '41m 7s', 'remaining_time': '18m 52s'}\n",
      "{'loss': 0.00061393, 'seq_acc': 1.0, 'grad_norm': 0.01657378, 'learning_rate': 2.35e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333433, 'epoch': 3.44, 'global_step/max_steps': '3300/4800', 'percentage': '68.75%', 'elapsed_time': '41m 14s', 'remaining_time': '18m 44s'}\n",
      "{'loss': 0.00021772, 'seq_acc': 1.0, 'grad_norm': 0.00457499, 'learning_rate': 2.32e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33328, 'epoch': 3.45, 'global_step/max_steps': '3310/4800', 'percentage': '68.96%', 'elapsed_time': '41m 22s', 'remaining_time': '18m 37s'}\n",
      "{'loss': 0.00717913, 'seq_acc': 0.9, 'grad_norm': 5.06447983, 'learning_rate': 2.29e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333203, 'epoch': 3.46, 'global_step/max_steps': '3320/4800', 'percentage': '69.17%', 'elapsed_time': '41m 29s', 'remaining_time': '18m 29s'}\n",
      "{'loss': 0.00204764, 'seq_acc': 0.9, 'grad_norm': 0.00422468, 'learning_rate': 2.26e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33351, 'epoch': 3.47, 'global_step/max_steps': '3330/4800', 'percentage': '69.38%', 'elapsed_time': '41m 36s', 'remaining_time': '18m 22s'}\n",
      "{'loss': 0.00099309, 'seq_acc': 1.0, 'grad_norm': 0.00190813, 'learning_rate': 2.24e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333755, 'epoch': 3.48, 'global_step/max_steps': '3340/4800', 'percentage': '69.58%', 'elapsed_time': '41m 43s', 'remaining_time': '18m 14s'}\n",
      "{'loss': 0.0001518, 'seq_acc': 1.0, 'grad_norm': 0.01062573, 'learning_rate': 2.21e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333939, 'epoch': 3.49, 'global_step/max_steps': '3350/4800', 'percentage': '69.79%', 'elapsed_time': '41m 51s', 'remaining_time': '18m 6s'}\n",
      "{'loss': 0.00097533, 'seq_acc': 1.0, 'grad_norm': 0.00018075, 'learning_rate': 2.18e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333815, 'epoch': 3.5, 'global_step/max_steps': '3360/4800', 'percentage': '70.00%', 'elapsed_time': '41m 58s', 'remaining_time': '17m 59s'}\n",
      "{'loss': 0.0009898, 'seq_acc': 1.0, 'grad_norm': 0.00246147, 'learning_rate': 2.15e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333883, 'epoch': 3.51, 'global_step/max_steps': '3370/4800', 'percentage': '70.21%', 'elapsed_time': '42m 6s', 'remaining_time': '17m 51s'}\n",
      "{'loss': 7.903e-05, 'seq_acc': 1.0, 'grad_norm': 0.00664843, 'learning_rate': 2.12e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333826, 'epoch': 3.52, 'global_step/max_steps': '3380/4800', 'percentage': '70.42%', 'elapsed_time': '42m 13s', 'remaining_time': '17m 44s'}\n",
      "{'loss': 0.00033638, 'seq_acc': 1.0, 'grad_norm': 0.00055968, 'learning_rate': 2.1e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333631, 'epoch': 3.53, 'global_step/max_steps': '3390/4800', 'percentage': '70.62%', 'elapsed_time': '42m 21s', 'remaining_time': '17m 37s'}\n",
      "{'loss': 0.00272601, 'seq_acc': 0.9, 'grad_norm': 0.0003192, 'learning_rate': 2.07e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333995, 'epoch': 3.54, 'global_step/max_steps': '3400/4800', 'percentage': '70.83%', 'elapsed_time': '42m 28s', 'remaining_time': '17m 29s'}\n",
      "{'loss': 2.783e-05, 'seq_acc': 1.0, 'grad_norm': 0.00150078, 'learning_rate': 2.04e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334171, 'epoch': 3.55, 'global_step/max_steps': '3410/4800', 'percentage': '71.04%', 'elapsed_time': '42m 35s', 'remaining_time': '17m 21s'}\n",
      "{'loss': 0.00373962, 'seq_acc': 0.9, 'grad_norm': 0.00079335, 'learning_rate': 2.02e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334164, 'epoch': 3.56, 'global_step/max_steps': '3420/4800', 'percentage': '71.25%', 'elapsed_time': '42m 43s', 'remaining_time': '17m 14s'}\n",
      "{'loss': 3.125e-05, 'seq_acc': 1.0, 'grad_norm': 0.00089911, 'learning_rate': 1.99e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334351, 'epoch': 3.57, 'global_step/max_steps': '3430/4800', 'percentage': '71.46%', 'elapsed_time': '42m 50s', 'remaining_time': '17m 6s'}\n",
      "{'loss': 8.505e-05, 'seq_acc': 1.0, 'grad_norm': 0.00019785, 'learning_rate': 1.96e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334328, 'epoch': 3.58, 'global_step/max_steps': '3440/4800', 'percentage': '71.67%', 'elapsed_time': '42m 57s', 'remaining_time': '16m 59s'}\n",
      "{'loss': 0.00171056, 'seq_acc': 0.9, 'grad_norm': 0.00295737, 'learning_rate': 1.93e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334444, 'epoch': 3.59, 'global_step/max_steps': '3450/4800', 'percentage': '71.88%', 'elapsed_time': '43m 5s', 'remaining_time': '16m 51s'}\n",
      "{'loss': 3.638e-05, 'seq_acc': 1.0, 'grad_norm': 0.00052705, 'learning_rate': 1.91e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334514, 'epoch': 3.6, 'global_step/max_steps': '3460/4800', 'percentage': '72.08%', 'elapsed_time': '43m 12s', 'remaining_time': '16m 43s'}\n",
      "{'loss': 0.00014187, 'seq_acc': 1.0, 'grad_norm': 0.00100375, 'learning_rate': 1.88e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334709, 'epoch': 3.61, 'global_step/max_steps': '3470/4800', 'percentage': '72.29%', 'elapsed_time': '43m 19s', 'remaining_time': '16m 36s'}\n",
      "{'loss': 1.695e-05, 'seq_acc': 1.0, 'grad_norm': 0.00031402, 'learning_rate': 1.86e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334838, 'epoch': 3.62, 'global_step/max_steps': '3480/4800', 'percentage': '72.50%', 'elapsed_time': '43m 26s', 'remaining_time': '16m 28s'}\n",
      "{'loss': 0.00779234, 'seq_acc': 0.9, 'grad_norm': 0.05954657, 'learning_rate': 1.83e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334962, 'epoch': 3.64, 'global_step/max_steps': '3490/4800', 'percentage': '72.71%', 'elapsed_time': '43m 34s', 'remaining_time': '16m 21s'}\n",
      "{'loss': 0.00014731, 'seq_acc': 1.0, 'grad_norm': 0.05876929, 'learning_rate': 1.8e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334476, 'epoch': 3.65, 'global_step/max_steps': '3500/4800', 'percentage': '72.92%', 'elapsed_time': '43m 42s', 'remaining_time': '16m 14s'}\n",
      "{'loss': 0.00370306, 'seq_acc': 0.9, 'grad_norm': 0.01004128, 'learning_rate': 1.78e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334254, 'epoch': 3.66, 'global_step/max_steps': '3510/4800', 'percentage': '73.12%', 'elapsed_time': '43m 50s', 'remaining_time': '16m 6s'}\n",
      "{'loss': 0.00027909, 'seq_acc': 1.0, 'grad_norm': 0.03617361, 'learning_rate': 1.75e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334134, 'epoch': 3.67, 'global_step/max_steps': '3520/4800', 'percentage': '73.33%', 'elapsed_time': '43m 58s', 'remaining_time': '15m 59s'}\n",
      "{'loss': 0.00014222, 'seq_acc': 1.0, 'grad_norm': 0.06036845, 'learning_rate': 1.73e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333746, 'epoch': 3.68, 'global_step/max_steps': '3530/4800', 'percentage': '73.54%', 'elapsed_time': '44m 6s', 'remaining_time': '15m 52s'}\n",
      "{'loss': 4.672e-05, 'seq_acc': 1.0, 'grad_norm': 0.00018493, 'learning_rate': 1.7e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333726, 'epoch': 3.69, 'global_step/max_steps': '3540/4800', 'percentage': '73.75%', 'elapsed_time': '44m 13s', 'remaining_time': '15m 44s'}\n",
      "{'loss': 0.00128313, 'seq_acc': 0.9, 'grad_norm': 0.00604747, 'learning_rate': 1.68e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333803, 'epoch': 3.7, 'global_step/max_steps': '3550/4800', 'percentage': '73.96%', 'elapsed_time': '44m 21s', 'remaining_time': '15m 37s'}\n",
      "{'loss': 0.00013688, 'seq_acc': 1.0, 'grad_norm': 0.00040897, 'learning_rate': 1.65e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333697, 'epoch': 3.71, 'global_step/max_steps': '3560/4800', 'percentage': '74.17%', 'elapsed_time': '44m 28s', 'remaining_time': '15m 29s'}\n",
      "{'loss': 6.204e-05, 'seq_acc': 1.0, 'grad_norm': 5.307e-05, 'learning_rate': 1.63e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333699, 'epoch': 3.72, 'global_step/max_steps': '3570/4800', 'percentage': '74.38%', 'elapsed_time': '44m 36s', 'remaining_time': '15m 22s'}\n",
      "{'loss': 0.00237977, 'seq_acc': 0.9, 'grad_norm': 0.0009229, 'learning_rate': 1.6e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333849, 'epoch': 3.73, 'global_step/max_steps': '3580/4800', 'percentage': '74.58%', 'elapsed_time': '44m 43s', 'remaining_time': '15m 14s'}\n",
      "{'loss': 1.788e-05, 'seq_acc': 1.0, 'grad_norm': 0.00384901, 'learning_rate': 1.58e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334039, 'epoch': 3.74, 'global_step/max_steps': '3590/4800', 'percentage': '74.79%', 'elapsed_time': '44m 50s', 'remaining_time': '15m 6s'}\n",
      "{'loss': 0.00019902, 'seq_acc': 1.0, 'grad_norm': 0.01643828, 'learning_rate': 1.55e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334014, 'epoch': 3.75, 'global_step/max_steps': '3600/4800', 'percentage': '75.00%', 'elapsed_time': '44m 58s', 'remaining_time': '14m 59s'}\n",
      "{'loss': 0.00106801, 'seq_acc': 1.0, 'grad_norm': 0.062174, 'learning_rate': 1.53e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.3341, 'epoch': 3.76, 'global_step/max_steps': '3610/4800', 'percentage': '75.21%', 'elapsed_time': '45m 5s', 'remaining_time': '14m 51s'}\n",
      "{'loss': 0.0007383, 'seq_acc': 1.0, 'grad_norm': 0.00440442, 'learning_rate': 1.5e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334273, 'epoch': 3.77, 'global_step/max_steps': '3620/4800', 'percentage': '75.42%', 'elapsed_time': '45m 12s', 'remaining_time': '14m 44s'}\n",
      "{'loss': 0.00010322, 'seq_acc': 1.0, 'grad_norm': 0.00492875, 'learning_rate': 1.48e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334148, 'epoch': 3.78, 'global_step/max_steps': '3630/4800', 'percentage': '75.62%', 'elapsed_time': '45m 20s', 'remaining_time': '14m 36s'}\n",
      "{'loss': 0.00016659, 'seq_acc': 1.0, 'grad_norm': 0.00192545, 'learning_rate': 1.45e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334245, 'epoch': 3.79, 'global_step/max_steps': '3640/4800', 'percentage': '75.83%', 'elapsed_time': '45m 27s', 'remaining_time': '14m 29s'}\n",
      "{'loss': 0.00012979, 'seq_acc': 1.0, 'grad_norm': 0.00169256, 'learning_rate': 1.43e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334434, 'epoch': 3.8, 'global_step/max_steps': '3650/4800', 'percentage': '76.04%', 'elapsed_time': '45m 34s', 'remaining_time': '14m 21s'}\n",
      "{'loss': 0.00214858, 'seq_acc': 0.9, 'grad_norm': 0.00522594, 'learning_rate': 1.41e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334429, 'epoch': 3.81, 'global_step/max_steps': '3660/4800', 'percentage': '76.25%', 'elapsed_time': '45m 42s', 'remaining_time': '14m 14s'}\n",
      "{'loss': 0.00012558, 'seq_acc': 1.0, 'grad_norm': 0.00315873, 'learning_rate': 1.38e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334299, 'epoch': 3.82, 'global_step/max_steps': '3670/4800', 'percentage': '76.46%', 'elapsed_time': '45m 50s', 'remaining_time': '14m 6s'}\n",
      "{'loss': 0.00031727, 'seq_acc': 1.0, 'grad_norm': 0.35029143, 'learning_rate': 1.36e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334213, 'epoch': 3.83, 'global_step/max_steps': '3680/4800', 'percentage': '76.67%', 'elapsed_time': '45m 57s', 'remaining_time': '13m 59s'}\n",
      "{'loss': 5.704e-05, 'seq_acc': 1.0, 'grad_norm': 0.0018367, 'learning_rate': 1.34e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334271, 'epoch': 3.84, 'global_step/max_steps': '3690/4800', 'percentage': '76.88%', 'elapsed_time': '46m 5s', 'remaining_time': '13m 51s'}\n",
      "{'loss': 0.00064933, 'seq_acc': 1.0, 'grad_norm': 0.9196133, 'learning_rate': 1.32e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334144, 'epoch': 3.85, 'global_step/max_steps': '3700/4800', 'percentage': '77.08%', 'elapsed_time': '46m 13s', 'remaining_time': '13m 44s'}\n",
      "{'loss': 0.00058969, 'seq_acc': 1.0, 'grad_norm': 0.01570489, 'learning_rate': 1.29e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334261, 'epoch': 3.86, 'global_step/max_steps': '3710/4800', 'percentage': '77.29%', 'elapsed_time': '46m 20s', 'remaining_time': '13m 36s'}\n",
      "{'loss': 0.00015364, 'seq_acc': 1.0, 'grad_norm': 0.00033485, 'learning_rate': 1.27e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334367, 'epoch': 3.88, 'global_step/max_steps': '3720/4800', 'percentage': '77.50%', 'elapsed_time': '46m 27s', 'remaining_time': '13m 29s'}\n",
      "{'loss': 0.00026977, 'seq_acc': 1.0, 'grad_norm': 0.00041272, 'learning_rate': 1.25e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334393, 'epoch': 3.89, 'global_step/max_steps': '3730/4800', 'percentage': '77.71%', 'elapsed_time': '46m 34s', 'remaining_time': '13m 21s'}\n",
      "{'loss': 0.00016318, 'seq_acc': 1.0, 'grad_norm': 0.01203156, 'learning_rate': 1.23e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334515, 'epoch': 3.9, 'global_step/max_steps': '3740/4800', 'percentage': '77.92%', 'elapsed_time': '46m 42s', 'remaining_time': '13m 14s'}\n",
      "{'loss': 0.00127834, 'seq_acc': 0.9, 'grad_norm': 0.0003567, 'learning_rate': 1.2e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334504, 'epoch': 3.91, 'global_step/max_steps': '3750/4800', 'percentage': '78.12%', 'elapsed_time': '46m 49s', 'remaining_time': '13m 6s'}\n",
      "{'loss': 0.00015345, 'seq_acc': 1.0, 'grad_norm': 0.00431026, 'learning_rate': 1.18e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334297, 'epoch': 3.92, 'global_step/max_steps': '3760/4800', 'percentage': '78.33%', 'elapsed_time': '46m 57s', 'remaining_time': '12m 59s'}\n",
      "{'loss': 0.00144695, 'seq_acc': 1.0, 'grad_norm': 0.22155623, 'learning_rate': 1.16e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334284, 'epoch': 3.93, 'global_step/max_steps': '3770/4800', 'percentage': '78.54%', 'elapsed_time': '47m 5s', 'remaining_time': '12m 51s'}\n",
      "{'loss': 0.00202208, 'seq_acc': 0.9, 'grad_norm': 2.08675408, 'learning_rate': 1.14e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334356, 'epoch': 3.94, 'global_step/max_steps': '3780/4800', 'percentage': '78.75%', 'elapsed_time': '47m 12s', 'remaining_time': '12m 44s'}\n",
      "{'loss': 0.00215402, 'seq_acc': 0.9, 'grad_norm': 0.06731025, 'learning_rate': 1.12e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334044, 'epoch': 3.95, 'global_step/max_steps': '3790/4800', 'percentage': '78.96%', 'elapsed_time': '47m 20s', 'remaining_time': '12m 37s'}\n",
      "{'loss': 0.00013318, 'seq_acc': 1.0, 'grad_norm': 0.0285471, 'learning_rate': 1.1e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333929, 'epoch': 3.96, 'global_step/max_steps': '3800/4800', 'percentage': '79.17%', 'elapsed_time': '47m 28s', 'remaining_time': '12m 29s'}\n",
      "{'loss': 0.00038419, 'seq_acc': 1.0, 'grad_norm': 0.00285288, 'learning_rate': 1.07e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334229, 'epoch': 3.97, 'global_step/max_steps': '3810/4800', 'percentage': '79.38%', 'elapsed_time': '47m 35s', 'remaining_time': '12m 21s'}\n",
      "{'loss': 0.00017328, 'seq_acc': 1.0, 'grad_norm': 0.03343513, 'learning_rate': 1.05e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333914, 'epoch': 3.98, 'global_step/max_steps': '3820/4800', 'percentage': '79.58%', 'elapsed_time': '47m 43s', 'remaining_time': '12m 14s'}\n",
      "{'loss': 0.00062958, 'seq_acc': 1.0, 'grad_norm': 0.22896153, 'learning_rate': 1.03e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333766, 'epoch': 3.99, 'global_step/max_steps': '3830/4800', 'percentage': '79.79%', 'elapsed_time': '47m 51s', 'remaining_time': '12m 7s'}\n",
      "{'loss': 0.00186886, 'seq_acc': 0.9, 'grad_norm': 0.01646626, 'learning_rate': 1.01e-06, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333716, 'epoch': 4.0, 'global_step/max_steps': '3840/4800', 'percentage': '80.00%', 'elapsed_time': '47m 58s', 'remaining_time': '11m 59s'}\n",
      "{'loss': 5.865e-05, 'seq_acc': 1.0, 'grad_norm': 0.01060395, 'learning_rate': 9.9e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333749, 'epoch': 4.01, 'global_step/max_steps': '3850/4800', 'percentage': '80.21%', 'elapsed_time': '48m 6s', 'remaining_time': '11m 52s'}\n",
      "{'loss': 0.00010335, 'seq_acc': 1.0, 'grad_norm': 0.02625793, 'learning_rate': 9.7e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333877, 'epoch': 4.02, 'global_step/max_steps': '3860/4800', 'percentage': '80.42%', 'elapsed_time': '48m 13s', 'remaining_time': '11m 44s'}\n",
      "{'loss': 0.00014268, 'seq_acc': 1.0, 'grad_norm': 0.00184138, 'learning_rate': 9.5e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333923, 'epoch': 4.03, 'global_step/max_steps': '3870/4800', 'percentage': '80.62%', 'elapsed_time': '48m 20s', 'remaining_time': '11m 37s'}\n",
      "{'loss': 3.624e-05, 'seq_acc': 1.0, 'grad_norm': 0.0001704, 'learning_rate': 9.3e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333822, 'epoch': 4.04, 'global_step/max_steps': '3880/4800', 'percentage': '80.83%', 'elapsed_time': '48m 28s', 'remaining_time': '11m 29s'}\n",
      "{'loss': 0.00018607, 'seq_acc': 1.0, 'grad_norm': 0.00294026, 'learning_rate': 9.1e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333841, 'epoch': 4.05, 'global_step/max_steps': '3890/4800', 'percentage': '81.04%', 'elapsed_time': '48m 36s', 'remaining_time': '11m 22s'}\n",
      "{'loss': 0.00021419, 'seq_acc': 1.0, 'grad_norm': 0.00523672, 'learning_rate': 8.9e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333824, 'epoch': 4.06, 'global_step/max_steps': '3900/4800', 'percentage': '81.25%', 'elapsed_time': '48m 43s', 'remaining_time': '11m 14s'}\n",
      "{'loss': 6.797e-05, 'seq_acc': 1.0, 'grad_norm': 0.00130498, 'learning_rate': 8.7e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333774, 'epoch': 4.07, 'global_step/max_steps': '3910/4800', 'percentage': '81.46%', 'elapsed_time': '48m 51s', 'remaining_time': '11m 7s'}\n",
      "{'loss': 1.012e-05, 'seq_acc': 1.0, 'grad_norm': 0.00022783, 'learning_rate': 8.6e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33386, 'epoch': 4.08, 'global_step/max_steps': '3920/4800', 'percentage': '81.67%', 'elapsed_time': '48m 58s', 'remaining_time': '10m 59s'}\n",
      "{'loss': 2.58e-05, 'seq_acc': 1.0, 'grad_norm': 0.00066297, 'learning_rate': 8.4e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334011, 'epoch': 4.09, 'global_step/max_steps': '3930/4800', 'percentage': '81.88%', 'elapsed_time': '49m 5s', 'remaining_time': '10m 52s'}\n",
      "{'loss': 0.00053431, 'seq_acc': 1.0, 'grad_norm': 0.00048963, 'learning_rate': 8.2e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334148, 'epoch': 4.1, 'global_step/max_steps': '3940/4800', 'percentage': '82.08%', 'elapsed_time': '49m 12s', 'remaining_time': '10m 44s'}\n",
      "{'loss': 2.616e-05, 'seq_acc': 1.0, 'grad_norm': 0.00132282, 'learning_rate': 8e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334055, 'epoch': 4.11, 'global_step/max_steps': '3950/4800', 'percentage': '82.29%', 'elapsed_time': '49m 20s', 'remaining_time': '10m 37s'}\n",
      "{'loss': 0.00013004, 'seq_acc': 1.0, 'grad_norm': 0.02675999, 'learning_rate': 7.8e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333952, 'epoch': 4.12, 'global_step/max_steps': '3960/4800', 'percentage': '82.50%', 'elapsed_time': '49m 28s', 'remaining_time': '10m 29s'}\n",
      "{'loss': 0.00071857, 'seq_acc': 0.9, 'grad_norm': 0.00922139, 'learning_rate': 7.6e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333783, 'epoch': 4.14, 'global_step/max_steps': '3970/4800', 'percentage': '82.71%', 'elapsed_time': '49m 36s', 'remaining_time': '10m 22s'}\n",
      "{'loss': 4.709e-05, 'seq_acc': 1.0, 'grad_norm': 0.00016039, 'learning_rate': 7.5e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333938, 'epoch': 4.15, 'global_step/max_steps': '3980/4800', 'percentage': '82.92%', 'elapsed_time': '49m 43s', 'remaining_time': '10m 14s'}\n",
      "{'loss': 0.00061879, 'seq_acc': 1.0, 'grad_norm': 0.01633936, 'learning_rate': 7.3e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334003, 'epoch': 4.16, 'global_step/max_steps': '3990/4800', 'percentage': '83.12%', 'elapsed_time': '49m 50s', 'remaining_time': '10m 7s'}\n",
      "{'loss': 2.402e-05, 'seq_acc': 1.0, 'grad_norm': 0.00287622, 'learning_rate': 7.1e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334165, 'epoch': 4.17, 'global_step/max_steps': '4000/4800', 'percentage': '83.33%', 'elapsed_time': '49m 57s', 'remaining_time': '9m 59s'}\n",
      "{'loss': 7.692e-05, 'seq_acc': 1.0, 'grad_norm': 0.00056395, 'learning_rate': 6.9e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334282, 'epoch': 4.18, 'global_step/max_steps': '4010/4800', 'percentage': '83.54%', 'elapsed_time': '50m 5s', 'remaining_time': '9m 52s'}\n",
      "{'loss': 2.658e-05, 'seq_acc': 1.0, 'grad_norm': 0.00020263, 'learning_rate': 6.8e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33432, 'epoch': 4.19, 'global_step/max_steps': '4020/4800', 'percentage': '83.75%', 'elapsed_time': '50m 12s', 'remaining_time': '9m 44s'}\n",
      "{'loss': 6.901e-05, 'seq_acc': 1.0, 'grad_norm': 0.00323114, 'learning_rate': 6.6e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334457, 'epoch': 4.2, 'global_step/max_steps': '4030/4800', 'percentage': '83.96%', 'elapsed_time': '50m 19s', 'remaining_time': '9m 36s'}\n",
      "{'loss': 3.552e-05, 'seq_acc': 1.0, 'grad_norm': 0.00060581, 'learning_rate': 6.4e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334439, 'epoch': 4.21, 'global_step/max_steps': '4040/4800', 'percentage': '84.17%', 'elapsed_time': '50m 27s', 'remaining_time': '9m 29s'}\n",
      "{'loss': 4.045e-05, 'seq_acc': 1.0, 'grad_norm': 0.01171634, 'learning_rate': 6.3e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334572, 'epoch': 4.22, 'global_step/max_steps': '4050/4800', 'percentage': '84.38%', 'elapsed_time': '50m 34s', 'remaining_time': '9m 21s'}\n",
      "{'loss': 2.401e-05, 'seq_acc': 1.0, 'grad_norm': 0.00164682, 'learning_rate': 6.1e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33468, 'epoch': 4.23, 'global_step/max_steps': '4060/4800', 'percentage': '84.58%', 'elapsed_time': '50m 41s', 'remaining_time': '9m 14s'}\n",
      "{'loss': 0.00010975, 'seq_acc': 1.0, 'grad_norm': 0.00152896, 'learning_rate': 5.9e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334416, 'epoch': 4.24, 'global_step/max_steps': '4070/4800', 'percentage': '84.79%', 'elapsed_time': '50m 49s', 'remaining_time': '9m 7s'}\n",
      "{'loss': 3.663e-05, 'seq_acc': 1.0, 'grad_norm': 0.00058543, 'learning_rate': 5.8e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334603, 'epoch': 4.25, 'global_step/max_steps': '4080/4800', 'percentage': '85.00%', 'elapsed_time': '50m 56s', 'remaining_time': '8m 59s'}\n",
      "{'loss': 2.628e-05, 'seq_acc': 1.0, 'grad_norm': 0.00401669, 'learning_rate': 5.6e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334624, 'epoch': 4.26, 'global_step/max_steps': '4090/4800', 'percentage': '85.21%', 'elapsed_time': '51m 4s', 'remaining_time': '8m 51s'}\n",
      "{'loss': 0.00077596, 'seq_acc': 1.0, 'grad_norm': 0.82919604, 'learning_rate': 5.5e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334695, 'epoch': 4.27, 'global_step/max_steps': '4100/4800', 'percentage': '85.42%', 'elapsed_time': '51m 11s', 'remaining_time': '8m 44s'}\n",
      "{'loss': 5.366e-05, 'seq_acc': 1.0, 'grad_norm': 9.945e-05, 'learning_rate': 5.3e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334694, 'epoch': 4.28, 'global_step/max_steps': '4110/4800', 'percentage': '85.62%', 'elapsed_time': '51m 19s', 'remaining_time': '8m 36s'}\n",
      "{'loss': 0.00019476, 'seq_acc': 1.0, 'grad_norm': 0.00666561, 'learning_rate': 5.2e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33459, 'epoch': 4.29, 'global_step/max_steps': '4120/4800', 'percentage': '85.83%', 'elapsed_time': '51m 26s', 'remaining_time': '8m 29s'}\n",
      "{'loss': 0.00140216, 'seq_acc': 0.9, 'grad_norm': 0.07575492, 'learning_rate': 5e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334512, 'epoch': 4.3, 'global_step/max_steps': '4130/4800', 'percentage': '86.04%', 'elapsed_time': '51m 34s', 'remaining_time': '8m 22s'}\n",
      "{'loss': 0.00206518, 'seq_acc': 0.9, 'grad_norm': 8.51062202, 'learning_rate': 4.9e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334288, 'epoch': 4.31, 'global_step/max_steps': '4140/4800', 'percentage': '86.25%', 'elapsed_time': '51m 42s', 'remaining_time': '8m 14s'}\n",
      "{'loss': 1.432e-05, 'seq_acc': 1.0, 'grad_norm': 7.972e-05, 'learning_rate': 4.7e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334214, 'epoch': 4.32, 'global_step/max_steps': '4150/4800', 'percentage': '86.46%', 'elapsed_time': '51m 50s', 'remaining_time': '8m 7s'}\n",
      "{'loss': 3.826e-05, 'seq_acc': 1.0, 'grad_norm': 0.00018444, 'learning_rate': 4.6e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334331, 'epoch': 4.33, 'global_step/max_steps': '4160/4800', 'percentage': '86.67%', 'elapsed_time': '51m 57s', 'remaining_time': '7m 59s'}\n",
      "{'loss': 2.278e-05, 'seq_acc': 1.0, 'grad_norm': 0.0020765, 'learning_rate': 4.4e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334134, 'epoch': 4.34, 'global_step/max_steps': '4170/4800', 'percentage': '86.88%', 'elapsed_time': '52m 5s', 'remaining_time': '7m 52s'}\n",
      "{'loss': 6.603e-05, 'seq_acc': 1.0, 'grad_norm': 0.0008202, 'learning_rate': 4.3e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334108, 'epoch': 4.35, 'global_step/max_steps': '4180/4800', 'percentage': '87.08%', 'elapsed_time': '52m 12s', 'remaining_time': '7m 44s'}\n",
      "{'loss': 3.22e-05, 'seq_acc': 1.0, 'grad_norm': 0.0118585, 'learning_rate': 4.2e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334051, 'epoch': 4.36, 'global_step/max_steps': '4190/4800', 'percentage': '87.29%', 'elapsed_time': '52m 20s', 'remaining_time': '7m 37s'}\n",
      "{'loss': 1.885e-05, 'seq_acc': 1.0, 'grad_norm': 0.00049455, 'learning_rate': 4e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334187, 'epoch': 4.38, 'global_step/max_steps': '4200/4800', 'percentage': '87.50%', 'elapsed_time': '52m 27s', 'remaining_time': '7m 29s'}\n",
      "{'loss': 7.282e-05, 'seq_acc': 1.0, 'grad_norm': 0.00148005, 'learning_rate': 3.9e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334149, 'epoch': 4.39, 'global_step/max_steps': '4210/4800', 'percentage': '87.71%', 'elapsed_time': '52m 35s', 'remaining_time': '7m 22s'}\n",
      "{'loss': 1.388e-05, 'seq_acc': 1.0, 'grad_norm': 0.0005268, 'learning_rate': 3.8e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334132, 'epoch': 4.4, 'global_step/max_steps': '4220/4800', 'percentage': '87.92%', 'elapsed_time': '52m 42s', 'remaining_time': '7m 14s'}\n",
      "{'loss': 1.809e-05, 'seq_acc': 1.0, 'grad_norm': 0.00089771, 'learning_rate': 3.7e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334012, 'epoch': 4.41, 'global_step/max_steps': '4230/4800', 'percentage': '88.12%', 'elapsed_time': '52m 50s', 'remaining_time': '7m 7s'}\n",
      "{'loss': 5.301e-05, 'seq_acc': 1.0, 'grad_norm': 8.958e-05, 'learning_rate': 3.5e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333497, 'epoch': 4.42, 'global_step/max_steps': '4240/4800', 'percentage': '88.33%', 'elapsed_time': '52m 59s', 'remaining_time': '6m 59s'}\n",
      "{'loss': 2.237e-05, 'seq_acc': 1.0, 'grad_norm': 0.00554696, 'learning_rate': 3.4e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333752, 'epoch': 4.43, 'global_step/max_steps': '4250/4800', 'percentage': '88.54%', 'elapsed_time': '53m 6s', 'remaining_time': '6m 52s'}\n",
      "{'loss': 3.343e-05, 'seq_acc': 1.0, 'grad_norm': 8.355e-05, 'learning_rate': 3.3e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333868, 'epoch': 4.44, 'global_step/max_steps': '4260/4800', 'percentage': '88.75%', 'elapsed_time': '53m 13s', 'remaining_time': '6m 44s'}\n",
      "{'loss': 1.796e-05, 'seq_acc': 1.0, 'grad_norm': 0.00204003, 'learning_rate': 3.2e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333988, 'epoch': 4.45, 'global_step/max_steps': '4270/4800', 'percentage': '88.96%', 'elapsed_time': '53m 20s', 'remaining_time': '6m 37s'}\n",
      "{'loss': 2.138e-05, 'seq_acc': 1.0, 'grad_norm': 0.00016222, 'learning_rate': 3e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333867, 'epoch': 4.46, 'global_step/max_steps': '4280/4800', 'percentage': '89.17%', 'elapsed_time': '53m 28s', 'remaining_time': '6m 29s'}\n",
      "{'loss': 1.957e-05, 'seq_acc': 1.0, 'grad_norm': 5.912e-05, 'learning_rate': 2.9e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334137, 'epoch': 4.47, 'global_step/max_steps': '4290/4800', 'percentage': '89.38%', 'elapsed_time': '53m 35s', 'remaining_time': '6m 22s'}\n",
      "{'loss': 9.759e-05, 'seq_acc': 1.0, 'grad_norm': 3.717e-05, 'learning_rate': 2.8e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334247, 'epoch': 4.48, 'global_step/max_steps': '4300/4800', 'percentage': '89.58%', 'elapsed_time': '53m 42s', 'remaining_time': '6m 14s'}\n",
      "{'loss': 7.854e-05, 'seq_acc': 1.0, 'grad_norm': 6.083e-05, 'learning_rate': 2.7e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334111, 'epoch': 4.49, 'global_step/max_steps': '4310/4800', 'percentage': '89.79%', 'elapsed_time': '53m 50s', 'remaining_time': '6m 7s'}\n",
      "{'loss': 2.116e-05, 'seq_acc': 1.0, 'grad_norm': 9.411e-05, 'learning_rate': 2.6e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334181, 'epoch': 4.5, 'global_step/max_steps': '4320/4800', 'percentage': '90.00%', 'elapsed_time': '53m 57s', 'remaining_time': '5m 59s'}\n",
      "{'loss': 3.853e-05, 'seq_acc': 1.0, 'grad_norm': 0.00098679, 'learning_rate': 2.5e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334303, 'epoch': 4.51, 'global_step/max_steps': '4330/4800', 'percentage': '90.21%', 'elapsed_time': '54m 4s', 'remaining_time': '5m 52s'}\n",
      "{'loss': 4.478e-05, 'seq_acc': 1.0, 'grad_norm': 0.01329043, 'learning_rate': 2.4e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334256, 'epoch': 4.52, 'global_step/max_steps': '4340/4800', 'percentage': '90.42%', 'elapsed_time': '54m 12s', 'remaining_time': '5m 44s'}\n",
      "{'loss': 6.416e-05, 'seq_acc': 1.0, 'grad_norm': 0.00299975, 'learning_rate': 2.3e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334227, 'epoch': 4.53, 'global_step/max_steps': '4350/4800', 'percentage': '90.62%', 'elapsed_time': '54m 20s', 'remaining_time': '5m 37s'}\n",
      "{'loss': 1.097e-05, 'seq_acc': 1.0, 'grad_norm': 0.00066648, 'learning_rate': 2.2e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334235, 'epoch': 4.54, 'global_step/max_steps': '4360/4800', 'percentage': '90.83%', 'elapsed_time': '54m 27s', 'remaining_time': '5m 29s'}\n",
      "{'loss': 8.224e-05, 'seq_acc': 1.0, 'grad_norm': 0.00132709, 'learning_rate': 2.1e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334337, 'epoch': 4.55, 'global_step/max_steps': '4370/4800', 'percentage': '91.04%', 'elapsed_time': '54m 34s', 'remaining_time': '5m 22s'}\n",
      "{'loss': 1.751e-05, 'seq_acc': 1.0, 'grad_norm': 0.00292017, 'learning_rate': 2e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334308, 'epoch': 4.56, 'global_step/max_steps': '4380/4800', 'percentage': '91.25%', 'elapsed_time': '54m 42s', 'remaining_time': '5m 14s'}\n",
      "{'loss': 7.201e-05, 'seq_acc': 1.0, 'grad_norm': 0.00461969, 'learning_rate': 1.9e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334163, 'epoch': 4.57, 'global_step/max_steps': '4390/4800', 'percentage': '91.46%', 'elapsed_time': '54m 50s', 'remaining_time': '5m 7s'}\n",
      "{'loss': 0.00027368, 'seq_acc': 1.0, 'grad_norm': 0.00110634, 'learning_rate': 1.8e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334069, 'epoch': 4.58, 'global_step/max_steps': '4400/4800', 'percentage': '91.67%', 'elapsed_time': '54m 57s', 'remaining_time': '4m 59s'}\n",
      "{'loss': 5.105e-05, 'seq_acc': 1.0, 'grad_norm': 0.0013089, 'learning_rate': 1.7e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334284, 'epoch': 4.59, 'global_step/max_steps': '4410/4800', 'percentage': '91.88%', 'elapsed_time': '55m 4s', 'remaining_time': '4m 52s'}\n",
      "{'loss': 2.755e-05, 'seq_acc': 1.0, 'grad_norm': 0.00162947, 'learning_rate': 1.6e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334338, 'epoch': 4.6, 'global_step/max_steps': '4420/4800', 'percentage': '92.08%', 'elapsed_time': '55m 12s', 'remaining_time': '4m 44s'}\n",
      "{'loss': 2.317e-05, 'seq_acc': 1.0, 'grad_norm': 0.00075499, 'learning_rate': 1.6e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334549, 'epoch': 4.61, 'global_step/max_steps': '4430/4800', 'percentage': '92.29%', 'elapsed_time': '55m 19s', 'remaining_time': '4m 37s'}\n",
      "{'loss': 0.00214195, 'seq_acc': 0.9, 'grad_norm': 0.00089141, 'learning_rate': 1.5e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334539, 'epoch': 4.62, 'global_step/max_steps': '4440/4800', 'percentage': '92.50%', 'elapsed_time': '55m 26s', 'remaining_time': '4m 29s'}\n",
      "{'loss': 4.936e-05, 'seq_acc': 1.0, 'grad_norm': 0.00015208, 'learning_rate': 1.4e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334249, 'epoch': 4.64, 'global_step/max_steps': '4450/4800', 'percentage': '92.71%', 'elapsed_time': '55m 34s', 'remaining_time': '4m 22s'}\n",
      "{'loss': 1.905e-05, 'seq_acc': 1.0, 'grad_norm': 0.00054796, 'learning_rate': 1.3e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334167, 'epoch': 4.65, 'global_step/max_steps': '4460/4800', 'percentage': '92.92%', 'elapsed_time': '55m 42s', 'remaining_time': '4m 14s'}\n",
      "{'loss': 0.00014324, 'seq_acc': 1.0, 'grad_norm': 0.00695618, 'learning_rate': 1.2e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334205, 'epoch': 4.66, 'global_step/max_steps': '4470/4800', 'percentage': '93.12%', 'elapsed_time': '55m 50s', 'remaining_time': '4m 7s'}\n",
      "{'loss': 5.737e-05, 'seq_acc': 1.0, 'grad_norm': 0.00067136, 'learning_rate': 1.2e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334061, 'epoch': 4.67, 'global_step/max_steps': '4480/4800', 'percentage': '93.33%', 'elapsed_time': '55m 57s', 'remaining_time': '3m 59s'}\n",
      "{'loss': 2.289e-05, 'seq_acc': 1.0, 'grad_norm': 0.00023147, 'learning_rate': 1.1e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334155, 'epoch': 4.68, 'global_step/max_steps': '4490/4800', 'percentage': '93.54%', 'elapsed_time': '56m 5s', 'remaining_time': '3m 52s'}\n",
      "{'loss': 5.738e-05, 'seq_acc': 1.0, 'grad_norm': 0.00345042, 'learning_rate': 1e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.3342, 'epoch': 4.69, 'global_step/max_steps': '4500/4800', 'percentage': '93.75%', 'elapsed_time': '56m 12s', 'remaining_time': '3m 44s'}\n",
      "{'loss': 1.499e-05, 'seq_acc': 1.0, 'grad_norm': 0.00040733, 'learning_rate': 1e-07, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334184, 'epoch': 4.7, 'global_step/max_steps': '4510/4800', 'percentage': '93.96%', 'elapsed_time': '56m 20s', 'remaining_time': '3m 37s'}\n",
      "{'loss': 5.841e-05, 'seq_acc': 1.0, 'grad_norm': 0.00350153, 'learning_rate': 9e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334196, 'epoch': 4.71, 'global_step/max_steps': '4520/4800', 'percentage': '94.17%', 'elapsed_time': '56m 27s', 'remaining_time': '3m 29s'}\n",
      "{'loss': 6.955e-05, 'seq_acc': 1.0, 'grad_norm': 0.00100777, 'learning_rate': 8e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33419, 'epoch': 4.72, 'global_step/max_steps': '4530/4800', 'percentage': '94.38%', 'elapsed_time': '56m 35s', 'remaining_time': '3m 22s'}\n",
      "{'loss': 0.00028649, 'seq_acc': 1.0, 'grad_norm': 0.0003555, 'learning_rate': 8e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33413, 'epoch': 4.73, 'global_step/max_steps': '4540/4800', 'percentage': '94.58%', 'elapsed_time': '56m 42s', 'remaining_time': '3m 14s'}\n",
      "{'loss': 4.24e-05, 'seq_acc': 1.0, 'grad_norm': 0.00516457, 'learning_rate': 7e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334016, 'epoch': 4.74, 'global_step/max_steps': '4550/4800', 'percentage': '94.79%', 'elapsed_time': '56m 50s', 'remaining_time': '3m 7s'}\n",
      "{'loss': 4.062e-05, 'seq_acc': 1.0, 'grad_norm': 0.00044946, 'learning_rate': 7e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.334034, 'epoch': 4.75, 'global_step/max_steps': '4560/4800', 'percentage': '95.00%', 'elapsed_time': '56m 57s', 'remaining_time': '2m 59s'}\n",
      "{'loss': 6.169e-05, 'seq_acc': 1.0, 'grad_norm': 0.00075102, 'learning_rate': 6e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33402, 'epoch': 4.76, 'global_step/max_steps': '4570/4800', 'percentage': '95.21%', 'elapsed_time': '57m 5s', 'remaining_time': '2m 52s'}\n",
      "{'loss': 2.501e-05, 'seq_acc': 1.0, 'grad_norm': 0.00123995, 'learning_rate': 5e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333851, 'epoch': 4.77, 'global_step/max_steps': '4580/4800', 'percentage': '95.42%', 'elapsed_time': '57m 13s', 'remaining_time': '2m 44s'}\n",
      "{'loss': 2.197e-05, 'seq_acc': 1.0, 'grad_norm': 0.0025331, 'learning_rate': 5e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333557, 'epoch': 4.78, 'global_step/max_steps': '4590/4800', 'percentage': '95.62%', 'elapsed_time': '57m 21s', 'remaining_time': '2m 37s'}\n",
      "{'loss': 2.64e-05, 'seq_acc': 1.0, 'grad_norm': 0.0016036, 'learning_rate': 5e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333516, 'epoch': 4.79, 'global_step/max_steps': '4600/4800', 'percentage': '95.83%', 'elapsed_time': '57m 29s', 'remaining_time': '2m 29s'}\n",
      "{'loss': 0.00013327, 'seq_acc': 1.0, 'grad_norm': 0.00135383, 'learning_rate': 4e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333583, 'epoch': 4.8, 'global_step/max_steps': '4610/4800', 'percentage': '96.04%', 'elapsed_time': '57m 36s', 'remaining_time': '2m 22s'}\n",
      "{'loss': 2.526e-05, 'seq_acc': 1.0, 'grad_norm': 0.0021406, 'learning_rate': 4e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333583, 'epoch': 4.81, 'global_step/max_steps': '4620/4800', 'percentage': '96.25%', 'elapsed_time': '57m 44s', 'remaining_time': '2m 14s'}\n",
      "{'loss': 1.905e-05, 'seq_acc': 1.0, 'grad_norm': 0.00055123, 'learning_rate': 3e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333581, 'epoch': 4.82, 'global_step/max_steps': '4630/4800', 'percentage': '96.46%', 'elapsed_time': '57m 51s', 'remaining_time': '2m 7s'}\n",
      "{'loss': 4.645e-05, 'seq_acc': 1.0, 'grad_norm': 0.00066746, 'learning_rate': 3e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33371, 'epoch': 4.83, 'global_step/max_steps': '4640/4800', 'percentage': '96.67%', 'elapsed_time': '57m 58s', 'remaining_time': '1m 59s'}\n",
      "{'loss': 0.00058078, 'seq_acc': 1.0, 'grad_norm': 0.00031735, 'learning_rate': 3e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333727, 'epoch': 4.84, 'global_step/max_steps': '4650/4800', 'percentage': '96.88%', 'elapsed_time': '58m 6s', 'remaining_time': '1m 52s'}\n",
      "{'loss': 1.13e-05, 'seq_acc': 1.0, 'grad_norm': 0.000488, 'learning_rate': 2e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333751, 'epoch': 4.85, 'global_step/max_steps': '4660/4800', 'percentage': '97.08%', 'elapsed_time': '58m 13s', 'remaining_time': '1m 44s'}\n",
      "{'loss': 1.524e-05, 'seq_acc': 1.0, 'grad_norm': 0.00039594, 'learning_rate': 2e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333568, 'epoch': 4.86, 'global_step/max_steps': '4670/4800', 'percentage': '97.29%', 'elapsed_time': '58m 21s', 'remaining_time': '1m 37s'}\n",
      "{'loss': 3.506e-05, 'seq_acc': 1.0, 'grad_norm': 0.00637708, 'learning_rate': 2e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333496, 'epoch': 4.88, 'global_step/max_steps': '4680/4800', 'percentage': '97.50%', 'elapsed_time': '58m 29s', 'remaining_time': '1m 29s'}\n",
      "{'loss': 4.691e-05, 'seq_acc': 1.0, 'grad_norm': 0.00087894, 'learning_rate': 1e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333356, 'epoch': 4.89, 'global_step/max_steps': '4690/4800', 'percentage': '97.71%', 'elapsed_time': '58m 37s', 'remaining_time': '1m 22s'}\n",
      "{'loss': 3.525e-05, 'seq_acc': 1.0, 'grad_norm': 0.00111044, 'learning_rate': 1e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333389, 'epoch': 4.9, 'global_step/max_steps': '4700/4800', 'percentage': '97.92%', 'elapsed_time': '58m 44s', 'remaining_time': '1m 14s'}\n",
      "{'loss': 1.114e-05, 'seq_acc': 1.0, 'grad_norm': 0.00042209, 'learning_rate': 1e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333547, 'epoch': 4.91, 'global_step/max_steps': '4710/4800', 'percentage': '98.12%', 'elapsed_time': '58m 51s', 'remaining_time': '1m 7s'}\n",
      "{'loss': 7.56e-06, 'seq_acc': 1.0, 'grad_norm': 0.00318759, 'learning_rate': 1e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.33361, 'epoch': 4.92, 'global_step/max_steps': '4720/4800', 'percentage': '98.33%', 'elapsed_time': '58m 58s', 'remaining_time': '59s'}\n",
      "{'loss': 1.416e-05, 'seq_acc': 1.0, 'grad_norm': 0.0012738, 'learning_rate': 1e-08, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333672, 'epoch': 4.93, 'global_step/max_steps': '4730/4800', 'percentage': '98.54%', 'elapsed_time': '59m 6s', 'remaining_time': '52s'}\n",
      "{'loss': 7.016e-05, 'seq_acc': 1.0, 'grad_norm': 0.00046094, 'learning_rate': 0.0, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333672, 'epoch': 4.94, 'global_step/max_steps': '4740/4800', 'percentage': '98.75%', 'elapsed_time': '59m 13s', 'remaining_time': '44s'}\n",
      "{'loss': 3.048e-05, 'seq_acc': 1.0, 'grad_norm': 7.758e-05, 'learning_rate': 0.0, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333784, 'epoch': 4.95, 'global_step/max_steps': '4750/4800', 'percentage': '98.96%', 'elapsed_time': '59m 21s', 'remaining_time': '37s'}\n",
      "{'loss': 1.181e-05, 'seq_acc': 1.0, 'grad_norm': 0.00034048, 'learning_rate': 0.0, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333795, 'epoch': 4.96, 'global_step/max_steps': '4760/4800', 'percentage': '99.17%', 'elapsed_time': '59m 28s', 'remaining_time': '29s'}\n",
      "{'loss': 4.996e-05, 'seq_acc': 1.0, 'grad_norm': 0.00036137, 'learning_rate': 0.0, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333685, 'epoch': 4.97, 'global_step/max_steps': '4770/4800', 'percentage': '99.38%', 'elapsed_time': '59m 36s', 'remaining_time': '22s'}\n",
      "{'loss': 0.00012208, 'seq_acc': 1.0, 'grad_norm': 0.00081492, 'learning_rate': 0.0, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333727, 'epoch': 4.98, 'global_step/max_steps': '4780/4800', 'percentage': '99.58%', 'elapsed_time': '59m 43s', 'remaining_time': '14s'}\n",
      "{'loss': 3.927e-05, 'seq_acc': 1.0, 'grad_norm': 0.00064586, 'learning_rate': 0.0, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333863, 'epoch': 4.99, 'global_step/max_steps': '4790/4800', 'percentage': '99.79%', 'elapsed_time': '59m 50s', 'remaining_time': '7s'}\n",
      "{'loss': 1.937e-05, 'seq_acc': 1.0, 'grad_norm': 0.00015211, 'learning_rate': 0.0, 'memory(GiB)': 68.98, 'train_speed(iter/s)': 1.333729, 'epoch': 5.0, 'global_step/max_steps': '4800/4800', 'percentage': '100.00%', 'elapsed_time': '59m 58s', 'remaining_time': '0s'}\n",
      "[INFO:swift] Saving model checkpoint to /data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800\n",
      "{'train_runtime': 3608.7183, 'train_samples_per_second': 1.33, 'train_steps_per_second': 1.33, 'train_loss': 0.01069459, 'epoch': 5.0, 'global_step/max_steps': '4800/4800', 'percentage': '100.00%', 'elapsed_time': '1h 0m 8s', 'remaining_time': '0s'}\n",
      "Train: 100%|██████████████████████████████| 4800/4800 [1:00:08<00:00,  1.33it/s]\n",
      "[INFO:swift] last_model_checkpoint: /data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800\n",
      "[INFO:swift] best_model_checkpoint: None\n",
      "[INFO:swift] images_dir: /data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/images\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:31:08.818624\n"
     ]
    }
   ],
   "source": [
    "# LoRA fine-tune of Qwen2-7B-Instruct with ms-swift on 4 GPUs (rank 512, alpha 1024, 5 epochs).\n",
    "# Checkpoints are written under data/models/{version}/<run-timestamp>/checkpoint-<step>;\n",
    "# eval/save steps are set to 1000000 so only the final checkpoint is kept (save_total_limit 1).\n",
    "from IPython import get_ipython\n",
    "get_ipython().system(f'''CUDA_VISIBLE_DEVICES=0,1,2,3 swift sft \\\n",
    "    --model ../../public_data/Qwen2-7B-Instruct \\\n",
    "    --output_dir data/models/{version} \\\n",
    "    --model_revision master \\\n",
    "    --torch_dtype bfloat16 \\\n",
    "    --dataset data/feature/train_{version}.jsonl \\\n",
    "    --train_type lora \\\n",
    "    --lora_rank 512 \\\n",
    "    --lora_alpha 1024 \\\n",
    "    --target_modules all-linear \\\n",
    "    --num_train_epochs 5 \\\n",
    "    --max_length 8192 \\\n",
    "    --gradient_checkpointing true \\\n",
    "    --per_device_train_batch_size 1 \\\n",
    "    --learning_rate 1e-5 \\\n",
    "    --gradient_accumulation_steps 1 \\\n",
    "    --max_grad_norm 1.0 \\\n",
    "    --warmup_ratio 0.03 \\\n",
    "    --eval_steps 1000000 \\\n",
    "    --save_steps 1000000 \\\n",
    "    --save_total_limit 1 \\\n",
    "    --attn_impl flash_attn \\\n",
    "    --save_only_model true \\\n",
    "    --freeze_llm false \\\n",
    "    --freeze_vit true \\\n",
    "    --freeze_aligner true \\\n",
    "    --split_dataset_ratio 0 \\\n",
    "    --acc_strategy seq \\\n",
    "    --logging_steps 10''')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "691781b6-17ae-4c53-983d-b73f2e61c5ac",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_checkpoint_path(version):\n",
    "    \"\"\"Return the path of the latest (highest-step) checkpoint dir for `version`.\n",
    "\n",
    "    Looks in {path}models/{version}, picks the newest training run directory\n",
    "    (run names like 'v0-20250318-112919' sort chronologically), then selects the\n",
    "    checkpoint with the largest global step.\n",
    "    \"\"\"\n",
    "    model_path=f'models/{version}'\n",
    "    l1=os.listdir(path+f'{model_path}')\n",
    "    path2=os.path.join(path+f'{model_path}',sorted(l1)[-1])\n",
    "    # Keep only plain 'checkpoint-<steps>' dirs. A leftover 'checkpoint-<steps>-merged'\n",
    "    # (created by `swift infer --merge_lora true` in the same run dir) would otherwise\n",
    "    # tie on step count and could be returned as the adapter path by mistake.\n",
    "    ckpts=[i for i in os.listdir(path2) if i.startswith('checkpoint-') and i.split('-',1)[1].isdigit()]\n",
    "    # Highest global step == most recent checkpoint; no need for a DataFrame sort here.\n",
    "    checkpoint_path=max(ckpts,key=lambda x:int(x.split('-')[1]))\n",
    "    return os.path.join(path2,checkpoint_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "50aef7f9-d323-4452-92d8-0ee5445ca37c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Resolve the newest checkpoint produced by the training run above.\n",
    "checkpoint_path=get_checkpoint_path(version)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "d54d05bd-2614-411c-94ae-21c4fb4eab61",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/infer.py --model ../../public_data/Qwen2-7B-Instruct --adapters data/models/L4090-v012/v0-20250318-112919/checkpoint-4800 --merge_lora true --infer_backend vllm --model_revision master --torch_dtype bfloat16 --max_length 8192 --attn_impl flash_attn --max_new_tokens 1000 --temperature 0.1 --top_p 0.7 --tensor_parallel_size 1 --max_model_len 8192 --enforce_eager true --split_dataset_ratio 0 --repetition_penalty 1.05 --device_map auto --result_path data/submissions/test_preds_L4090-v012.jsonl --val_dataset data/feature/test_L4090-v012.jsonl`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] Loading the model using model_dir: data/models/L4090-v012/v0-20250318-112919/checkpoint-4800\n",
      "[INFO:swift] Successfully loaded /data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800/args.json.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Because len(args.val_dataset) > 0, setting split_dataset_ratio: 0.0\n",
      "[INFO:swift] Setting args.eval_human: False\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: InferArguments(model='../../public_data/Qwen2-7B-Instruct', model_type='qwen2', model_revision='master', task_type='causal_lm', torch_dtype=torch.bfloat16, attn_impl='flash_attn', num_labels=None, rope_scaling=None, device_map='auto', max_memory={}, local_repo_path=None, template='qwen', system=None, max_length=8192, truncation_strategy='delete', max_pixels=None, tools_prompt='react_en', norm_bbox=None, response_prefix=None, padding_side='right', loss_scale='default', sequence_parallel_size=1, use_chat_template=True, template_backend='swift', dataset=[], val_dataset=['data/feature/test_L4090-v012.jsonl'], split_dataset_ratio=0.0, data_seed=42, dataset_num_proc=1, streaming=False, enable_cache=False, download_mode='reuse_dataset_if_exists', columns={}, strict=False, remove_unused_columns=True, model_name=[None, None], model_author=[None, None], custom_dataset_info=[], quant_method=None, quant_bits=None, hqq_axis=None, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_quant_storage=None, max_new_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stream=False, stop_words=[], logprobs=False, top_logprobs=None, ckpt_dir='/data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800', load_dataset_config=None, lora_modules=[], tuner_backend='peft', train_type='lora', adapters=['/data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800'], external_plugins=[], seed=42, model_kwargs={}, load_args=True, load_data_args=False, use_hf=False, hub_token=None, custom_register_path=[], ignore_args_error=False, use_swift_lora=False, tp=1, session_len=None, cache_max_entry_count=0.8, quant_policy=0, vision_batch_size=1, gpu_memory_utilization=0.9, tensor_parallel_size=1, pipeline_parallel_size=1, max_num_seqs=256, max_model_len=8192, disable_custom_all_reduce=False, enforce_eager=True, 
limit_mm_per_prompt={}, vllm_max_lora_rank=16, enable_prefix_caching=False, merge_lora=True, safe_serialization=True, max_shard_size='5GB', infer_backend='vllm', result_path='/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012.jsonl', metric=None, max_batch_size=1, ddp_backend=None, val_dataset_sample=None)\n",
      "[INFO:swift] merge_device_map: cpu\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] model_kwargs: {'device_map': 'cpu'}\n",
      "Loading checkpoint shards: 100%|██████████████████| 4/4 [00:00<00:00,  4.99it/s]\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Merge LoRA...\n",
      "[INFO:swift] Saving merged weights...\n",
      "[INFO:swift] Successfully merged LoRA and saved in /data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800-merged.\n",
      "[INFO:swift] Loading the model using model_dir: /data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800-merged\n",
      "[INFO:swift] Successfully loaded /data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800-merged/args.json.\n",
      "INFO 03-18 12:33:47 __init__.py:207] Automatically detected platform cuda.\n",
      "INFO 03-18 12:33:53 config.py:549] This model supports multiple tasks: {'generate', 'embed', 'score', 'reward', 'classify'}. Defaulting to 'generate'.\n",
      "WARNING 03-18 12:33:53 cuda.py:95] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
      "WARNING 03-18 12:33:53 config.py:685] Async output processing is not supported on the current platform type cuda.\n",
      "INFO 03-18 12:33:53 llm_engine.py:234] Initializing a V0 LLM engine (v0.7.3) with config: model='/data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800-merged', speculative_config=None, tokenizer='/data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800-merged', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=8192, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800-merged, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={\"splitting_ops\":[],\"compile_sizes\":[],\"cudagraph_capture_sizes\":[],\"max_capture_size\":0}, use_cached_outputs=False, \n",
      "INFO 03-18 12:33:54 cuda.py:229] Using Flash Attention backend.\n",
      "INFO 03-18 12:33:55 model_runner.py:1110] Starting to load model /data/xiaosa/Competitions/synthetic_data/data/models/L4090-v012/v0-20250318-112919/checkpoint-4800-merged...\n",
      "Loading safetensors checkpoint shards:   0% Completed | 0/4 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:  25% Completed | 1/4 [00:01<00:03,  1.12s/it]\n",
      "Loading safetensors checkpoint shards:  50% Completed | 2/4 [00:02<00:02,  1.26s/it]\n",
      "Loading safetensors checkpoint shards:  75% Completed | 3/4 [00:03<00:01,  1.32s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.05it/s]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.07s/it]\n",
      "\n",
      "INFO 03-18 12:33:59 model_runner.py:1115] Loading model weights took 14.2487 GB\n",
      "INFO 03-18 12:34:01 worker.py:267] Memory profiling takes 1.29 seconds\n",
      "INFO 03-18 12:34:01 worker.py:267] the current vLLM instance can use total_gpu_memory (23.64GiB) x gpu_memory_utilization (0.90) = 21.28GiB\n",
      "INFO 03-18 12:34:01 worker.py:267] model weights take 14.25GiB; non_torch_memory takes 0.07GiB; PyTorch activation peak memory takes 1.44GiB; the rest of the memory reserved for KV Cache is 5.52GiB.\n",
      "INFO 03-18 12:34:01 executor_base.py:111] # cuda blocks: 6457, # CPU blocks: 4681\n",
      "INFO 03-18 12:34:01 executor_base.py:116] Maximum concurrency for 8192 tokens per request: 12.61x\n",
      "INFO 03-18 12:34:05 llm_engine.py:436] init engine (profile, create kv cache, warmup model) took 5.99 seconds\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Start time of running main: 2025-03-18 12:34:05.998190\n",
      "[INFO:swift] request_config: RequestConfig(max_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stop=[], seed=None, stream=False, logprobs=False, top_logprobs=None, n=1, best_of=None, presence_penalty=0.0, frequency_penalty=0.0, length_penalty=1.0)\n",
      "Generating train split: 2289 examples [00:00, 19086.02 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-sw3p0yx5\n",
      "Map: 100%|████████████████████████| 2289/2289 [00:00<00:00, 15304.04 examples/s]\n",
      "[INFO:swift] val_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 2289\n",
      "})\n",
      "100%|███████████████████████████████████████| 2289/2289 [07:34<00:00,  5.04it/s]\n",
      "[rank-1] {'num_prompt_tokens': 3182871, 'num_generated_tokens': 199683, 'num_samples': 2289, 'runtime': 454.8538416659994, 'samples/s': 5.032385769494764, 'tokens/s': 439.0047564919279}\n",
      "[INFO:swift] The inference results have been saved to result_path: `/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012.jsonl`.\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:41:41.884185\n",
      "[rank0]:[W318 12:41:42.464307863 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present,  but this warning has only been added since PyTorch 2.4 (function operator())\n"
     ]
    }
   ],
   "source": [
    "# Batch inference with ms-swift + vLLM: merges the LoRA adapter into the base model\n",
    "# (written next to the checkpoint as '<checkpoint>-merged'), then generates predictions\n",
    "# for the test set into data/submissions/test_preds_{version}.jsonl.\n",
    "from IPython import get_ipython\n",
    "get_ipython().system(f'''CUDA_VISIBLE_DEVICES=2 swift infer \\\n",
    "    --model ../../public_data/Qwen2-7B-Instruct \\\n",
    "    --adapters {checkpoint_path} \\\n",
    "    --merge_lora true \\\n",
    "    --infer_backend vllm \\\n",
    "    --model_revision master \\\n",
    "    --torch_dtype bfloat16 \\\n",
    "    --max_length 8192 \\\n",
    "    --attn_impl flash_attn \\\n",
    "    --max_new_tokens 1000 \\\n",
    "    --temperature 0.1 \\\n",
    "    --top_p 0.7 \\\n",
    "    --tensor_parallel_size 1 \\\n",
    "    --max_model_len 8192 \\\n",
    "    --enforce_eager true \\\n",
    "    --split_dataset_ratio 0 \\\n",
    "    --repetition_penalty 1.05 \\\n",
    "    --device_map auto \\\n",
    "    --result_path data/submissions/test_preds_{version}.jsonl \\\n",
    "    --val_dataset data/feature/test_{version}.jsonl''')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "49290993-0a92-4e2b-9651-ee24094a2810",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Delete the merged-weights copy created by `swift infer --merge_lora true` to free ~14 GB of disk.\n",
    "get_ipython().system(f'rm -rf {checkpoint_path}-merged')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "303af5c4-ff72-4eb3-8968-00b7b8dfbebc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the inference results and re-attach the test ids.\n",
    "# NOTE(review): assumes the prediction rows come back in the same order as test_df_b\n",
    "# (defined in an earlier cell) -- TODO confirm swift infer preserves input order.\n",
    "test_infer_df=pd.read_json(f'data/submissions/test_preds_{version}.jsonl',lines=True)\n",
    "test_infer_df['id']=test_df_b['id']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "84091b2c-df77-4d13-bf4b-d56dd65875e3",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/10 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/infer.py --model ../../public_data/Qwen2-7B-Instruct --merge_lora false --infer_backend vllm --model_revision master --torch_dtype bfloat16 --max_length 8192 --attn_impl flash_attn --max_new_tokens 1000 --temperature 0.1 --top_p 0.7 --tensor_parallel_size 1 --max_model_len 8192 --enforce_eager true --split_dataset_ratio 0 --repetition_penalty 1.05 --device_map auto --result_path data/submissions/test_preds_L4090-v012_process_response_0.jsonl --val_dataset data/feature/test_L4090-v012_process_response_0.jsonl`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Because len(args.val_dataset) > 0, setting split_dataset_ratio: 0.0\n",
      "[INFO:swift] Setting args.eval_human: False\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: InferArguments(model='../../public_data/Qwen2-7B-Instruct', model_type='qwen2', model_revision='master', task_type='causal_lm', torch_dtype=torch.bfloat16, attn_impl='flash_attn', num_labels=None, rope_scaling=None, device_map='auto', max_memory={}, local_repo_path=None, template='qwen', system=None, max_length=8192, truncation_strategy='delete', max_pixels=None, tools_prompt='react_en', norm_bbox=None, response_prefix=None, padding_side='right', loss_scale='default', sequence_parallel_size=1, use_chat_template=True, template_backend='swift', dataset=[], val_dataset=['data/feature/test_L4090-v012_process_response_0.jsonl'], split_dataset_ratio=0.0, data_seed=42, dataset_num_proc=1, streaming=False, enable_cache=False, download_mode='reuse_dataset_if_exists', columns={}, strict=False, remove_unused_columns=True, model_name=[None, None], model_author=[None, None], custom_dataset_info=[], quant_method=None, quant_bits=None, hqq_axis=None, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_quant_storage=None, max_new_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stream=False, stop_words=[], logprobs=False, top_logprobs=None, ckpt_dir=None, load_dataset_config=None, lora_modules=[], tuner_backend='peft', train_type='lora', adapters=[], external_plugins=[], seed=42, model_kwargs={}, load_args=True, load_data_args=False, use_hf=False, hub_token=None, custom_register_path=[], ignore_args_error=False, use_swift_lora=False, tp=1, session_len=None, cache_max_entry_count=0.8, quant_policy=0, vision_batch_size=1, gpu_memory_utilization=0.9, tensor_parallel_size=1, pipeline_parallel_size=1, max_num_seqs=256, max_model_len=8192, disable_custom_all_reduce=False, enforce_eager=True, limit_mm_per_prompt={}, vllm_max_lora_rank=16, enable_prefix_caching=False, merge_lora=False, safe_serialization=True, max_shard_size='5GB', infer_backend='vllm', 
result_path='/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_0.jsonl', metric=None, max_batch_size=1, ddp_backend=None, val_dataset_sample=None)\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "INFO 03-18 12:41:58 __init__.py:207] Automatically detected platform cuda.\n",
      "INFO 03-18 12:42:04 config.py:549] This model supports multiple tasks: {'score', 'embed', 'classify', 'reward', 'generate'}. Defaulting to 'generate'.\n",
      "WARNING 03-18 12:42:04 cuda.py:95] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
      "WARNING 03-18 12:42:04 config.py:685] Async output processing is not supported on the current platform type cuda.\n",
      "INFO 03-18 12:42:04 llm_engine.py:234] Initializing a V0 LLM engine (v0.7.3) with config: model='/data/xiaosa/public_data/Qwen2-7B-Instruct', speculative_config=None, tokenizer='/data/xiaosa/public_data/Qwen2-7B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=8192, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/data/xiaosa/public_data/Qwen2-7B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={\"splitting_ops\":[],\"compile_sizes\":[],\"cudagraph_capture_sizes\":[],\"max_capture_size\":0}, use_cached_outputs=False, \n",
      "INFO 03-18 12:42:06 cuda.py:229] Using Flash Attention backend.\n",
      "INFO 03-18 12:42:06 model_runner.py:1110] Starting to load model /data/xiaosa/public_data/Qwen2-7B-Instruct...\n",
      "Loading safetensors checkpoint shards:   0% Completed | 0/4 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:  25% Completed | 1/4 [00:01<00:03,  1.01s/it]\n",
      "Loading safetensors checkpoint shards:  50% Completed | 2/4 [00:02<00:02,  1.07s/it]\n",
      "Loading safetensors checkpoint shards:  75% Completed | 3/4 [00:03<00:01,  1.08s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.05s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.06s/it]\n",
      "\n",
      "INFO 03-18 12:42:11 model_runner.py:1115] Loading model weights took 14.2487 GB\n",
      "INFO 03-18 12:42:12 worker.py:267] Memory profiling takes 1.34 seconds\n",
      "INFO 03-18 12:42:12 worker.py:267] the current vLLM instance can use total_gpu_memory (23.64GiB) x gpu_memory_utilization (0.90) = 21.28GiB\n",
      "INFO 03-18 12:42:12 worker.py:267] model weights take 14.25GiB; non_torch_memory takes 0.07GiB; PyTorch activation peak memory takes 1.44GiB; the rest of the memory reserved for KV Cache is 5.52GiB.\n",
      "INFO 03-18 12:42:12 executor_base.py:111] # cuda blocks: 6457, # CPU blocks: 4681\n",
      "INFO 03-18 12:42:12 executor_base.py:116] Maximum concurrency for 8192 tokens per request: 12.61x\n",
      "INFO 03-18 12:42:17 llm_engine.py:436] init engine (profile, create kv cache, warmup model) took 6.02 seconds\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Start time of running main: 2025-03-18 12:42:17.303660\n",
      "[INFO:swift] request_config: RequestConfig(max_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stop=[], seed=None, stream=False, logprobs=False, top_logprobs=None, n=1, best_of=None, presence_penalty=0.0, frequency_penalty=0.0, length_penalty=1.0)\n",
      "Generating train split: 36 examples [00:00, 11040.06 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-8pbg0bjq\n",
      "Map: 100%|█████████████████████████████| 36/36 [00:00<00:00, 5407.35 examples/s]\n",
      "[INFO:swift] val_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 36\n",
      "})\n",
      "100%|███████████████████████████████████████████| 36/36 [00:13<00:00,  2.57it/s]\n",
      "[rank-1] {'num_prompt_tokens': 6766, 'num_generated_tokens': 8812, 'num_samples': 36, 'runtime': 14.115540411999973, 'samples/s': 2.550380569871452, 'tokens/s': 624.2764883807565}\n",
      "[INFO:swift] The inference results have been saved to result_path: `/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_0.jsonl`.\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:42:32.181888\n",
      "[rank0]:[W318 12:42:32.801010530 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present,  but this warning has only been added since PyTorch 2.4 (function operator())\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 10%|█         | 1/10 [00:47<07:09, 47.68s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/infer.py --model ../../public_data/Qwen2-7B-Instruct --merge_lora false --infer_backend vllm --model_revision master --torch_dtype bfloat16 --max_length 8192 --attn_impl flash_attn --max_new_tokens 1000 --temperature 0.1 --top_p 0.7 --tensor_parallel_size 1 --max_model_len 8192 --enforce_eager true --split_dataset_ratio 0 --repetition_penalty 1.05 --device_map auto --result_path data/submissions/test_preds_L4090-v012_process_response_1.jsonl --val_dataset data/feature/test_L4090-v012_process_response_1.jsonl`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Because len(args.val_dataset) > 0, setting split_dataset_ratio: 0.0\n",
      "[INFO:swift] Setting args.eval_human: False\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: InferArguments(model='../../public_data/Qwen2-7B-Instruct', model_type='qwen2', model_revision='master', task_type='causal_lm', torch_dtype=torch.bfloat16, attn_impl='flash_attn', num_labels=None, rope_scaling=None, device_map='auto', max_memory={}, local_repo_path=None, template='qwen', system=None, max_length=8192, truncation_strategy='delete', max_pixels=None, tools_prompt='react_en', norm_bbox=None, response_prefix=None, padding_side='right', loss_scale='default', sequence_parallel_size=1, use_chat_template=True, template_backend='swift', dataset=[], val_dataset=['data/feature/test_L4090-v012_process_response_1.jsonl'], split_dataset_ratio=0.0, data_seed=42, dataset_num_proc=1, streaming=False, enable_cache=False, download_mode='reuse_dataset_if_exists', columns={}, strict=False, remove_unused_columns=True, model_name=[None, None], model_author=[None, None], custom_dataset_info=[], quant_method=None, quant_bits=None, hqq_axis=None, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_quant_storage=None, max_new_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stream=False, stop_words=[], logprobs=False, top_logprobs=None, ckpt_dir=None, load_dataset_config=None, lora_modules=[], tuner_backend='peft', train_type='lora', adapters=[], external_plugins=[], seed=42, model_kwargs={}, load_args=True, load_data_args=False, use_hf=False, hub_token=None, custom_register_path=[], ignore_args_error=False, use_swift_lora=False, tp=1, session_len=None, cache_max_entry_count=0.8, quant_policy=0, vision_batch_size=1, gpu_memory_utilization=0.9, tensor_parallel_size=1, pipeline_parallel_size=1, max_num_seqs=256, max_model_len=8192, disable_custom_all_reduce=False, enforce_eager=True, limit_mm_per_prompt={}, vllm_max_lora_rank=16, enable_prefix_caching=False, merge_lora=False, safe_serialization=True, max_shard_size='5GB', infer_backend='vllm', 
result_path='/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_1.jsonl', metric=None, max_batch_size=1, ddp_backend=None, val_dataset_sample=None)\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "INFO 03-18 12:42:46 __init__.py:207] Automatically detected platform cuda.\n",
      "INFO 03-18 12:42:52 config.py:549] This model supports multiple tasks: {'reward', 'score', 'embed', 'classify', 'generate'}. Defaulting to 'generate'.\n",
      "WARNING 03-18 12:42:52 cuda.py:95] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
      "WARNING 03-18 12:42:52 config.py:685] Async output processing is not supported on the current platform type cuda.\n",
      "INFO 03-18 12:42:52 llm_engine.py:234] Initializing a V0 LLM engine (v0.7.3) with config: model='/data/xiaosa/public_data/Qwen2-7B-Instruct', speculative_config=None, tokenizer='/data/xiaosa/public_data/Qwen2-7B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=8192, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/data/xiaosa/public_data/Qwen2-7B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={\"splitting_ops\":[],\"compile_sizes\":[],\"cudagraph_capture_sizes\":[],\"max_capture_size\":0}, use_cached_outputs=False, \n",
      "INFO 03-18 12:42:53 cuda.py:229] Using Flash Attention backend.\n",
      "INFO 03-18 12:42:54 model_runner.py:1110] Starting to load model /data/xiaosa/public_data/Qwen2-7B-Instruct...\n",
      "Loading safetensors checkpoint shards:   0% Completed | 0/4 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:  25% Completed | 1/4 [00:01<00:03,  1.01s/it]\n",
      "Loading safetensors checkpoint shards:  50% Completed | 2/4 [00:02<00:02,  1.07s/it]\n",
      "Loading safetensors checkpoint shards:  75% Completed | 3/4 [00:03<00:01,  1.08s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.05s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.06s/it]\n",
      "\n",
      "INFO 03-18 12:42:58 model_runner.py:1115] Loading model weights took 14.2487 GB\n",
      "INFO 03-18 12:43:00 worker.py:267] Memory profiling takes 1.34 seconds\n",
      "INFO 03-18 12:43:00 worker.py:267] the current vLLM instance can use total_gpu_memory (23.64GiB) x gpu_memory_utilization (0.90) = 21.28GiB\n",
      "INFO 03-18 12:43:00 worker.py:267] model weights take 14.25GiB; non_torch_memory takes 0.07GiB; PyTorch activation peak memory takes 1.44GiB; the rest of the memory reserved for KV Cache is 5.52GiB.\n",
      "INFO 03-18 12:43:00 executor_base.py:111] # cuda blocks: 6457, # CPU blocks: 4681\n",
      "INFO 03-18 12:43:00 executor_base.py:116] Maximum concurrency for 8192 tokens per request: 12.61x\n",
      "INFO 03-18 12:43:04 llm_engine.py:436] init engine (profile, create kv cache, warmup model) took 6.01 seconds\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Start time of running main: 2025-03-18 12:43:05.041784\n",
      "[INFO:swift] request_config: RequestConfig(max_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stop=[], seed=None, stream=False, logprobs=False, top_logprobs=None, n=1, best_of=None, presence_penalty=0.0, frequency_penalty=0.0, length_penalty=1.0)\n",
      "Generating train split: 36 examples [00:00, 10981.45 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-w5dmkfg6\n",
      "Map: 100%|█████████████████████████████| 36/36 [00:00<00:00, 5246.89 examples/s]\n",
      "[INFO:swift] val_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 36\n",
      "})\n",
      "100%|███████████████████████████████████████████| 36/36 [00:17<00:00,  2.02it/s]\n",
      "[rank-1] {'num_prompt_tokens': 6766, 'num_generated_tokens': 8938, 'num_samples': 36, 'runtime': 17.986754793999353, 'samples/s': 2.0014727732881603, 'tokens/s': 496.9212124347105}\n",
      "[INFO:swift] The inference results have been saved to result_path: `/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_1.jsonl`.\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:43:23.830312\n",
      "[rank0]:[W318 12:43:24.464215338 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present,  but this warning has only been added since PyTorch 2.4 (function operator())\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 20%|██        | 2/10 [01:39<06:40, 50.02s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/infer.py --model ../../public_data/Qwen2-7B-Instruct --merge_lora false --infer_backend vllm --model_revision master --torch_dtype bfloat16 --max_length 8192 --attn_impl flash_attn --max_new_tokens 1000 --temperature 0.1 --top_p 0.7 --tensor_parallel_size 1 --max_model_len 8192 --enforce_eager true --split_dataset_ratio 0 --repetition_penalty 1.05 --device_map auto --result_path data/submissions/test_preds_L4090-v012_process_response_2.jsonl --val_dataset data/feature/test_L4090-v012_process_response_2.jsonl`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Because len(args.val_dataset) > 0, setting split_dataset_ratio: 0.0\n",
      "[INFO:swift] Setting args.eval_human: False\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: InferArguments(model='../../public_data/Qwen2-7B-Instruct', model_type='qwen2', model_revision='master', task_type='causal_lm', torch_dtype=torch.bfloat16, attn_impl='flash_attn', num_labels=None, rope_scaling=None, device_map='auto', max_memory={}, local_repo_path=None, template='qwen', system=None, max_length=8192, truncation_strategy='delete', max_pixels=None, tools_prompt='react_en', norm_bbox=None, response_prefix=None, padding_side='right', loss_scale='default', sequence_parallel_size=1, use_chat_template=True, template_backend='swift', dataset=[], val_dataset=['data/feature/test_L4090-v012_process_response_2.jsonl'], split_dataset_ratio=0.0, data_seed=42, dataset_num_proc=1, streaming=False, enable_cache=False, download_mode='reuse_dataset_if_exists', columns={}, strict=False, remove_unused_columns=True, model_name=[None, None], model_author=[None, None], custom_dataset_info=[], quant_method=None, quant_bits=None, hqq_axis=None, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_quant_storage=None, max_new_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stream=False, stop_words=[], logprobs=False, top_logprobs=None, ckpt_dir=None, load_dataset_config=None, lora_modules=[], tuner_backend='peft', train_type='lora', adapters=[], external_plugins=[], seed=42, model_kwargs={}, load_args=True, load_data_args=False, use_hf=False, hub_token=None, custom_register_path=[], ignore_args_error=False, use_swift_lora=False, tp=1, session_len=None, cache_max_entry_count=0.8, quant_policy=0, vision_batch_size=1, gpu_memory_utilization=0.9, tensor_parallel_size=1, pipeline_parallel_size=1, max_num_seqs=256, max_model_len=8192, disable_custom_all_reduce=False, enforce_eager=True, limit_mm_per_prompt={}, vllm_max_lora_rank=16, enable_prefix_caching=False, merge_lora=False, safe_serialization=True, max_shard_size='5GB', infer_backend='vllm', 
result_path='/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_2.jsonl', metric=None, max_batch_size=1, ddp_backend=None, val_dataset_sample=None)\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "INFO 03-18 12:43:37 __init__.py:207] Automatically detected platform cuda.\n",
      "INFO 03-18 12:43:44 config.py:549] This model supports multiple tasks: {'reward', 'classify', 'score', 'embed', 'generate'}. Defaulting to 'generate'.\n",
      "WARNING 03-18 12:43:44 cuda.py:95] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
      "WARNING 03-18 12:43:44 config.py:685] Async output processing is not supported on the current platform type cuda.\n",
      "INFO 03-18 12:43:44 llm_engine.py:234] Initializing a V0 LLM engine (v0.7.3) with config: model='/data/xiaosa/public_data/Qwen2-7B-Instruct', speculative_config=None, tokenizer='/data/xiaosa/public_data/Qwen2-7B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=8192, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/data/xiaosa/public_data/Qwen2-7B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={\"splitting_ops\":[],\"compile_sizes\":[],\"cudagraph_capture_sizes\":[],\"max_capture_size\":0}, use_cached_outputs=False, \n",
      "INFO 03-18 12:43:45 cuda.py:229] Using Flash Attention backend.\n",
      "INFO 03-18 12:43:45 model_runner.py:1110] Starting to load model /data/xiaosa/public_data/Qwen2-7B-Instruct...\n",
      "Loading safetensors checkpoint shards:   0% Completed | 0/4 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:  25% Completed | 1/4 [00:01<00:03,  1.01s/it]\n",
      "Loading safetensors checkpoint shards:  50% Completed | 2/4 [00:02<00:02,  1.07s/it]\n",
      "Loading safetensors checkpoint shards:  75% Completed | 3/4 [00:03<00:01,  1.08s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.05s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.06s/it]\n",
      "\n",
      "INFO 03-18 12:43:50 model_runner.py:1115] Loading model weights took 14.2487 GB\n",
      "INFO 03-18 12:43:51 worker.py:267] Memory profiling takes 1.34 seconds\n",
      "INFO 03-18 12:43:51 worker.py:267] the current vLLM instance can use total_gpu_memory (23.64GiB) x gpu_memory_utilization (0.90) = 21.28GiB\n",
      "INFO 03-18 12:43:51 worker.py:267] model weights take 14.25GiB; non_torch_memory takes 0.07GiB; PyTorch activation peak memory takes 1.44GiB; the rest of the memory reserved for KV Cache is 5.52GiB.\n",
      "INFO 03-18 12:43:52 executor_base.py:111] # cuda blocks: 6457, # CPU blocks: 4681\n",
      "INFO 03-18 12:43:52 executor_base.py:116] Maximum concurrency for 8192 tokens per request: 12.61x\n",
      "INFO 03-18 12:43:56 llm_engine.py:436] init engine (profile, create kv cache, warmup model) took 6.01 seconds\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Start time of running main: 2025-03-18 12:43:56.576356\n",
      "[INFO:swift] request_config: RequestConfig(max_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stop=[], seed=None, stream=False, logprobs=False, top_logprobs=None, n=1, best_of=None, presence_penalty=0.0, frequency_penalty=0.0, length_penalty=1.0)\n",
      "Generating train split: 36 examples [00:00, 10792.29 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-wmng9uug\n",
      "Map: 100%|█████████████████████████████| 36/36 [00:00<00:00, 5268.49 examples/s]\n",
      "[INFO:swift] val_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 36\n",
      "})\n",
      "100%|███████████████████████████████████████████| 36/36 [00:11<00:00,  3.04it/s]\n",
      "[rank-1] {'num_prompt_tokens': 6766, 'num_generated_tokens': 8573, 'num_samples': 36, 'runtime': 11.966993946000002, 'samples/s': 3.0082742719221556, 'tokens/s': 716.3870925885733}\n",
      "[INFO:swift] The inference results have been saved to result_path: `/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_2.jsonl`.\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:44:09.324767\n",
      "[rank0]:[W318 12:44:10.939360150 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present,  but this warning has only been added since PyTorch 2.4 (function operator())\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 30%|███       | 3/10 [02:24<05:35, 47.94s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/infer.py --model ../../public_data/Qwen2-7B-Instruct --merge_lora false --infer_backend vllm --model_revision master --torch_dtype bfloat16 --max_length 8192 --attn_impl flash_attn --max_new_tokens 1000 --temperature 0.1 --top_p 0.7 --tensor_parallel_size 1 --max_model_len 8192 --enforce_eager true --split_dataset_ratio 0 --repetition_penalty 1.05 --device_map auto --result_path data/submissions/test_preds_L4090-v012_process_response_3.jsonl --val_dataset data/feature/test_L4090-v012_process_response_3.jsonl`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Because len(args.val_dataset) > 0, setting split_dataset_ratio: 0.0\n",
      "[INFO:swift] Setting args.eval_human: False\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: InferArguments(model='../../public_data/Qwen2-7B-Instruct', model_type='qwen2', model_revision='master', task_type='causal_lm', torch_dtype=torch.bfloat16, attn_impl='flash_attn', num_labels=None, rope_scaling=None, device_map='auto', max_memory={}, local_repo_path=None, template='qwen', system=None, max_length=8192, truncation_strategy='delete', max_pixels=None, tools_prompt='react_en', norm_bbox=None, response_prefix=None, padding_side='right', loss_scale='default', sequence_parallel_size=1, use_chat_template=True, template_backend='swift', dataset=[], val_dataset=['data/feature/test_L4090-v012_process_response_3.jsonl'], split_dataset_ratio=0.0, data_seed=42, dataset_num_proc=1, streaming=False, enable_cache=False, download_mode='reuse_dataset_if_exists', columns={}, strict=False, remove_unused_columns=True, model_name=[None, None], model_author=[None, None], custom_dataset_info=[], quant_method=None, quant_bits=None, hqq_axis=None, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_quant_storage=None, max_new_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stream=False, stop_words=[], logprobs=False, top_logprobs=None, ckpt_dir=None, load_dataset_config=None, lora_modules=[], tuner_backend='peft', train_type='lora', adapters=[], external_plugins=[], seed=42, model_kwargs={}, load_args=True, load_data_args=False, use_hf=False, hub_token=None, custom_register_path=[], ignore_args_error=False, use_swift_lora=False, tp=1, session_len=None, cache_max_entry_count=0.8, quant_policy=0, vision_batch_size=1, gpu_memory_utilization=0.9, tensor_parallel_size=1, pipeline_parallel_size=1, max_num_seqs=256, max_model_len=8192, disable_custom_all_reduce=False, enforce_eager=True, limit_mm_per_prompt={}, vllm_max_lora_rank=16, enable_prefix_caching=False, merge_lora=False, safe_serialization=True, max_shard_size='5GB', infer_backend='vllm', 
result_path='/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_3.jsonl', metric=None, max_batch_size=1, ddp_backend=None, val_dataset_sample=None)\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "INFO 03-18 12:44:23 __init__.py:207] Automatically detected platform cuda.\n",
      "INFO 03-18 12:44:29 config.py:549] This model supports multiple tasks: {'classify', 'embed', 'generate', 'score', 'reward'}. Defaulting to 'generate'.\n",
      "WARNING 03-18 12:44:29 cuda.py:95] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
      "WARNING 03-18 12:44:29 config.py:685] Async output processing is not supported on the current platform type cuda.\n",
      "INFO 03-18 12:44:29 llm_engine.py:234] Initializing a V0 LLM engine (v0.7.3) with config: model='/data/xiaosa/public_data/Qwen2-7B-Instruct', speculative_config=None, tokenizer='/data/xiaosa/public_data/Qwen2-7B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=8192, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/data/xiaosa/public_data/Qwen2-7B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={\"splitting_ops\":[],\"compile_sizes\":[],\"cudagraph_capture_sizes\":[],\"max_capture_size\":0}, use_cached_outputs=False, \n",
      "INFO 03-18 12:44:30 cuda.py:229] Using Flash Attention backend.\n",
      "INFO 03-18 12:44:31 model_runner.py:1110] Starting to load model /data/xiaosa/public_data/Qwen2-7B-Instruct...\n",
      "Loading safetensors checkpoint shards:   0% Completed | 0/4 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:  25% Completed | 1/4 [00:01<00:03,  1.01s/it]\n",
      "Loading safetensors checkpoint shards:  50% Completed | 2/4 [00:02<00:02,  1.07s/it]\n",
      "Loading safetensors checkpoint shards:  75% Completed | 3/4 [00:03<00:01,  1.08s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.05s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.06s/it]\n",
      "\n",
      "INFO 03-18 12:44:35 model_runner.py:1115] Loading model weights took 14.2487 GB\n",
      "INFO 03-18 12:44:37 worker.py:267] Memory profiling takes 1.34 seconds\n",
      "INFO 03-18 12:44:37 worker.py:267] the current vLLM instance can use total_gpu_memory (23.64GiB) x gpu_memory_utilization (0.90) = 21.28GiB\n",
      "INFO 03-18 12:44:37 worker.py:267] model weights take 14.25GiB; non_torch_memory takes 0.07GiB; PyTorch activation peak memory takes 1.44GiB; the rest of the memory reserved for KV Cache is 5.52GiB.\n",
      "INFO 03-18 12:44:37 executor_base.py:111] # cuda blocks: 6457, # CPU blocks: 4681\n",
      "INFO 03-18 12:44:37 executor_base.py:116] Maximum concurrency for 8192 tokens per request: 12.61x\n",
      "INFO 03-18 12:44:41 llm_engine.py:436] init engine (profile, create kv cache, warmup model) took 6.00 seconds\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Start time of running main: 2025-03-18 12:44:42.043385\n",
      "[INFO:swift] request_config: RequestConfig(max_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stop=[], seed=None, stream=False, logprobs=False, top_logprobs=None, n=1, best_of=None, presence_penalty=0.0, frequency_penalty=0.0, length_penalty=1.0)\n",
      "Generating train split: 36 examples [00:00, 8971.24 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-xswmixdp\n",
      "Map: 100%|█████████████████████████████| 36/36 [00:00<00:00, 7973.54 examples/s]\n",
      "[INFO:swift] val_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 36\n",
      "})\n",
      "100%|███████████████████████████████████████████| 36/36 [00:13<00:00,  2.60it/s]\n",
      "[rank-1] {'num_prompt_tokens': 6766, 'num_generated_tokens': 8608, 'num_samples': 36, 'runtime': 13.967459151001094, 'samples/s': 2.577419386790887, 'tokens/s': 616.2896133748876}\n",
      "[INFO:swift] The inference results have been saved to result_path: `/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_3.jsonl`.\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:44:57.865422\n",
      "[rank0]:[W318 12:44:58.449104229 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present,  but this warning has only been added since PyTorch 2.4 (function operator())\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 40%|████      | 4/10 [03:13<04:48, 48.16s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/infer.py --model ../../public_data/Qwen2-7B-Instruct --merge_lora false --infer_backend vllm --model_revision master --torch_dtype bfloat16 --max_length 8192 --attn_impl flash_attn --max_new_tokens 1000 --temperature 0.1 --top_p 0.7 --tensor_parallel_size 1 --max_model_len 8192 --enforce_eager true --split_dataset_ratio 0 --repetition_penalty 1.05 --device_map auto --result_path data/submissions/test_preds_L4090-v012_process_response_4.jsonl --val_dataset data/feature/test_L4090-v012_process_response_4.jsonl`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Because len(args.val_dataset) > 0, setting split_dataset_ratio: 0.0\n",
      "[INFO:swift] Setting args.eval_human: False\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: InferArguments(model='../../public_data/Qwen2-7B-Instruct', model_type='qwen2', model_revision='master', task_type='causal_lm', torch_dtype=torch.bfloat16, attn_impl='flash_attn', num_labels=None, rope_scaling=None, device_map='auto', max_memory={}, local_repo_path=None, template='qwen', system=None, max_length=8192, truncation_strategy='delete', max_pixels=None, tools_prompt='react_en', norm_bbox=None, response_prefix=None, padding_side='right', loss_scale='default', sequence_parallel_size=1, use_chat_template=True, template_backend='swift', dataset=[], val_dataset=['data/feature/test_L4090-v012_process_response_4.jsonl'], split_dataset_ratio=0.0, data_seed=42, dataset_num_proc=1, streaming=False, enable_cache=False, download_mode='reuse_dataset_if_exists', columns={}, strict=False, remove_unused_columns=True, model_name=[None, None], model_author=[None, None], custom_dataset_info=[], quant_method=None, quant_bits=None, hqq_axis=None, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_quant_storage=None, max_new_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stream=False, stop_words=[], logprobs=False, top_logprobs=None, ckpt_dir=None, load_dataset_config=None, lora_modules=[], tuner_backend='peft', train_type='lora', adapters=[], external_plugins=[], seed=42, model_kwargs={}, load_args=True, load_data_args=False, use_hf=False, hub_token=None, custom_register_path=[], ignore_args_error=False, use_swift_lora=False, tp=1, session_len=None, cache_max_entry_count=0.8, quant_policy=0, vision_batch_size=1, gpu_memory_utilization=0.9, tensor_parallel_size=1, pipeline_parallel_size=1, max_num_seqs=256, max_model_len=8192, disable_custom_all_reduce=False, enforce_eager=True, limit_mm_per_prompt={}, vllm_max_lora_rank=16, enable_prefix_caching=False, merge_lora=False, safe_serialization=True, max_shard_size='5GB', infer_backend='vllm', 
result_path='/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_4.jsonl', metric=None, max_batch_size=1, ddp_backend=None, val_dataset_sample=None)\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "INFO 03-18 12:45:11 __init__.py:207] Automatically detected platform cuda.\n",
      "INFO 03-18 12:45:18 config.py:549] This model supports multiple tasks: {'generate', 'classify', 'embed', 'score', 'reward'}. Defaulting to 'generate'.\n",
      "WARNING 03-18 12:45:18 cuda.py:95] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
      "WARNING 03-18 12:45:18 config.py:685] Async output processing is not supported on the current platform type cuda.\n",
      "INFO 03-18 12:45:18 llm_engine.py:234] Initializing a V0 LLM engine (v0.7.3) with config: model='/data/xiaosa/public_data/Qwen2-7B-Instruct', speculative_config=None, tokenizer='/data/xiaosa/public_data/Qwen2-7B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=8192, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/data/xiaosa/public_data/Qwen2-7B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={\"splitting_ops\":[],\"compile_sizes\":[],\"cudagraph_capture_sizes\":[],\"max_capture_size\":0}, use_cached_outputs=False, \n",
      "INFO 03-18 12:45:19 cuda.py:229] Using Flash Attention backend.\n",
      "INFO 03-18 12:45:19 model_runner.py:1110] Starting to load model /data/xiaosa/public_data/Qwen2-7B-Instruct...\n",
      "Loading safetensors checkpoint shards:   0% Completed | 0/4 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:  25% Completed | 1/4 [00:01<00:03,  1.01s/it]\n",
      "Loading safetensors checkpoint shards:  50% Completed | 2/4 [00:02<00:02,  1.07s/it]\n",
      "Loading safetensors checkpoint shards:  75% Completed | 3/4 [00:03<00:01,  1.08s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.05s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.06s/it]\n",
      "\n",
      "INFO 03-18 12:45:24 model_runner.py:1115] Loading model weights took 14.2487 GB\n",
      "INFO 03-18 12:45:25 worker.py:267] Memory profiling takes 1.34 seconds\n",
      "INFO 03-18 12:45:25 worker.py:267] the current vLLM instance can use total_gpu_memory (23.64GiB) x gpu_memory_utilization (0.90) = 21.28GiB\n",
      "INFO 03-18 12:45:25 worker.py:267] model weights take 14.25GiB; non_torch_memory takes 0.07GiB; PyTorch activation peak memory takes 1.44GiB; the rest of the memory reserved for KV Cache is 5.52GiB.\n",
      "INFO 03-18 12:45:26 executor_base.py:111] # cuda blocks: 6457, # CPU blocks: 4681\n",
      "INFO 03-18 12:45:26 executor_base.py:116] Maximum concurrency for 8192 tokens per request: 12.61x\n",
      "INFO 03-18 12:45:30 llm_engine.py:436] init engine (profile, create kv cache, warmup model) took 6.01 seconds\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Start time of running main: 2025-03-18 12:45:30.537002\n",
      "[INFO:swift] request_config: RequestConfig(max_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stop=[], seed=None, stream=False, logprobs=False, top_logprobs=None, n=1, best_of=None, presence_penalty=0.0, frequency_penalty=0.0, length_penalty=1.0)\n",
      "Generating train split: 36 examples [00:00, 8649.54 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-81ggtyx1\n",
      "Map: 100%|█████████████████████████████| 36/36 [00:00<00:00, 7815.47 examples/s]\n",
      "[INFO:swift] val_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 36\n",
      "})\n",
      "100%|███████████████████████████████████████████| 36/36 [00:13<00:00,  2.64it/s]\n",
      "[rank-1] {'num_prompt_tokens': 6766, 'num_generated_tokens': 8799, 'num_samples': 36, 'runtime': 13.759590314000889, 'samples/s': 2.6163569683734464, 'tokens/s': 639.4812490199431}\n",
      "[INFO:swift] The inference results have been saved to result_path: `/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_4.jsonl`.\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:45:45.645857\n",
      "[rank0]:[W318 12:45:46.229982749 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present,  but this warning has only been added since PyTorch 2.4 (function operator())\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 50%|█████     | 5/10 [04:01<04:00, 48.03s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/infer.py --model ../../public_data/Qwen2-7B-Instruct --merge_lora false --infer_backend vllm --model_revision master --torch_dtype bfloat16 --max_length 8192 --attn_impl flash_attn --max_new_tokens 1000 --temperature 0.1 --top_p 0.7 --tensor_parallel_size 1 --max_model_len 8192 --enforce_eager true --split_dataset_ratio 0 --repetition_penalty 1.05 --device_map auto --result_path data/submissions/test_preds_L4090-v012_process_response_5.jsonl --val_dataset data/feature/test_L4090-v012_process_response_5.jsonl`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Because len(args.val_dataset) > 0, setting split_dataset_ratio: 0.0\n",
      "[INFO:swift] Setting args.eval_human: False\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: InferArguments(model='../../public_data/Qwen2-7B-Instruct', model_type='qwen2', model_revision='master', task_type='causal_lm', torch_dtype=torch.bfloat16, attn_impl='flash_attn', num_labels=None, rope_scaling=None, device_map='auto', max_memory={}, local_repo_path=None, template='qwen', system=None, max_length=8192, truncation_strategy='delete', max_pixels=None, tools_prompt='react_en', norm_bbox=None, response_prefix=None, padding_side='right', loss_scale='default', sequence_parallel_size=1, use_chat_template=True, template_backend='swift', dataset=[], val_dataset=['data/feature/test_L4090-v012_process_response_5.jsonl'], split_dataset_ratio=0.0, data_seed=42, dataset_num_proc=1, streaming=False, enable_cache=False, download_mode='reuse_dataset_if_exists', columns={}, strict=False, remove_unused_columns=True, model_name=[None, None], model_author=[None, None], custom_dataset_info=[], quant_method=None, quant_bits=None, hqq_axis=None, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_quant_storage=None, max_new_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stream=False, stop_words=[], logprobs=False, top_logprobs=None, ckpt_dir=None, load_dataset_config=None, lora_modules=[], tuner_backend='peft', train_type='lora', adapters=[], external_plugins=[], seed=42, model_kwargs={}, load_args=True, load_data_args=False, use_hf=False, hub_token=None, custom_register_path=[], ignore_args_error=False, use_swift_lora=False, tp=1, session_len=None, cache_max_entry_count=0.8, quant_policy=0, vision_batch_size=1, gpu_memory_utilization=0.9, tensor_parallel_size=1, pipeline_parallel_size=1, max_num_seqs=256, max_model_len=8192, disable_custom_all_reduce=False, enforce_eager=True, limit_mm_per_prompt={}, vllm_max_lora_rank=16, enable_prefix_caching=False, merge_lora=False, safe_serialization=True, max_shard_size='5GB', infer_backend='vllm', 
result_path='/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_5.jsonl', metric=None, max_batch_size=1, ddp_backend=None, val_dataset_sample=None)\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "INFO 03-18 12:45:59 __init__.py:207] Automatically detected platform cuda.\n",
      "INFO 03-18 12:46:05 config.py:549] This model supports multiple tasks: {'classify', 'reward', 'embed', 'generate', 'score'}. Defaulting to 'generate'.\n",
      "WARNING 03-18 12:46:05 cuda.py:95] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
      "WARNING 03-18 12:46:05 config.py:685] Async output processing is not supported on the current platform type cuda.\n",
      "INFO 03-18 12:46:05 llm_engine.py:234] Initializing a V0 LLM engine (v0.7.3) with config: model='/data/xiaosa/public_data/Qwen2-7B-Instruct', speculative_config=None, tokenizer='/data/xiaosa/public_data/Qwen2-7B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=8192, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/data/xiaosa/public_data/Qwen2-7B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={\"splitting_ops\":[],\"compile_sizes\":[],\"cudagraph_capture_sizes\":[],\"max_capture_size\":0}, use_cached_outputs=False, \n",
      "INFO 03-18 12:46:07 cuda.py:229] Using Flash Attention backend.\n",
      "INFO 03-18 12:46:07 model_runner.py:1110] Starting to load model /data/xiaosa/public_data/Qwen2-7B-Instruct...\n",
      "Loading safetensors checkpoint shards:   0% Completed | 0/4 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:  25% Completed | 1/4 [00:01<00:03,  1.01s/it]\n",
      "Loading safetensors checkpoint shards:  50% Completed | 2/4 [00:02<00:02,  1.07s/it]\n",
      "Loading safetensors checkpoint shards:  75% Completed | 3/4 [00:03<00:01,  1.08s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.05s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.06s/it]\n",
      "\n",
      "INFO 03-18 12:46:12 model_runner.py:1115] Loading model weights took 14.2487 GB\n",
      "INFO 03-18 12:46:13 worker.py:267] Memory profiling takes 1.34 seconds\n",
      "INFO 03-18 12:46:13 worker.py:267] the current vLLM instance can use total_gpu_memory (23.64GiB) x gpu_memory_utilization (0.90) = 21.28GiB\n",
      "INFO 03-18 12:46:13 worker.py:267] model weights take 14.25GiB; non_torch_memory takes 0.07GiB; PyTorch activation peak memory takes 1.44GiB; the rest of the memory reserved for KV Cache is 5.52GiB.\n",
      "INFO 03-18 12:46:13 executor_base.py:111] # cuda blocks: 6457, # CPU blocks: 4681\n",
      "INFO 03-18 12:46:13 executor_base.py:116] Maximum concurrency for 8192 tokens per request: 12.61x\n",
      "INFO 03-18 12:46:18 llm_engine.py:436] init engine (profile, create kv cache, warmup model) took 6.01 seconds\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Start time of running main: 2025-03-18 12:46:18.342225\n",
      "[INFO:swift] request_config: RequestConfig(max_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stop=[], seed=None, stream=False, logprobs=False, top_logprobs=None, n=1, best_of=None, presence_penalty=0.0, frequency_penalty=0.0, length_penalty=1.0)\n",
      "Generating train split: 36 examples [00:00, 8478.58 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-3sc8mtfr\n",
      "Map: 100%|█████████████████████████████| 36/36 [00:00<00:00, 7737.78 examples/s]\n",
      "[INFO:swift] val_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 36\n",
      "})\n",
      "100%|███████████████████████████████████████████| 36/36 [00:13<00:00,  2.63it/s]\n",
      "[rank-1] {'num_prompt_tokens': 6766, 'num_generated_tokens': 8758, 'num_samples': 36, 'runtime': 13.828785561998302, 'samples/s': 2.603265473934928, 'tokens/s': 633.3166394645028}\n",
      "[INFO:swift] The inference results have been saved to result_path: `/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_5.jsonl`.\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:46:33.927724\n",
      "[rank0]:[W318 12:46:34.508286425 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present,  but this warning has only been added since PyTorch 2.4 (function operator())\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 60%|██████    | 6/10 [04:49<03:12, 48.11s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/infer.py --model ../../public_data/Qwen2-7B-Instruct --merge_lora false --infer_backend vllm --model_revision master --torch_dtype bfloat16 --max_length 8192 --attn_impl flash_attn --max_new_tokens 1000 --temperature 0.1 --top_p 0.7 --tensor_parallel_size 1 --max_model_len 8192 --enforce_eager true --split_dataset_ratio 0 --repetition_penalty 1.05 --device_map auto --result_path data/submissions/test_preds_L4090-v012_process_response_6.jsonl --val_dataset data/feature/test_L4090-v012_process_response_6.jsonl`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Because len(args.val_dataset) > 0, setting split_dataset_ratio: 0.0\n",
      "[INFO:swift] Setting args.eval_human: False\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: InferArguments(model='../../public_data/Qwen2-7B-Instruct', model_type='qwen2', model_revision='master', task_type='causal_lm', torch_dtype=torch.bfloat16, attn_impl='flash_attn', num_labels=None, rope_scaling=None, device_map='auto', max_memory={}, local_repo_path=None, template='qwen', system=None, max_length=8192, truncation_strategy='delete', max_pixels=None, tools_prompt='react_en', norm_bbox=None, response_prefix=None, padding_side='right', loss_scale='default', sequence_parallel_size=1, use_chat_template=True, template_backend='swift', dataset=[], val_dataset=['data/feature/test_L4090-v012_process_response_6.jsonl'], split_dataset_ratio=0.0, data_seed=42, dataset_num_proc=1, streaming=False, enable_cache=False, download_mode='reuse_dataset_if_exists', columns={}, strict=False, remove_unused_columns=True, model_name=[None, None], model_author=[None, None], custom_dataset_info=[], quant_method=None, quant_bits=None, hqq_axis=None, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_quant_storage=None, max_new_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stream=False, stop_words=[], logprobs=False, top_logprobs=None, ckpt_dir=None, load_dataset_config=None, lora_modules=[], tuner_backend='peft', train_type='lora', adapters=[], external_plugins=[], seed=42, model_kwargs={}, load_args=True, load_data_args=False, use_hf=False, hub_token=None, custom_register_path=[], ignore_args_error=False, use_swift_lora=False, tp=1, session_len=None, cache_max_entry_count=0.8, quant_policy=0, vision_batch_size=1, gpu_memory_utilization=0.9, tensor_parallel_size=1, pipeline_parallel_size=1, max_num_seqs=256, max_model_len=8192, disable_custom_all_reduce=False, enforce_eager=True, limit_mm_per_prompt={}, vllm_max_lora_rank=16, enable_prefix_caching=False, merge_lora=False, safe_serialization=True, max_shard_size='5GB', infer_backend='vllm', 
result_path='/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_6.jsonl', metric=None, max_batch_size=1, ddp_backend=None, val_dataset_sample=None)\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "INFO 03-18 12:46:47 __init__.py:207] Automatically detected platform cuda.\n",
      "INFO 03-18 12:46:54 config.py:549] This model supports multiple tasks: {'classify', 'embed', 'reward', 'score', 'generate'}. Defaulting to 'generate'.\n",
      "WARNING 03-18 12:46:54 cuda.py:95] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
      "WARNING 03-18 12:46:54 config.py:685] Async output processing is not supported on the current platform type cuda.\n",
      "INFO 03-18 12:46:54 llm_engine.py:234] Initializing a V0 LLM engine (v0.7.3) with config: model='/data/xiaosa/public_data/Qwen2-7B-Instruct', speculative_config=None, tokenizer='/data/xiaosa/public_data/Qwen2-7B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=8192, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/data/xiaosa/public_data/Qwen2-7B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={\"splitting_ops\":[],\"compile_sizes\":[],\"cudagraph_capture_sizes\":[],\"max_capture_size\":0}, use_cached_outputs=False, \n",
      "INFO 03-18 12:46:55 cuda.py:229] Using Flash Attention backend.\n",
      "INFO 03-18 12:46:55 model_runner.py:1110] Starting to load model /data/xiaosa/public_data/Qwen2-7B-Instruct...\n",
      "Loading safetensors checkpoint shards:   0% Completed | 0/4 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:  25% Completed | 1/4 [00:01<00:03,  1.01s/it]\n",
      "Loading safetensors checkpoint shards:  50% Completed | 2/4 [00:02<00:02,  1.07s/it]\n",
      "Loading safetensors checkpoint shards:  75% Completed | 3/4 [00:03<00:01,  1.09s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.06s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.06s/it]\n",
      "\n",
      "INFO 03-18 12:47:00 model_runner.py:1115] Loading model weights took 14.2487 GB\n",
      "INFO 03-18 12:47:02 worker.py:267] Memory profiling takes 1.34 seconds\n",
      "INFO 03-18 12:47:02 worker.py:267] the current vLLM instance can use total_gpu_memory (23.64GiB) x gpu_memory_utilization (0.90) = 21.28GiB\n",
      "INFO 03-18 12:47:02 worker.py:267] model weights take 14.25GiB; non_torch_memory takes 0.07GiB; PyTorch activation peak memory takes 1.44GiB; the rest of the memory reserved for KV Cache is 5.52GiB.\n",
      "INFO 03-18 12:47:02 executor_base.py:111] # cuda blocks: 6457, # CPU blocks: 4681\n",
      "INFO 03-18 12:47:02 executor_base.py:116] Maximum concurrency for 8192 tokens per request: 12.61x\n",
      "INFO 03-18 12:47:06 llm_engine.py:436] init engine (profile, create kv cache, warmup model) took 6.00 seconds\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Start time of running main: 2025-03-18 12:47:06.604850\n",
      "[INFO:swift] request_config: RequestConfig(max_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stop=[], seed=None, stream=False, logprobs=False, top_logprobs=None, n=1, best_of=None, presence_penalty=0.0, frequency_penalty=0.0, length_penalty=1.0)\n",
      "Generating train split: 36 examples [00:00, 10769.20 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-nihqflwd\n",
      "Map: 100%|█████████████████████████████| 36/36 [00:00<00:00, 5383.64 examples/s]\n",
      "[INFO:swift] val_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 36\n",
      "})\n",
      "100%|███████████████████████████████████████████| 36/36 [00:14<00:00,  2.54it/s]\n",
      "[rank-1] {'num_prompt_tokens': 6766, 'num_generated_tokens': 8928, 'num_samples': 36, 'runtime': 14.284465586000806, 'samples/s': 2.520220289884772, 'tokens/s': 625.0146318914234}\n",
      "[INFO:swift] The inference results have been saved to result_path: `/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_6.jsonl`.\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:47:21.721044\n",
      "[rank0]:[W318 12:47:22.329278764 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present,  but this warning has only been added since PyTorch 2.4 (function operator())\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 70%|███████   | 7/10 [05:37<02:24, 48.02s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/infer.py --model ../../public_data/Qwen2-7B-Instruct --merge_lora false --infer_backend vllm --model_revision master --torch_dtype bfloat16 --max_length 8192 --attn_impl flash_attn --max_new_tokens 1000 --temperature 0.1 --top_p 0.7 --tensor_parallel_size 1 --max_model_len 8192 --enforce_eager true --split_dataset_ratio 0 --repetition_penalty 1.05 --device_map auto --result_path data/submissions/test_preds_L4090-v012_process_response_7.jsonl --val_dataset data/feature/test_L4090-v012_process_response_7.jsonl`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Because len(args.val_dataset) > 0, setting split_dataset_ratio: 0.0\n",
      "[INFO:swift] Setting args.eval_human: False\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: InferArguments(model='../../public_data/Qwen2-7B-Instruct', model_type='qwen2', model_revision='master', task_type='causal_lm', torch_dtype=torch.bfloat16, attn_impl='flash_attn', num_labels=None, rope_scaling=None, device_map='auto', max_memory={}, local_repo_path=None, template='qwen', system=None, max_length=8192, truncation_strategy='delete', max_pixels=None, tools_prompt='react_en', norm_bbox=None, response_prefix=None, padding_side='right', loss_scale='default', sequence_parallel_size=1, use_chat_template=True, template_backend='swift', dataset=[], val_dataset=['data/feature/test_L4090-v012_process_response_7.jsonl'], split_dataset_ratio=0.0, data_seed=42, dataset_num_proc=1, streaming=False, enable_cache=False, download_mode='reuse_dataset_if_exists', columns={}, strict=False, remove_unused_columns=True, model_name=[None, None], model_author=[None, None], custom_dataset_info=[], quant_method=None, quant_bits=None, hqq_axis=None, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_quant_storage=None, max_new_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stream=False, stop_words=[], logprobs=False, top_logprobs=None, ckpt_dir=None, load_dataset_config=None, lora_modules=[], tuner_backend='peft', train_type='lora', adapters=[], external_plugins=[], seed=42, model_kwargs={}, load_args=True, load_data_args=False, use_hf=False, hub_token=None, custom_register_path=[], ignore_args_error=False, use_swift_lora=False, tp=1, session_len=None, cache_max_entry_count=0.8, quant_policy=0, vision_batch_size=1, gpu_memory_utilization=0.9, tensor_parallel_size=1, pipeline_parallel_size=1, max_num_seqs=256, max_model_len=8192, disable_custom_all_reduce=False, enforce_eager=True, limit_mm_per_prompt={}, vllm_max_lora_rank=16, enable_prefix_caching=False, merge_lora=False, safe_serialization=True, max_shard_size='5GB', infer_backend='vllm', 
result_path='/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_7.jsonl', metric=None, max_batch_size=1, ddp_backend=None, val_dataset_sample=None)\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "INFO 03-18 12:47:35 __init__.py:207] Automatically detected platform cuda.\n",
      "INFO 03-18 12:47:42 config.py:549] This model supports multiple tasks: {'generate', 'classify', 'reward', 'score', 'embed'}. Defaulting to 'generate'.\n",
      "WARNING 03-18 12:47:42 cuda.py:95] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
      "WARNING 03-18 12:47:42 config.py:685] Async output processing is not supported on the current platform type cuda.\n",
      "INFO 03-18 12:47:42 llm_engine.py:234] Initializing a V0 LLM engine (v0.7.3) with config: model='/data/xiaosa/public_data/Qwen2-7B-Instruct', speculative_config=None, tokenizer='/data/xiaosa/public_data/Qwen2-7B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=8192, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/data/xiaosa/public_data/Qwen2-7B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={\"splitting_ops\":[],\"compile_sizes\":[],\"cudagraph_capture_sizes\":[],\"max_capture_size\":0}, use_cached_outputs=False, \n",
      "INFO 03-18 12:47:43 cuda.py:229] Using Flash Attention backend.\n",
      "INFO 03-18 12:47:43 model_runner.py:1110] Starting to load model /data/xiaosa/public_data/Qwen2-7B-Instruct...\n",
      "Loading safetensors checkpoint shards:   0% Completed | 0/4 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:  25% Completed | 1/4 [00:01<00:03,  1.01s/it]\n",
      "Loading safetensors checkpoint shards:  50% Completed | 2/4 [00:02<00:02,  1.07s/it]\n",
      "Loading safetensors checkpoint shards:  75% Completed | 3/4 [00:03<00:01,  1.08s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.05s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.06s/it]\n",
      "\n",
      "INFO 03-18 12:47:48 model_runner.py:1115] Loading model weights took 14.2487 GB\n",
      "INFO 03-18 12:47:49 worker.py:267] Memory profiling takes 1.33 seconds\n",
      "INFO 03-18 12:47:49 worker.py:267] the current vLLM instance can use total_gpu_memory (23.64GiB) x gpu_memory_utilization (0.90) = 21.28GiB\n",
      "INFO 03-18 12:47:49 worker.py:267] model weights take 14.25GiB; non_torch_memory takes 0.07GiB; PyTorch activation peak memory takes 1.44GiB; the rest of the memory reserved for KV Cache is 5.52GiB.\n",
      "INFO 03-18 12:47:50 executor_base.py:111] # cuda blocks: 6457, # CPU blocks: 4681\n",
      "INFO 03-18 12:47:50 executor_base.py:116] Maximum concurrency for 8192 tokens per request: 12.61x\n",
      "INFO 03-18 12:47:54 llm_engine.py:436] init engine (profile, create kv cache, warmup model) took 6.00 seconds\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Start time of running main: 2025-03-18 12:47:54.436237\n",
      "[INFO:swift] request_config: RequestConfig(max_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stop=[], seed=None, stream=False, logprobs=False, top_logprobs=None, n=1, best_of=None, presence_penalty=0.0, frequency_penalty=0.0, length_penalty=1.0)\n",
      "Generating train split: 36 examples [00:00, 10967.89 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-1ty1cnl6\n",
      "Map: 100%|█████████████████████████████| 36/36 [00:00<00:00, 5334.76 examples/s]\n",
      "[INFO:swift] val_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 36\n",
      "})\n",
      "100%|███████████████████████████████████████████| 36/36 [00:13<00:00,  2.64it/s]\n",
      "[rank-1] {'num_prompt_tokens': 6766, 'num_generated_tokens': 8621, 'num_samples': 36, 'runtime': 13.758184360000087, 'samples/s': 2.6166243348697047, 'tokens/s': 626.6088441919924}\n",
      "[INFO:swift] The inference results have been saved to result_path: `/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_7.jsonl`.\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:48:08.960285\n",
      "[rank0]:[W318 12:48:09.573212849 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present,  but this warning has only been added since PyTorch 2.4 (function operator())\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 80%|████████  | 8/10 [06:24<01:35, 47.78s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/infer.py --model ../../public_data/Qwen2-7B-Instruct --merge_lora false --infer_backend vllm --model_revision master --torch_dtype bfloat16 --max_length 8192 --attn_impl flash_attn --max_new_tokens 1000 --temperature 0.1 --top_p 0.7 --tensor_parallel_size 1 --max_model_len 8192 --enforce_eager true --split_dataset_ratio 0 --repetition_penalty 1.05 --device_map auto --result_path data/submissions/test_preds_L4090-v012_process_response_8.jsonl --val_dataset data/feature/test_L4090-v012_process_response_8.jsonl`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Because len(args.val_dataset) > 0, setting split_dataset_ratio: 0.0\n",
      "[INFO:swift] Setting args.eval_human: False\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: InferArguments(model='../../public_data/Qwen2-7B-Instruct', model_type='qwen2', model_revision='master', task_type='causal_lm', torch_dtype=torch.bfloat16, attn_impl='flash_attn', num_labels=None, rope_scaling=None, device_map='auto', max_memory={}, local_repo_path=None, template='qwen', system=None, max_length=8192, truncation_strategy='delete', max_pixels=None, tools_prompt='react_en', norm_bbox=None, response_prefix=None, padding_side='right', loss_scale='default', sequence_parallel_size=1, use_chat_template=True, template_backend='swift', dataset=[], val_dataset=['data/feature/test_L4090-v012_process_response_8.jsonl'], split_dataset_ratio=0.0, data_seed=42, dataset_num_proc=1, streaming=False, enable_cache=False, download_mode='reuse_dataset_if_exists', columns={}, strict=False, remove_unused_columns=True, model_name=[None, None], model_author=[None, None], custom_dataset_info=[], quant_method=None, quant_bits=None, hqq_axis=None, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_quant_storage=None, max_new_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stream=False, stop_words=[], logprobs=False, top_logprobs=None, ckpt_dir=None, load_dataset_config=None, lora_modules=[], tuner_backend='peft', train_type='lora', adapters=[], external_plugins=[], seed=42, model_kwargs={}, load_args=True, load_data_args=False, use_hf=False, hub_token=None, custom_register_path=[], ignore_args_error=False, use_swift_lora=False, tp=1, session_len=None, cache_max_entry_count=0.8, quant_policy=0, vision_batch_size=1, gpu_memory_utilization=0.9, tensor_parallel_size=1, pipeline_parallel_size=1, max_num_seqs=256, max_model_len=8192, disable_custom_all_reduce=False, enforce_eager=True, limit_mm_per_prompt={}, vllm_max_lora_rank=16, enable_prefix_caching=False, merge_lora=False, safe_serialization=True, max_shard_size='5GB', infer_backend='vllm', 
result_path='/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_8.jsonl', metric=None, max_batch_size=1, ddp_backend=None, val_dataset_sample=None)\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "INFO 03-18 12:48:22 __init__.py:207] Automatically detected platform cuda.\n",
      "INFO 03-18 12:48:29 config.py:549] This model supports multiple tasks: {'score', 'generate', 'classify', 'embed', 'reward'}. Defaulting to 'generate'.\n",
      "WARNING 03-18 12:48:29 cuda.py:95] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
      "WARNING 03-18 12:48:29 config.py:685] Async output processing is not supported on the current platform type cuda.\n",
      "INFO 03-18 12:48:29 llm_engine.py:234] Initializing a V0 LLM engine (v0.7.3) with config: model='/data/xiaosa/public_data/Qwen2-7B-Instruct', speculative_config=None, tokenizer='/data/xiaosa/public_data/Qwen2-7B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=8192, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/data/xiaosa/public_data/Qwen2-7B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={\"splitting_ops\":[],\"compile_sizes\":[],\"cudagraph_capture_sizes\":[],\"max_capture_size\":0}, use_cached_outputs=False, \n",
      "INFO 03-18 12:48:30 cuda.py:229] Using Flash Attention backend.\n",
      "INFO 03-18 12:48:30 model_runner.py:1110] Starting to load model /data/xiaosa/public_data/Qwen2-7B-Instruct...\n",
      "Loading safetensors checkpoint shards:   0% Completed | 0/4 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:  25% Completed | 1/4 [00:01<00:03,  1.01s/it]\n",
      "Loading safetensors checkpoint shards:  50% Completed | 2/4 [00:02<00:02,  1.07s/it]\n",
      "Loading safetensors checkpoint shards:  75% Completed | 3/4 [00:03<00:01,  1.08s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.05s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.06s/it]\n",
      "\n",
      "INFO 03-18 12:48:35 model_runner.py:1115] Loading model weights took 14.2487 GB\n",
      "INFO 03-18 12:48:37 worker.py:267] Memory profiling takes 1.34 seconds\n",
      "INFO 03-18 12:48:37 worker.py:267] the current vLLM instance can use total_gpu_memory (23.64GiB) x gpu_memory_utilization (0.90) = 21.28GiB\n",
      "INFO 03-18 12:48:37 worker.py:267] model weights take 14.25GiB; non_torch_memory takes 0.07GiB; PyTorch activation peak memory takes 1.44GiB; the rest of the memory reserved for KV Cache is 5.52GiB.\n",
      "INFO 03-18 12:48:37 executor_base.py:111] # cuda blocks: 6457, # CPU blocks: 4681\n",
      "INFO 03-18 12:48:37 executor_base.py:116] Maximum concurrency for 8192 tokens per request: 12.61x\n",
      "INFO 03-18 12:48:41 llm_engine.py:436] init engine (profile, create kv cache, warmup model) took 6.01 seconds\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Start time of running main: 2025-03-18 12:48:41.733082\n",
      "[INFO:swift] request_config: RequestConfig(max_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stop=[], seed=None, stream=False, logprobs=False, top_logprobs=None, n=1, best_of=None, presence_penalty=0.0, frequency_penalty=0.0, length_penalty=1.0)\n",
      "Generating train split: 36 examples [00:00, 10518.63 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-0pme50bx\n",
      "Map: 100%|█████████████████████████████| 36/36 [00:00<00:00, 5335.32 examples/s]\n",
      "[INFO:swift] val_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 36\n",
      "})\n",
      "100%|███████████████████████████████████████████| 36/36 [00:13<00:00,  2.64it/s]\n",
      "[rank-1] {'num_prompt_tokens': 6766, 'num_generated_tokens': 8805, 'num_samples': 36, 'runtime': 13.747324631000083, 'samples/s': 2.618691342955585, 'tokens/s': 640.4882576312202}\n",
      "[INFO:swift] The inference results have been saved to result_path: `/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_8.jsonl`.\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:48:56.233491\n",
      "[rank0]:[W318 12:48:56.848422940 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present,  but this warning has only been added since PyTorch 2.4 (function operator())\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 90%|█████████ | 9/10 [07:11<00:47, 47.62s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "run sh: `/home/ubuntu/anaconda3/bin/python /home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/cli/infer.py --model ../../public_data/Qwen2-7B-Instruct --merge_lora false --infer_backend vllm --model_revision master --torch_dtype bfloat16 --max_length 8192 --attn_impl flash_attn --max_new_tokens 1000 --temperature 0.1 --top_p 0.7 --tensor_parallel_size 1 --max_model_len 8192 --enforce_eager true --split_dataset_ratio 0 --repetition_penalty 1.05 --device_map auto --result_path data/submissions/test_preds_L4090-v012_process_response_9.jsonl --val_dataset data/feature/test_L4090-v012_process_response_9.jsonl`\n",
      "[INFO:swift] Successfully registered `/home/ubuntu/anaconda3/lib/python3.10/site-packages/swift/llm/dataset/data/dataset_info.json`.\n",
      "[INFO:swift] rank: -1, local_rank: -1, world_size: 1, local_world_size: 1\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "[INFO:swift] Because len(args.val_dataset) > 0, setting split_dataset_ratio: 0.0\n",
      "[INFO:swift] Setting args.eval_human: False\n",
      "[INFO:swift] Global seed set to 42\n",
      "[INFO:swift] args: InferArguments(model='../../public_data/Qwen2-7B-Instruct', model_type='qwen2', model_revision='master', task_type='causal_lm', torch_dtype=torch.bfloat16, attn_impl='flash_attn', num_labels=None, rope_scaling=None, device_map='auto', max_memory={}, local_repo_path=None, template='qwen', system=None, max_length=8192, truncation_strategy='delete', max_pixels=None, tools_prompt='react_en', norm_bbox=None, response_prefix=None, padding_side='right', loss_scale='default', sequence_parallel_size=1, use_chat_template=True, template_backend='swift', dataset=[], val_dataset=['data/feature/test_L4090-v012_process_response_9.jsonl'], split_dataset_ratio=0.0, data_seed=42, dataset_num_proc=1, streaming=False, enable_cache=False, download_mode='reuse_dataset_if_exists', columns={}, strict=False, remove_unused_columns=True, model_name=[None, None], model_author=[None, None], custom_dataset_info=[], quant_method=None, quant_bits=None, hqq_axis=None, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_quant_storage=None, max_new_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stream=False, stop_words=[], logprobs=False, top_logprobs=None, ckpt_dir=None, load_dataset_config=None, lora_modules=[], tuner_backend='peft', train_type='lora', adapters=[], external_plugins=[], seed=42, model_kwargs={}, load_args=True, load_data_args=False, use_hf=False, hub_token=None, custom_register_path=[], ignore_args_error=False, use_swift_lora=False, tp=1, session_len=None, cache_max_entry_count=0.8, quant_policy=0, vision_batch_size=1, gpu_memory_utilization=0.9, tensor_parallel_size=1, pipeline_parallel_size=1, max_num_seqs=256, max_model_len=8192, disable_custom_all_reduce=False, enforce_eager=True, limit_mm_per_prompt={}, vllm_max_lora_rank=16, enable_prefix_caching=False, merge_lora=False, safe_serialization=True, max_shard_size='5GB', infer_backend='vllm', 
result_path='/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_9.jsonl', metric=None, max_batch_size=1, ddp_backend=None, val_dataset_sample=None)\n",
      "[INFO:swift] Loading the model using model_dir: ../../public_data/Qwen2-7B-Instruct\n",
      "INFO 03-18 12:49:10 __init__.py:207] Automatically detected platform cuda.\n",
      "INFO 03-18 12:49:16 config.py:549] This model supports multiple tasks: {'generate', 'score', 'embed', 'reward', 'classify'}. Defaulting to 'generate'.\n",
      "WARNING 03-18 12:49:16 cuda.py:95] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
      "WARNING 03-18 12:49:16 config.py:685] Async output processing is not supported on the current platform type cuda.\n",
      "INFO 03-18 12:49:16 llm_engine.py:234] Initializing a V0 LLM engine (v0.7.3) with config: model='/data/xiaosa/public_data/Qwen2-7B-Instruct', speculative_config=None, tokenizer='/data/xiaosa/public_data/Qwen2-7B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=8192, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/data/xiaosa/public_data/Qwen2-7B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={\"splitting_ops\":[],\"compile_sizes\":[],\"cudagraph_capture_sizes\":[],\"max_capture_size\":0}, use_cached_outputs=False, \n",
      "INFO 03-18 12:49:17 cuda.py:229] Using Flash Attention backend.\n",
      "INFO 03-18 12:49:18 model_runner.py:1110] Starting to load model /data/xiaosa/public_data/Qwen2-7B-Instruct...\n",
      "Loading safetensors checkpoint shards:   0% Completed | 0/4 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:  25% Completed | 1/4 [00:01<00:03,  1.01s/it]\n",
      "Loading safetensors checkpoint shards:  50% Completed | 2/4 [00:02<00:02,  1.07s/it]\n",
      "Loading safetensors checkpoint shards:  75% Completed | 3/4 [00:03<00:01,  1.08s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.05s/it]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:04<00:00,  1.06s/it]\n",
      "\n",
      "INFO 03-18 12:49:22 model_runner.py:1115] Loading model weights took 14.2487 GB\n",
      "INFO 03-18 12:49:24 worker.py:267] Memory profiling takes 1.34 seconds\n",
      "INFO 03-18 12:49:24 worker.py:267] the current vLLM instance can use total_gpu_memory (23.64GiB) x gpu_memory_utilization (0.90) = 21.28GiB\n",
      "INFO 03-18 12:49:24 worker.py:267] model weights take 14.25GiB; non_torch_memory takes 0.07GiB; PyTorch activation peak memory takes 1.44GiB; the rest of the memory reserved for KV Cache is 5.52GiB.\n",
      "INFO 03-18 12:49:24 executor_base.py:111] # cuda blocks: 6457, # CPU blocks: 4681\n",
      "INFO 03-18 12:49:24 executor_base.py:116] Maximum concurrency for 8192 tokens per request: 12.61x\n",
      "INFO 03-18 12:49:28 llm_engine.py:436] init engine (profile, create kv cache, warmup model) took 6.00 seconds\n",
      "[INFO:swift] default_system: You are a helpful assistant.\n",
      "[INFO:swift] Start time of running main: 2025-03-18 12:49:28.957020\n",
      "[INFO:swift] request_config: RequestConfig(max_tokens=1000, temperature=0.1, top_k=None, top_p=0.7, repetition_penalty=1.05, num_beams=1, stop=[], seed=None, stream=False, logprobs=False, top_logprobs=None, n=1, best_of=None, presence_penalty=0.0, frequency_penalty=0.0, length_penalty=1.0)\n",
      "Generating train split: 36 examples [00:00, 10950.39 examples/s]\n",
      "[INFO:swift] create tmp_dir: /home/ubuntu/.cache/modelscope/hub/tmp/hf_datasets-eb988p4y\n",
      "Map: 100%|█████████████████████████████| 36/36 [00:00<00:00, 5399.23 examples/s]\n",
      "[INFO:swift] val_dataset: Dataset({\n",
      "    features: ['messages'],\n",
      "    num_rows: 36\n",
      "})\n",
      "100%|███████████████████████████████████████████| 36/36 [00:10<00:00,  3.39it/s]\n",
      "[rank-1] {'num_prompt_tokens': 6766, 'num_generated_tokens': 8536, 'num_samples': 36, 'runtime': 10.737676007000118, 'samples/s': 3.352680782743942, 'tokens/s': 794.9578655972857}\n",
      "[INFO:swift] The inference results have been saved to result_path: `/data/xiaosa/Competitions/synthetic_data/data/submissions/test_preds_L4090-v012_process_response_9.jsonl`.\n",
      "[INFO:swift] End time of running main: 2025-03-18 12:49:40.480068\n",
      "[rank0]:[W318 12:49:41.094838817 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present,  but this warning has only been added since PyTorch 2.4 (function operator())\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 10/10 [07:55<00:00, 47.60s/it]\n"
     ]
    }
   ],
   "source": [
    "from tqdm import tqdm\n",
    "\n",
    "def _extract_json_payload(text):\n",
    "    \"\"\"Return the content of the last ```json fenced block in `text`.\n",
    "\n",
    "    If no fence is present, the whole string is returned unchanged\n",
    "    (split on a missing marker yields the original text).\n",
    "    \"\"\"\n",
    "    return text.split('```json')[-1].split('```')[0]\n",
    "\n",
    "# Heuristic brace-balancing repairs, tried in order on an unparseable\n",
    "# payload. These replace what used to be four nested bare try/excepts.\n",
    "_REPAIR_STEPS = (\n",
    "    lambda s: s,\n",
    "    lambda s: s.replace('},', '}},'),\n",
    "    lambda s: s.replace('},', '}},').replace('}}}', '}}'),\n",
    "    lambda s: s.replace('}}', '}},').replace('}}}', '}}'),\n",
    ")\n",
    "\n",
    "def _parses_after_repair(text):\n",
    "    \"\"\"True if the JSON payload of `text` parses after some repair step.\"\"\"\n",
    "    payload = _extract_json_payload(text)\n",
    "    for repair in _REPAIR_STEPS:\n",
    "        try:\n",
    "            json.loads(repair(payload))\n",
    "            return True\n",
    "        # Exception (not bare except): don't swallow KeyboardInterrupt etc.\n",
    "        except Exception:\n",
    "            continue\n",
    "    return False\n",
    "\n",
    "error_cnt_list = []\n",
    "for round_id in tqdm(range(10)):\n",
    "    # Each round re-asks the model to repair the still-broken responses\n",
    "    # (sampling differs per run); the round with the fewest residual\n",
    "    # errors is selected in a later cell.\n",
    "    with open(path + f'feature/test_{version}_process_response_{round_id}.jsonl', 'w', encoding='utf-8') as fw:\n",
    "        for idx, query in enumerate(test_infer_df['response']):\n",
    "            if _parses_after_repair(query):\n",
    "                continue\n",
    "            # Unrepairable: emit a repair prompt; keep the row id as label.\n",
    "            response = test_df_b['id'][idx]\n",
    "            query = '我有如下json字符串，使用json.loads报错，格式上存在一定错误，请你对json字符串进行修正，返回我正确的json字符串。\\n'+query+'\\n\\n# 输出格式\\n错误原因：....\\n修正后内容：\\n```json\\n...\\n```'\n",
    "            s = json.dumps({\n",
    "                'query': query,\n",
    "                'response': str(response)\n",
    "            }, ensure_ascii=False)\n",
    "            fw.write(s + '\\n')\n",
    "    get_ipython().system(f'''CUDA_VISIBLE_DEVICES=2 swift infer \\\n",
    "        --model ../../public_data/Qwen2-7B-Instruct \\\n",
    "        --merge_lora false \\\n",
    "        --infer_backend vllm \\\n",
    "        --model_revision master \\\n",
    "        --torch_dtype bfloat16 \\\n",
    "        --max_length 8192 \\\n",
    "        --attn_impl flash_attn \\\n",
    "        --max_new_tokens 1000 \\\n",
    "        --temperature 0.1 \\\n",
    "        --top_p 0.7 \\\n",
    "        --tensor_parallel_size 1 \\\n",
    "        --max_model_len 8192 \\\n",
    "        --enforce_eager true \\\n",
    "        --split_dataset_ratio 0 \\\n",
    "        --repetition_penalty 1.05 \\\n",
    "        --device_map auto \\\n",
    "        --result_path data/submissions/test_preds_{version}_process_response_{round_id}.jsonl \\\n",
    "        --val_dataset data/feature/test_{version}_process_response_{round_id}.jsonl''')\n",
    "    # Count how many repaired responses still fail to parse this round.\n",
    "    test_infer_df_process_response = pd.read_json(f'data/submissions/test_preds_{version}_process_response_{round_id}.jsonl', lines=True)\n",
    "    error_cnt = 0\n",
    "    for resp in test_infer_df_process_response['response']:\n",
    "        try:\n",
    "            json.loads(_extract_json_payload(resp))\n",
    "        except Exception:\n",
    "            error_cnt += 1\n",
    "    error_cnt_list.append(error_cnt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "95580145-2159-4c87-ae00-7aa26852adef",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(5, 4)"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Select the sampling round with the fewest unparseable responses;\n",
    "# min(enumerate(...)) returns the first minimum, matching min()+.index().\n",
    "min_error_cnt_idx, min_error_cnt = min(enumerate(error_cnt_list), key=lambda item: item[1])\n",
    "min_error_cnt, min_error_cnt_idx"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "72b7ad49-8f31-430b-a276-274f3afc7eb1",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[8, 6, 6, 7, 5, 7, 8, 9, 8, 9]"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "error_cnt_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "c4450522-566c-4821-a37f-e9dde3857ae9",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>response</th>\n",
       "      <th>labels</th>\n",
       "      <th>logprobs</th>\n",
       "      <th>messages</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...</td>\n",
       "      <td>28</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>错误原因：在 JSON 字符串中，列表和字典的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>130</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>错误原因：提供的 JSON 字符串是一个数组，但是数组中只有一个元素，并且这个元素是一个对象...</td>\n",
       "      <td>133</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>146</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>错误原因：在Python的JSON解析中，列表和字典的元素之间需要使用逗号分隔。在您提供的J...</td>\n",
       "      <td>458</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>错误原因：在 JSON 字符串中，所有的中文字符（如“食物”、“数量”等）需要使用英文状态下...</td>\n",
       "      <td>462</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>错误原因：提供的JSON字符串是一个数组，其中包含多个元素，包括一个空数组和一个字典。在JS...</td>\n",
       "      <td>591</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>错误原因：在 JSON 字符串中，数组和对象的元素之间应该用逗号分隔，而您提供的 JSON ...</td>\n",
       "      <td>594</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>错误原因：提供的JSON字符串不是一个有效的JSON数组。在JSON中，数组应该由方括号`[...</td>\n",
       "      <td>793</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>错误原因：提供的JSON字符串是一个数组，其中包含四个元素，但最后一个元素是不完整的数组。在...</td>\n",
       "      <td>806</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>1082</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>在您提供的JSON字符串中，存在一些不规范的地方。首先，JSON数组应该以 `[` 开始，并...</td>\n",
       "      <td>1124</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>错误原因：在`engagement_metrics`字段中的`点击率`和`观看时长`值需要使...</td>\n",
       "      <td>1126</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13</th>\n",
       "      <td>根据您提供的JSON字符串，它看起来是一个数组，其中包含两个对象。每个对象都有一个`name...</td>\n",
       "      <td>1158</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14</th>\n",
       "      <td>错误原因：在Python的JSON解析中，列表和字典的元素之间需要使用逗号分隔。在您提供的J...</td>\n",
       "      <td>1228</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>15</th>\n",
       "      <td>错误原因：提供的JSON字符串是一个数组，但是每个元素应该是一个对象（字典），而不仅仅是简单...</td>\n",
       "      <td>1272</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16</th>\n",
       "      <td>错误原因：在Python的JSON解析中，列表和字典必须以特定的格式表示。在您提供的JSON...</td>\n",
       "      <td>1327</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>17</th>\n",
       "      <td>错误原因：提供的 JSON 字符串中包含了一个数组，而 JSON 实际上应该是一个对象（字典...</td>\n",
       "      <td>1460</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>18</th>\n",
       "      <td>错误原因：在构造JSON数组时，内部的数组元素之间以及对象的属性值之间没有正确使用逗号分隔。...</td>\n",
       "      <td>1483</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>19</th>\n",
       "      <td>在提供的 JSON 字符串中，存在一个错误，即在 `competitor_pricing` ...</td>\n",
       "      <td>1485</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>20</th>\n",
       "      <td>根据您提供的 JSON 字符串，它实际上是包含在 Python 列表中的字典，而不是有效的 ...</td>\n",
       "      <td>1518</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>21</th>\n",
       "      <td>错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>1566</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>22</th>\n",
       "      <td>错误原因：在`historical_data`数组中，数据项之间使用了中文逗号（，），而在J...</td>\n",
       "      <td>1568</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>23</th>\n",
       "      <td>错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>1623</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>24</th>\n",
       "      <td>根据您提供的JSON字符串，我发现存在一些不规范的格式问题。主要问题在于字典和列表的嵌套结构...</td>\n",
       "      <td>1715</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25</th>\n",
       "      <td>错误原因：在 JSON 字符串中，列表和字典的末尾应该使用 `]` 和 `}` 关闭，但是提...</td>\n",
       "      <td>1746</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>26</th>\n",
       "      <td>错误原因：在Python的JSON解析中，列表和字典的结构需要严格遵循JSON规范。在您提供...</td>\n",
       "      <td>1816</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>27</th>\n",
       "      <td>错误原因：在 JSON 字符串中，列表和字典的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>1850</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>28</th>\n",
       "      <td>错误原因：在 JSON 字符串中，列表和字典的元素之间以及它们与冒号之间需要有逗号分隔。同时...</td>\n",
       "      <td>1858</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>29</th>\n",
       "      <td>错误原因：提供的JSON字符串是一个数组，但是数组中的元素应该是一个对象（字典），而不是一个...</td>\n",
       "      <td>1935</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>30</th>\n",
       "      <td>错误原因：在提供的 JSON 字符串中，数组元素内部的浮点数没有正确闭合引号。例如，在 `[...</td>\n",
       "      <td>1942</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>31</th>\n",
       "      <td>错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...</td>\n",
       "      <td>1983</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>32</th>\n",
       "      <td>根据您提供的JSON字符串，我发现它是一个数组，其中包含多个元素。在JSON中，数组元素应该...</td>\n",
       "      <td>2059</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>33</th>\n",
       "      <td>错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...</td>\n",
       "      <td>2167</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>34</th>\n",
       "      <td>错误原因：在原始 JSON 字符串中，数组元素的结构不一致。在第一个对象中，`weather...</td>\n",
       "      <td>2194</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>35</th>\n",
       "      <td>错误原因：提供的JSON字符串中包含了一个额外的逗号和一个空列表，这在JSON数组中是不必要...</td>\n",
       "      <td>2265</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                             response  labels  logprobs  \\\n",
       "0   错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...      28       NaN   \n",
       "1   错误原因：在 JSON 字符串中，列表和字典的元素之间应该使用逗号分隔，而您提供的 JSON...     130       NaN   \n",
       "2   错误原因：提供的 JSON 字符串是一个数组，但是数组中只有一个元素，并且这个元素是一个对象...     133       NaN   \n",
       "3   错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...     146       NaN   \n",
       "4   错误原因：在Python的JSON解析中，列表和字典的元素之间需要使用逗号分隔。在您提供的J...     458       NaN   \n",
       "5   错误原因：在 JSON 字符串中，所有的中文字符（如“食物”、“数量”等）需要使用英文状态下...     462       NaN   \n",
       "6   错误原因：提供的JSON字符串是一个数组，其中包含多个元素，包括一个空数组和一个字典。在JS...     591       NaN   \n",
       "7   错误原因：在 JSON 字符串中，数组和对象的元素之间应该用逗号分隔，而您提供的 JSON ...     594       NaN   \n",
       "8   错误原因：提供的JSON字符串不是一个有效的JSON数组。在JSON中，数组应该由方括号`[...     793       NaN   \n",
       "9   错误原因：提供的JSON字符串是一个数组，其中包含四个元素，但最后一个元素是不完整的数组。在...     806       NaN   \n",
       "10  错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...    1082       NaN   \n",
       "11  在您提供的JSON字符串中，存在一些不规范的地方。首先，JSON数组应该以 `[` 开始，并...    1124       NaN   \n",
       "12  错误原因：在`engagement_metrics`字段中的`点击率`和`观看时长`值需要使...    1126       NaN   \n",
       "13  根据您提供的JSON字符串，它看起来是一个数组，其中包含两个对象。每个对象都有一个`name...    1158       NaN   \n",
       "14  错误原因：在Python的JSON解析中，列表和字典的元素之间需要使用逗号分隔。在您提供的J...    1228       NaN   \n",
       "15  错误原因：提供的JSON字符串是一个数组，但是每个元素应该是一个对象（字典），而不仅仅是简单...    1272       NaN   \n",
       "16  错误原因：在Python的JSON解析中，列表和字典必须以特定的格式表示。在您提供的JSON...    1327       NaN   \n",
       "17  错误原因：提供的 JSON 字符串中包含了一个数组，而 JSON 实际上应该是一个对象（字典...    1460       NaN   \n",
       "18  错误原因：在构造JSON数组时，内部的数组元素之间以及对象的属性值之间没有正确使用逗号分隔。...    1483       NaN   \n",
       "19  在提供的 JSON 字符串中，存在一个错误，即在 `competitor_pricing` ...    1485       NaN   \n",
       "20  根据您提供的 JSON 字符串，它实际上是包含在 Python 列表中的字典，而不是有效的 ...    1518       NaN   \n",
       "21  错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...    1566       NaN   \n",
       "22  错误原因：在`historical_data`数组中，数据项之间使用了中文逗号（，），而在J...    1568       NaN   \n",
       "23  错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...    1623       NaN   \n",
       "24  根据您提供的JSON字符串，我发现存在一些不规范的格式问题。主要问题在于字典和列表的嵌套结构...    1715       NaN   \n",
       "25  错误原因：在 JSON 字符串中，列表和字典的末尾应该使用 `]` 和 `}` 关闭，但是提...    1746       NaN   \n",
       "26  错误原因：在Python的JSON解析中，列表和字典的结构需要严格遵循JSON规范。在您提供...    1816       NaN   \n",
       "27  错误原因：在 JSON 字符串中，列表和字典的元素之间应该使用逗号分隔，而您提供的 JSON...    1850       NaN   \n",
       "28  错误原因：在 JSON 字符串中，列表和字典的元素之间以及它们与冒号之间需要有逗号分隔。同时...    1858       NaN   \n",
       "29  错误原因：提供的JSON字符串是一个数组，但是数组中的元素应该是一个对象（字典），而不是一个...    1935       NaN   \n",
       "30  错误原因：在提供的 JSON 字符串中，数组元素内部的浮点数没有正确闭合引号。例如，在 `[...    1942       NaN   \n",
       "31  错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...    1983       NaN   \n",
       "32  根据您提供的JSON字符串，我发现它是一个数组，其中包含多个元素。在JSON中，数组元素应该...    2059       NaN   \n",
       "33  错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...    2167       NaN   \n",
       "34  错误原因：在原始 JSON 字符串中，数组元素的结构不一致。在第一个对象中，`weather...    2194       NaN   \n",
       "35  错误原因：提供的JSON字符串中包含了一个额外的逗号和一个空列表，这在JSON数组中是不必要...    2265       NaN   \n",
       "\n",
       "                                             messages  \n",
       "0   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "1   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "2   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "3   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "4   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "5   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "6   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "7   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "8   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "9   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "10  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "11  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "12  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "13  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "14  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "15  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "16  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "17  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "18  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "19  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "20  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "21  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "22  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "23  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "24  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "25  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "26  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "27  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "28  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "29  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "30  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "31  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "32  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "33  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "34  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "35  [{'role': 'user', 'content': '我有如下json字符串，使用js...  "
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test_infer_df_process_response=pd.read_json(f'data/submissions/test_preds_{version}_process_response_{min_error_cnt_idx}.jsonl',lines=True)\n",
    "test_infer_df_process_response"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "022e28a3-361d-462b-9855-58076af68993",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>response</th>\n",
       "      <th>labels</th>\n",
       "      <th>logprobs</th>\n",
       "      <th>messages</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...</td>\n",
       "      <td>28</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>错误原因：在 JSON 字符串中，列表和字典的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>130</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>错误原因：提供的 JSON 字符串是一个数组，但是数组中只有一个元素，并且这个元素是一个对象...</td>\n",
       "      <td>133</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>146</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>错误原因：在Python的JSON解析中，列表和字典的元素之间需要使用逗号分隔。在您提供的J...</td>\n",
       "      <td>458</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>错误原因：在 JSON 字符串中，所有的中文字符（如“食物”、“数量”等）需要使用英文状态下...</td>\n",
       "      <td>462</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>错误原因：提供的JSON字符串是一个数组，其中包含多个元素，包括一个空数组和一个字典。在JS...</td>\n",
       "      <td>591</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>错误原因：在 JSON 字符串中，数组和对象的元素之间应该用逗号分隔，而您提供的 JSON ...</td>\n",
       "      <td>594</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>错误原因：提供的JSON字符串不是一个有效的JSON数组。在JSON中，数组应该由方括号`[...</td>\n",
       "      <td>793</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>错误原因：提供的JSON字符串是一个数组，其中包含四个元素，但最后一个元素是不完整的数组。在...</td>\n",
       "      <td>806</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>1082</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>在您提供的JSON字符串中，存在一些不规范的地方。首先，JSON数组应该以 `[` 开始，并...</td>\n",
       "      <td>1124</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>错误原因：在`engagement_metrics`字段中的`点击率`和`观看时长`值需要使...</td>\n",
       "      <td>1126</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13</th>\n",
       "      <td>根据您提供的JSON字符串，它看起来是一个数组，其中包含两个对象。每个对象都有一个`name...</td>\n",
       "      <td>1158</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14</th>\n",
       "      <td>错误原因：在Python的JSON解析中，列表和字典的元素之间需要使用逗号分隔。在您提供的J...</td>\n",
       "      <td>1228</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>15</th>\n",
       "      <td>错误原因：提供的JSON字符串是一个数组，但是每个元素应该是一个对象（字典），而不仅仅是简单...</td>\n",
       "      <td>1272</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16</th>\n",
       "      <td>错误原因：在Python的JSON解析中，列表和字典必须以特定的格式表示。在您提供的JSON...</td>\n",
       "      <td>1327</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>17</th>\n",
       "      <td>错误原因：提供的 JSON 字符串中包含了一个数组，而 JSON 实际上应该是一个对象（字典...</td>\n",
       "      <td>1460</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>18</th>\n",
       "      <td>错误原因：在构造JSON数组时，内部的数组元素之间以及对象的属性值之间没有正确使用逗号分隔。...</td>\n",
       "      <td>1483</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>19</th>\n",
       "      <td>在提供的 JSON 字符串中，存在一个错误，即在 `competitor_pricing` ...</td>\n",
       "      <td>1485</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>20</th>\n",
       "      <td>根据您提供的 JSON 字符串，它实际上是包含在 Python 列表中的字典，而不是有效的 ...</td>\n",
       "      <td>1518</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>21</th>\n",
       "      <td>错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>1566</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>22</th>\n",
       "      <td>错误原因：在`historical_data`数组中，数据项之间使用了中文逗号（，），而在J...</td>\n",
       "      <td>1568</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>23</th>\n",
       "      <td>错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>1623</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>24</th>\n",
       "      <td>根据您提供的JSON字符串，我发现存在一些不规范的格式问题。主要问题在于字典和列表的嵌套结构...</td>\n",
       "      <td>1715</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25</th>\n",
       "      <td>错误原因：在 JSON 字符串中，列表和字典的末尾应该使用 `]` 和 `}` 关闭，但是提...</td>\n",
       "      <td>1746</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>26</th>\n",
       "      <td>错误原因：在Python的JSON解析中，列表和字典的结构需要严格遵循JSON规范。在您提供...</td>\n",
       "      <td>1816</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>27</th>\n",
       "      <td>错误原因：在 JSON 字符串中，列表和字典的元素之间应该使用逗号分隔，而您提供的 JSON...</td>\n",
       "      <td>1850</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>28</th>\n",
       "      <td>错误原因：在 JSON 字符串中，列表和字典的元素之间以及它们与冒号之间需要有逗号分隔。同时...</td>\n",
       "      <td>1858</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>29</th>\n",
       "      <td>错误原因：提供的JSON字符串是一个数组，但是数组中的元素应该是一个对象（字典），而不是一个...</td>\n",
       "      <td>1935</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>30</th>\n",
       "      <td>错误原因：在提供的 JSON 字符串中，数组元素内部的浮点数没有正确闭合引号。例如，在 `[...</td>\n",
       "      <td>1942</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>31</th>\n",
       "      <td>错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...</td>\n",
       "      <td>1983</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>32</th>\n",
       "      <td>根据您提供的JSON字符串，我发现它是一个数组，其中包含多个元素。在JSON中，数组元素应该...</td>\n",
       "      <td>2059</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>33</th>\n",
       "      <td>错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...</td>\n",
       "      <td>2167</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>34</th>\n",
       "      <td>错误原因：在原始 JSON 字符串中，数组元素的结构不一致。在第一个对象中，`weather...</td>\n",
       "      <td>2194</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>35</th>\n",
       "      <td>错误原因：提供的JSON字符串中包含了一个额外的逗号和一个空列表，这在JSON数组中是不必要...</td>\n",
       "      <td>2265</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[{'role': 'user', 'content': '我有如下json字符串，使用js...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                             response  labels  logprobs  \\\n",
       "0   错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...      28       NaN   \n",
       "1   错误原因：在 JSON 字符串中，列表和字典的元素之间应该使用逗号分隔，而您提供的 JSON...     130       NaN   \n",
       "2   错误原因：提供的 JSON 字符串是一个数组，但是数组中只有一个元素，并且这个元素是一个对象...     133       NaN   \n",
       "3   错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...     146       NaN   \n",
       "4   错误原因：在Python的JSON解析中，列表和字典的元素之间需要使用逗号分隔。在您提供的J...     458       NaN   \n",
       "5   错误原因：在 JSON 字符串中，所有的中文字符（如“食物”、“数量”等）需要使用英文状态下...     462       NaN   \n",
       "6   错误原因：提供的JSON字符串是一个数组，其中包含多个元素，包括一个空数组和一个字典。在JS...     591       NaN   \n",
       "7   错误原因：在 JSON 字符串中，数组和对象的元素之间应该用逗号分隔，而您提供的 JSON ...     594       NaN   \n",
       "8   错误原因：提供的JSON字符串不是一个有效的JSON数组。在JSON中，数组应该由方括号`[...     793       NaN   \n",
       "9   错误原因：提供的JSON字符串是一个数组，其中包含四个元素，但最后一个元素是不完整的数组。在...     806       NaN   \n",
       "10  错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...    1082       NaN   \n",
       "11  在您提供的JSON字符串中，存在一些不规范的地方。首先，JSON数组应该以 `[` 开始，并...    1124       NaN   \n",
       "12  错误原因：在`engagement_metrics`字段中的`点击率`和`观看时长`值需要使...    1126       NaN   \n",
       "13  根据您提供的JSON字符串，它看起来是一个数组，其中包含两个对象。每个对象都有一个`name...    1158       NaN   \n",
       "14  错误原因：在Python的JSON解析中，列表和字典的元素之间需要使用逗号分隔。在您提供的J...    1228       NaN   \n",
       "15  错误原因：提供的JSON字符串是一个数组，但是每个元素应该是一个对象（字典），而不仅仅是简单...    1272       NaN   \n",
       "16  错误原因：在Python的JSON解析中，列表和字典必须以特定的格式表示。在您提供的JSON...    1327       NaN   \n",
       "17  错误原因：提供的 JSON 字符串中包含了一个数组，而 JSON 实际上应该是一个对象（字典...    1460       NaN   \n",
       "18  错误原因：在构造JSON数组时，内部的数组元素之间以及对象的属性值之间没有正确使用逗号分隔。...    1483       NaN   \n",
       "19  在提供的 JSON 字符串中，存在一个错误，即在 `competitor_pricing` ...    1485       NaN   \n",
       "20  根据您提供的 JSON 字符串，它实际上是包含在 Python 列表中的字典，而不是有效的 ...    1518       NaN   \n",
       "21  错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...    1566       NaN   \n",
       "22  错误原因：在`historical_data`数组中，数据项之间使用了中文逗号（，），而在J...    1568       NaN   \n",
       "23  错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON...    1623       NaN   \n",
       "24  根据您提供的JSON字符串，我发现存在一些不规范的格式问题。主要问题在于字典和列表的嵌套结构...    1715       NaN   \n",
       "25  错误原因：在 JSON 字符串中，列表和字典的末尾应该使用 `]` 和 `}` 关闭，但是提...    1746       NaN   \n",
       "26  错误原因：在Python的JSON解析中，列表和字典的结构需要严格遵循JSON规范。在您提供...    1816       NaN   \n",
       "27  错误原因：在 JSON 字符串中，列表和字典的元素之间应该使用逗号分隔，而您提供的 JSON...    1850       NaN   \n",
       "28  错误原因：在 JSON 字符串中，列表和字典的元素之间以及它们与冒号之间需要有逗号分隔。同时...    1858       NaN   \n",
       "29  错误原因：提供的JSON字符串是一个数组，但是数组中的元素应该是一个对象（字典），而不是一个...    1935       NaN   \n",
       "30  错误原因：在提供的 JSON 字符串中，数组元素内部的浮点数没有正确闭合引号。例如，在 `[...    1942       NaN   \n",
       "31  错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...    1983       NaN   \n",
       "32  根据您提供的JSON字符串，我发现它是一个数组，其中包含多个元素。在JSON中，数组元素应该...    2059       NaN   \n",
       "33  错误原因：提供的JSON字符串中包含了一个数组，而JSON实际上是一个键值对的集合。数组应该...    2167       NaN   \n",
       "34  错误原因：在原始 JSON 字符串中，数组元素的结构不一致。在第一个对象中，`weather...    2194       NaN   \n",
       "35  错误原因：提供的JSON字符串中包含了一个额外的逗号和一个空列表，这在JSON数组中是不必要...    2265       NaN   \n",
       "\n",
       "                                             messages  \n",
       "0   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "1   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "2   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "3   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "4   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "5   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "6   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "7   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "8   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "9   [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "10  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "11  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "12  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "13  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "14  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "15  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "16  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "17  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "18  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "19  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "20  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "21  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "22  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "23  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "24  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "25  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "26  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "27  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "28  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "29  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "30  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "31  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "32  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "33  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "34  [{'role': 'user', 'content': '我有如下json字符串，使用js...  \n",
       "35  [{'role': 'user', 'content': '我有如下json字符串，使用js...  "
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# Display the retried-prediction frame for a quick visual sanity check\n",
     "# (rich DataFrame repr; no computation happens in this cell).\n",
     "test_infer_df_process_response"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "32432b93-8a68-407c-a595-4e743afb1990",
   "metadata": {},
   "outputs": [],
    "source": [
     "# Derive an integer 'id' column for the retried rows. NOTE(review):\n",
     "# 'labels' here presumably carries the original example id of each\n",
     "# retried row (values like 28, 130, ... in the output above) — confirm\n",
     "# against the cell that wrote this jsonl.\n",
     "test_infer_df_process_response['id']=test_infer_df_process_response['labels'].astype(int)\n",
     "# Remove those ids from the main prediction frame so the retried\n",
     "# answers can replace the originals. NOTE(review): this overwrites\n",
     "# test_infer_df in place, so re-running this cell alone is not\n",
     "# idempotent — a full Restart & Run All is required to reproduce it.\n",
     "test_infer_df=test_infer_df[~test_infer_df['id'].isin(test_infer_df_process_response['id'])]"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "442bf5cd-b0f5-4cb3-9e15-e7f124a899af",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_reponse(x):\n",
    "    \"\"\"Parse the model response for row ``x`` into per-message tool-call lists.\n",
    "\n",
    "    x: a row with keys 'user_messages', 'apis', 'response' and 'id'.\n",
    "    Returns one list per user message, each holding the validated\n",
    "    {'name', 'arguments'} dicts, or [] when the call is invalid.\n",
    "    \"\"\"\n",
    "    user_messages = x['user_messages']\n",
    "    apis = x['apis']\n",
    "    apis_name = [api['name'] for api in apis]\n",
    "    # Extract the ```json fenced payload, then try a few increasingly\n",
    "    # aggressive brace repairs (in the original order) until one parses.\n",
    "    raw = x['response'].split('```json')[-1].split('```')[0]\n",
    "    candidates = [\n",
    "        raw,\n",
    "        raw.replace('},', '}},'),\n",
    "        raw.replace('},', '}},').replace('}}}', '}}'),\n",
    "        raw.replace('}}', '}},').replace('}}}', '}}'),\n",
    "    ]\n",
    "    response = None\n",
    "    for candidate in candidates:\n",
    "        try:\n",
    "            response = json.loads(candidate)\n",
    "            break\n",
    "        except Exception:\n",
    "            continue\n",
    "    else:\n",
    "        print(x['id'], 'json.loads失败', x['response'])\n",
    "        # Fresh [] per message (NOT [[]]*n, which aliases one shared list):\n",
    "        # later steps mutate these sublists in place.\n",
    "        return [[] for _ in user_messages]\n",
    "    if not isinstance(response, list):\n",
    "        return [[] for _ in user_messages]\n",
    "    # Exact-type expectations for the simple parameter types, paired with\n",
    "    # the message logged on mismatch.  type() (not isinstance) is used so\n",
    "    # that e.g. bool is never accepted where 'integer' is required.\n",
    "    simple_checks = {\n",
    "        'boolean': (bool, '布尔类型错误'),\n",
    "        'string': (str, 'string类型错误'),\n",
    "        'array': (list, 'array类型错误'),\n",
    "        'dict': (dict, 'dict类型错误'),\n",
    "        'integer': (int, 'integer类型错误'),\n",
    "        'tuple': (tuple, 'tuple类型错误'),\n",
    "    }\n",
    "    res = []\n",
    "    for data in response:\n",
    "        if isinstance(data, list):\n",
    "            # A list slot means \"no tool call\" for this message.\n",
    "            res.append([])\n",
    "            continue\n",
    "        if not isinstance(data, dict):\n",
    "            # Anything else is skipped without appending, as before.\n",
    "            continue\n",
    "        if 'name' not in data or 'arguments' not in data:\n",
    "            print('缺少name和arguments', data)\n",
    "            res.append([])\n",
    "            continue\n",
    "        api_name = data['name']\n",
    "        arguments = data['arguments']\n",
    "        if api_name not in apis_name:\n",
    "            print('api_name不对', data)\n",
    "            res.append([])\n",
    "            continue\n",
    "        api = apis[apis_name.index(api_name)]\n",
    "        parameters = api['parameters']\n",
    "        parameters_types = {key: value['type'] for key, value in parameters.items()}\n",
    "        missing = set(api['required']) - set(arguments.keys())\n",
    "        if missing:\n",
    "            print('required缺少', missing, data)\n",
    "            res.append([])\n",
    "            continue\n",
    "        extra = set(arguments.keys()) - set(parameters.keys())\n",
    "        if extra:\n",
    "            print('多参数', extra, data)\n",
    "            res.append([])\n",
    "            continue\n",
    "        check_type = 0\n",
    "        for key, value in arguments.items():\n",
    "            expected = parameters_types[key]\n",
    "            if expected in simple_checks:\n",
    "                py_type, message = simple_checks[expected]\n",
    "                if type(value) != py_type:\n",
    "                    print(message, user_messages, arguments, parameters)\n",
    "                    check_type += 1\n",
    "                    break\n",
    "            elif expected == 'number':\n",
    "                if type(value) not in [int, float]:\n",
    "                    print('number类型错误', user_messages, arguments, parameters)\n",
    "                    check_type += 1\n",
    "                    break\n",
    "            elif expected == 'float':\n",
    "                if type(value) == int:\n",
    "                    # Coerce int -> float in place so the call matches the schema.\n",
    "                    data['arguments'][key] = float(value)\n",
    "                elif type(value) not in [int, float]:\n",
    "                    print('float类型错误', user_messages, arguments, parameters)\n",
    "                    check_type += 1\n",
    "                    break\n",
    "        res.append([data] if check_type == 0 else [])\n",
    "    return res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "ca119ba1-0be9-4bfd-8482-37dfada24284",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "required缺少 {'excavation_area'} {'name': 'site_excavation_plan', 'arguments': {'location': '河南省洛阳市', 'initial_findings': '一些古代建筑遗迹，预计是唐代的遗址', 'expected_period': '唐代', 'reconstruction_scale': 800, 'team_size': 15, 'budget': 80.0}}\n",
      "required缺少 {'gender'} {'name': 'ski_gear_recommendation', 'arguments': {'height': 160.0, 'weight': 55.0, 'skill_level': '初学者', 'preferred_style': '竞速', 'budget': 300.0}}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "594 json.loads失败 错误原因：在 JSON 字符串中，数组和对象的元素之间应该用逗号分隔，而您提供的 JSON 字符串中的最后一个元素（一个空数组）后面没有逗号。这会导致解析时出现错误。\n",
      "\n",
      "修正后内容：\n",
      "\n",
      "```json\n",
      "[\n",
      "    [],\n",
      "    [],\n",
      "    [],\n",
      "    {\n",
      "        \"name\": \"power_distribution_optimize\",\n",
      "        \"arguments\": {\n",
      "            \"distribution_map\": {\n",
      "                \"nodeA\": 1000,\n",
      "                \"nodeB\": 800,\n",
      "                \"nodeC\": 600\n",
      "            },\n",
      "            \"demand_forecast\": [\n",
      "                900,\n",
      "                750,\n",
      "                700\n",
      "            ],\n",
      "            \"capacity_limits\": {\n",
      "                \"nodeA\": 1200,\n",
      "                \"nodeB\": 1000,\n",
      "                \"nodeC\": 800\n",
      "            },\n",
      "            \"cost_parameters\": {\n",
      "                \"传输成本\": 0.05,\n",
      "                \"发电成本\": 0.1\n",
      "            }\n",
      "        },\n",
      "        []\n",
      "    ]\n",
      "]\n",
      "```\n",
      "多参数 {'apply_immediately'} {'name': 'modify_instance_config', 'arguments': {'instance_id': 'db-789012', 'new_storage_size': 500, 'new_instance_name': '', 'switch_public_access': False, 'apply_immediately': False}}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "float类型错误 ['我想分析一下/user/project项目的代码测试覆盖率，分别使用npm test、mvn test、pytest和go test这几个命令来运行测试，并将覆盖率报告输出到/user/project/coverage目录下。对于mvn test和pytest这两个命令，我还想忽略一些文件和目录，比如target/、node_modules/、venv/和tests/。另外，我希望覆盖率的最低阈值分别设置为90.0、75.0和95.0。'] {'project_path': '/user/project', 'test_command': 'npm test, mvn test, pytest, go test', 'output_dir': '/user/project/coverage', 'threshold': [90.0, 75.0, 95.0], 'ignore_files': ['target/', 'node_modules/', 'venv/', 'tests/']} {'project_path': {'type': 'string', 'description': '项目根目录路径'}, 'test_command': {'type': 'string', 'description': '运行测试的命令'}, 'ignore_files': {'type': 'array', 'description': '忽略的文件或目录列表', 'items': {'type': 'string'}}, 'output_dir': {'type': 'string', 'description': '覆盖率报告的输出目录'}, 'threshold': {'type': 'float', 'description': '覆盖率的最低阈值，低于此值会发出警告', 'default': 80.0}}\n",
      "required缺少 {'cost_constraints'} {'name': 'optimize_chemical_process', 'arguments': {'process_name': 'ethylene_production', 'current_efficiency': 68.0, 'target_efficiency': 78.0, 'resource_usage': {'ethane': 1200.0, 'oxygen': 600.0, 'steam': 300.0}}, 'cost_constraints': {'max_cost': 700000.0, 'min_cost': 400000.0}}\n",
      "1126 json.loads失败 错误原因：在`engagement_metrics`字段中的`点击率`和`观看时长`值需要使用双引号包裹，并且百分比符号 `%` 应该被移除。\n",
      "\n",
      "修正后内容：\n",
      "\n",
      "```json\n",
      "[\n",
      "    [],\n",
      "    [],\n",
      "    {\n",
      "        \"name\": \"evaluate_ad_effectiveness\",\n",
      "        \"arguments\": {\n",
      "            \"ad_id\": \"ad67890\",\n",
      "            \"platform\": \"电视\",\n",
      "            \"viewer_feedback\": [\n",
      "                \"广告太长，内容不吸引人，产品信息模糊。\",\n",
      "                \"观众反馈\"\n",
      "            ],\n",
      "            \"engagement_metrics\": {\n",
      "                \"点击率\": \"2%\",\n",
      "                \"观看时长\": \"15秒\"\n",
      "            },\n",
      "            \"demographic_data\": {\n",
      "                \"年龄\": \"35-54\",\n",
      "                \"性别\": \"女性\"\n",
      "            }\n",
      "        },\n",
      "        []\n",
      "    ]\n",
      "]\n",
      "```\n",
      "多参数 {'include_activities'} {'name': 'religious_festival_info', 'arguments': {'festival_name': 'Diwali', 'religion': 'Hinduism', 'year': 2023, 'include_history': True, 'include_activities': True}}\n",
      "required缺少 {'initial_wavefunction', 'time', 'hamiltonian'} {'name': 'compute_quantum_state', 'arguments': {'potential': [0.0, 1.0, 2.0, 1.0, 0.0], 'mass': 1.0, 'energy': 2.0, 'num_states': 3, 'boundary_condition': 'zero'}}\n",
      "string类型错误 ['在处理一些分布式文件时，我需要将/path/to/file1.txt和/path/to/file2.txt这两个文件合并成一个TXT格式的文件，并输出到/path/to/output.txt。同时，我还需要对sales_data_2023这个数据集进行聚合，对revenue和quantity这两个字段分别应用sum和avg函数，生成汇总数据。'] {'dataset': 'sales_data_2023', 'aggregation_fields': ['revenue', 'quantity'], 'aggregation_functions': ['sum', 'avg'], 'group_by': None, 'filter_conditions': None} {'dataset': {'type': 'string', 'description': '要聚合的数据集的唯一标识符'}, 'aggregation_fields': {'type': 'array', 'description': '需要进行聚合的字段列表，每个字段将应用指定的聚合函数'}, 'aggregation_functions': {'type': 'array', 'description': '应用于聚合字段的函数列表，如sum, avg, max, min等'}, 'group_by': {'type': 'string', 'description': '用于分组的字段名', 'default': None}, 'filter_conditions': {'type': 'string', 'description': '过滤条件，用于筛选出符合条件的记录进行聚合', 'default': None}}\n",
      "1518 json.loads失败 根据您提供的 JSON 字符串，它实际上是包含在 Python 列表中的字典，而不是有效的 JSON 数组。在 JSON 中，这样的数据结构应该被表示为一个对象数组（即一个 JSON 数组）。以下是修正后的 JSON 字符串：\n",
      "\n",
      "```json\n",
      "[\n",
      "    {\n",
      "        \"name\": \"scan_network_devices\",\n",
      "        \"arguments\": {\n",
      "            \"network_range\": \"192.168.1.0/24\",\n",
      "            \"timeout\": 5,\n",
      "            \"retries\": 3,\n",
      "            \"exclude_ips\": [192.168.1.1, 192.168.1.2]\n",
      "        }\n",
      "    },\n",
      "    {\n",
      "        \"name\": \"bandwidth_test\",\n",
      "        \"arguments\": {\n",
      "            \"server_ip\": \"192.168.1.1\",\n",
      "            \"duration\": 10,\n",
      "            \"parallel_streams\": 4,\n",
      "            \"protocol\": \"TCP\",\n",
      "            \"output_format\": \"JSON\"\n",
      "        }\n",
      "    },\n",
      "    {\n",
      "        \"name\": \"bandwidth_test\",\n",
      "        \"arguments\": {\n",
      "            \"server_ip\": \"203.0.113.5\",\n",
      "            \"duration\": 30,\n",
      "            \"parallel_streams\": 8,\n",
      "            \"protocol\": \"UDP\",\n",
      "            \"output_format\": \"CSV\"\n",
      "        }\n",
      "    }\n",
      "]\n",
      "```\n",
      "\n",
      "错误原因：原始 JSON 字符串中使用了 Python 的列表语法（`[]`），而 JSON 需要使用数组语法（`[]`）来表示列表。此外，内部的键值对也需要用冒号 `:` 进行分隔，并且每对键值对之间需要使用逗号 `,` 分隔。\n",
      "\n",
      "修正后内容：如上所示，我已经将原始 JSON 字符串转换为有效的 JSON 格式。\n",
      "1623 json.loads失败 错误原因：在 JSON 字符串中，数组和对象的元素之间应该使用逗号分隔，而您提供的 JSON 字符串中的最后一个数组元素后面没有逗号。这会导致解析时出现错误。\n",
      "\n",
      "修正后内容：\n",
      "\n",
      "```json\n",
      "[\n",
      "    [],\n",
      "    [],\n",
      "    {\n",
      "        \"name\": \"shipping_schedule_optimization\",\n",
      "        \"arguments\": {\n",
      "            \"shipments\": [\n",
      "                {\n",
      "                    \"destination\": \"City C\",\n",
      "                    \"weight\": 200\n",
      "                },\n",
      "                {\n",
      "                    \"destination\": \"City D\",\n",
      "                    \"weight\": 300\n",
      "                },\n",
      "                {\n",
      "                    \"destination\": \"City E\",\n",
      "                    \"weight\": 400\n",
      "                }\n",
      "            ],\n",
      "            \"vehicle_capacity\": 800,\n",
      "            \"fuel_cost\": 2.0,\n",
      "            \"driver_availability\": [\n",
      "                {\n",
      "                    \"start_time\": \"07:00\",\n",
      "                    \"end_time\": \"16:00\"\n",
      "                },\n",
      "                {\n",
      "                    \"start_time\": \"08:00\",\n",
      "                    \"end_time\": \"17:00\"\n",
      "                }\n",
      "            ]\n",
      "        },\n",
      "        []\n",
      "    ]\n",
      "]\n",
      "```\n",
      "1715 json.loads失败 根据您提供的JSON字符串，我发现存在一些不规范的格式问题。主要问题在于字典和列表的嵌套结构以及键值对的表示方式。以下是修正后的JSON字符串：\n",
      "\n",
      "```json\n",
      "[\n",
      "    [],\n",
      "    [],\n",
      "    [],\n",
      "    {\n",
      "        \"name\": \"airline_revenue_optimizer\",\n",
      "        \"arguments\": {\n",
      "            \"airline_code\": \"DL\",\n",
      "            \"route_demand\": {\n",
      "                \"ATL-LGA\": {\n",
      "                    \"乘客数量\": 250,\n",
      "                    \"票价敏感度\": 0.7\n",
      "                },\n",
      "                \"LGA-ORD\": {\n",
      "                    \"乘客数量\": 180,\n",
      "                    \"票价敏感度\": 0.5\n",
      "                }\n",
      "            },\n",
      "            \"flight_schedule\": [\n",
      "                \"DL789\",\n",
      "                \"起飞时间\": \"09:00\",\n",
      "                \"到达时间\": \"11:30\"\n",
      "            ],\n",
      "            \"cost_per_flight\": 4500.0,\n",
      "            \"ticket_prices\": {\n",
      "                \"ATL-LGA\": 150.0,\n",
      "                \"LGA-ORD\": 120.0\n",
      "            },\n",
      "            \"competitor_prices\": {\n",
      "                \"ATL-LGA\": 155.0,\n",
      "                \"LGA-ORD\": 115.0\n",
      "            },\n",
      "            \"seat_capacity\": 180\n",
      "        },\n",
      "    []\n",
      "]\n",
      "```\n",
      "\n",
      "**错误原因**：\n",
      "1. **列表和字典的嵌套结构**：在原始字符串中，列表和字典的嵌套结构不正确。例如，`\"flight_schedule\"` 应该是一个列表，但其内部的键值对被错误地放在了一个单独的列表中。\n",
      "2. **键值对的表示**：在原始字符串中，键值对之间使用了额外的引号和逗号，这导致了解析错误。\n",
      "\n",
      "**修正后内容**：\n",
      "通过移除不必要的引号、确保列表和字典的正确嵌套，并调整键值对的表示方式，上述JSON字符串可以正确解析。\n",
      "多参数 {'timeout', 'enableHighAccuracy', 'maximumAge', 'accuracy'} {'name': 'update_app_settings', 'arguments': {'app_id': 'com.example.app', 'theme': 'dark', 'language': 'en', 'auto_update': True, 'analytics_enabled': True, 'accuracy': 'high', 'timeout': 10, 'maximumAge': 60000, 'enableHighAccuracy': True}}\n",
      "多参数 {'detail_level'} {'name': 'treaty_details', 'arguments': {'treaty_name': '凡尔赛条约', 'include_signatories': False, 'language': '英文', 'detail_level': 3}}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "required缺少 {'employee_claims'} {'name': 'creditor_priority', 'arguments': {'secured_creditors': [{'name': 'Credit Union C', 'amount': 75000.0}], 'unsecured_creditors': [{'name': 'Vendor Z', 'amount': 10000.0}, {'name': '员工', 'amount': 20000.0}], 'government_claims': 15000.0}}\n",
      "多参数 {'auto_renewable'} {'name': 'in_app_purchase_verify', 'arguments': {'transaction_id': '1234567890', 'product_id': 'com.example.product1', 'user_id': 'user123', 'purchase_date': 1633024800000, 'receipt_data': 'receipt_data_123', 'sandbox': True, 'auto_renewable': False}}\n",
      "多参数 {'query_type', 'enable_parallel_execution', 'optimization_level', 'memory_allocation'} {'name': 'storage_compressor', 'arguments': {'database_name': 'customer_db', 'compression_level': 2, 'backup_before_compress': True, 'query_type': 'select', 'optimization_level': 3, 'enable_parallel_execution': True, 'memory_allocation': 1024}}\n",
      "api_name不对 {'name': 'system原理图软件提供的功能主要有自动布局功能、信号完整性分析功能、电源完整性分析功能、时序分析功能、模拟仿真功能、PCB设计规则检查功能、帮助设计更高效、优化电路布局、降低布线风险、改善电气性能等。\\n\\n基于以上这段文本内容回答：\\n系统原理图软件的功能主要体现在哪些方面？', 'arguments': {}}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n",
      "缺少name和arguments {}\n"
     ]
    }
   ],
   "source": [
    "# Attach responses (first pass plus the reprocessed rows) to the test set\n",
    "# by id, then parse each response into per-message tool calls.\n",
    "infer_df=test_df_b.merge(pd.concat([test_infer_df,test_infer_df_process_response])[['id','response']],how='left',on='id')\n",
    "infer_df['response']=infer_df.apply(lambda x:get_reponse(x),axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "cf05e937-b4aa-4cd2-bef3-660b3c2cc8c8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Drop the apis column; a fresh copy is merged back in from the raw\n",
    "# task file in the next cell.\n",
    "del infer_df['apis']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "5c8f00e7-4b9e-422b-b3bb-581ce8d2f591",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Re-attach apis and user_messages from the raw task file, keyed by id.\n",
    "infer_df=infer_df[['id','response']].merge(pd.read_json('data/合成数据决赛赛题.jsonl',lines=True)[['id','apis','user_messages']],how='left',on='id')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "d7c25d1e-8f66-4c61-8bc5-f928221a9330",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The parsed responses serve as the prediction targets from here on.\n",
    "infer_df=infer_df.rename(columns={'response':'targets'})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "4343cb33-d745-4a6e-a54b-ec809a0bd0e7",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>targets</th>\n",
       "      <th>apis</th>\n",
       "      <th>user_messages</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>133</th>\n",
       "      <td>133</td>\n",
       "      <td>[[{'name': 'food_expiry_tracker', 'arguments':...</td>\n",
       "      <td>[{'name': 'food_price_comparison', 'descriptio...</td>\n",
       "      <td>[最近买了不少食品，想跟踪一下它们的保质期。牛奶是2023-10-01买的，面包是2023-...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1835</th>\n",
       "      <td>1835</td>\n",
       "      <td>[[], [], [{'name': 'optimize_tax_deductions', ...</td>\n",
       "      <td>[{'name': 'feed_optimization', 'description': ...</td>\n",
       "      <td>[我今年在退休金上缴纳了3000.0，想了解一下如何优化我的税务抵扣方案。, 我今年的医疗费...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "        id                                            targets  \\\n",
       "133    133  [[{'name': 'food_expiry_tracker', 'arguments':...   \n",
       "1835  1835  [[], [], [{'name': 'optimize_tax_deductions', ...   \n",
       "\n",
       "                                                   apis  \\\n",
       "133   [{'name': 'food_price_comparison', 'descriptio...   \n",
       "1835  [{'name': 'feed_optimization', 'description': ...   \n",
       "\n",
       "                                          user_messages  \n",
       "133   [最近买了不少食品，想跟踪一下它们的保质期。牛奶是2023-10-01买的，面包是2023-...  \n",
       "1835  [我今年在退休金上缴纳了3000.0，想了解一下如何优化我的税务抵扣方案。, 我今年的医疗费...  "
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: rows with fewer target sublists than user messages.\n",
    "infer_df[infer_df['user_messages'].apply(len)>infer_df['targets'].apply(len)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "id": "4df123a5-40b6-4b55-9809-9e9251da90bc",
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_step1(x):\n",
    "    \"\"\"Pad x['targets'] with empty lists so it has one entry per user message.\"\"\"\n",
    "    deficit = len(x['user_messages']) - len(x['targets'])\n",
    "    if deficit > 0:\n",
    "        # Fresh [] per padded slot (NOT [[]]*deficit, which aliases one\n",
    "        # shared list): later steps mutate sublists in place, so aliased\n",
    "        # empties would corrupt sibling slots.\n",
    "        return x['targets'] + [[] for _ in range(deficit)]\n",
    "    return x['targets']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "7f12339f-ec62-407b-98b2-36276b1b6c77",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pad short target lists so each row has one sublist per user message.\n",
    "infer_df['targets_new']=infer_df.apply(lambda x:process_step1(x),axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "a04767aa-4ec3-4cf3-b78c-4450f696315f",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>targets</th>\n",
       "      <th>apis</th>\n",
       "      <th>user_messages</th>\n",
       "      <th>targets_new</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>11</td>\n",
       "      <td>[[{'name': 'network_diagnose', 'arguments': {'...</td>\n",
       "      <td>[{'name': 'analyze_chemical_purity', 'descript...</td>\n",
       "      <td>[最近网络连接不太稳定，我想检查一下192.168.1.1和10.0.0.1这两个IP地址的...</td>\n",
       "      <td>[[{'name': 'network_diagnose', 'arguments': {'...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>12</td>\n",
       "      <td>[[{'name': 'assess_child_support', 'arguments'...</td>\n",
       "      <td>[{'name': 'guardianship_application_guide', 'd...</td>\n",
       "      <td>[我最近在处理一些家庭法律事务，需要评估几种不同情况下的子女抚养费。第一对父母的年收入分别是...</td>\n",
       "      <td>[[{'name': 'assess_child_support', 'arguments'...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14</th>\n",
       "      <td>14</td>\n",
       "      <td>[[{'name': 'index_optimizer', 'arguments': {'t...</td>\n",
       "      <td>[{'name': 'index_optimizer', 'description': '分...</td>\n",
       "      <td>[最近我们在优化数据库性能，特别是对user_table和order_table的索引进行了...</td>\n",
       "      <td>[[{'name': 'index_optimizer', 'arguments': {'t...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>31</th>\n",
       "      <td>31</td>\n",
       "      <td>[[], [{'name': 'climate_agreement_status', 'ar...</td>\n",
       "      <td>[{'name': 'schrodinger_solution', 'description...</td>\n",
       "      <td>[Create a Tic Tac toe game in python., 我想了解一下G...</td>\n",
       "      <td>[[], [{'name': 'climate_agreement_status', 'ar...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>41</th>\n",
       "      <td>41</td>\n",
       "      <td>[[{'name': 'request_interlibrary_loan', 'argum...</td>\n",
       "      <td>[{'name': 'network_providers', 'description': ...</td>\n",
       "      <td>[我想借阅《The Great Gatsby》这本书，作者是F. Scott Fitzger...</td>\n",
       "      <td>[[{'name': 'request_interlibrary_loan', 'argum...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2262</th>\n",
       "      <td>2262</td>\n",
       "      <td>[[{'name': 'process_monitor', 'arguments': {'s...</td>\n",
       "      <td>[{'name': 'search_books_by_genre', 'descriptio...</td>\n",
       "      <td>[最近系统运行有点慢，想看看是不是有什么进程占用了太多资源。帮我看看admin用户启动的进程...</td>\n",
       "      <td>[[{'name': 'process_monitor', 'arguments': {'s...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2270</th>\n",
       "      <td>2270</td>\n",
       "      <td>[[], [], [{'name': 'forecast_staffing', 'argum...</td>\n",
       "      <td>[{'name': 'media_contact_finder', 'description...</td>\n",
       "      <td>[Create a sentence with the following words: a...</td>\n",
       "      <td>[[], [], [{'name': 'forecast_staffing', 'argum...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2276</th>\n",
       "      <td>2276</td>\n",
       "      <td>[[{'name': 'water_demand_forecast', 'arguments...</td>\n",
       "      <td>[{'name': 'water_demand_forecast', 'descriptio...</td>\n",
       "      <td>[我们区域的人口预计将达到100万，GDP增长率为3.5%，平均气温20.0摄氏度，降水量5...</td>\n",
       "      <td>[[{'name': 'water_demand_forecast', 'arguments...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2280</th>\n",
       "      <td>2280</td>\n",
       "      <td>[[{'name': 'find_quotes', 'arguments': {'book_...</td>\n",
       "      <td>[{'name': 'find_characters', 'description': '在...</td>\n",
       "      <td>[在《红楼梦》里找一些关于“爱情”和“命运”的经典语录，最多返回5条。再查查《三国演义》中诸...</td>\n",
       "      <td>[[{'name': 'find_quotes', 'arguments': {'book_...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2285</th>\n",
       "      <td>2285</td>\n",
       "      <td>[[{'name': 'urbanization_effects', 'arguments'...</td>\n",
       "      <td>[{'name': 'dynamic_pricing', 'description': '根...</td>\n",
       "      <td>[北京在2000-2020年期间的城市化进程对环境和居民生活的影响是什么？人口增长率为3%。...</td>\n",
       "      <td>[[{'name': 'urbanization_effects', 'arguments'...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>391 rows × 5 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "        id                                            targets  \\\n",
       "11      11  [[{'name': 'network_diagnose', 'arguments': {'...   \n",
       "12      12  [[{'name': 'assess_child_support', 'arguments'...   \n",
       "14      14  [[{'name': 'index_optimizer', 'arguments': {'t...   \n",
       "31      31  [[], [{'name': 'climate_agreement_status', 'ar...   \n",
       "41      41  [[{'name': 'request_interlibrary_loan', 'argum...   \n",
       "...    ...                                                ...   \n",
       "2262  2262  [[{'name': 'process_monitor', 'arguments': {'s...   \n",
       "2270  2270  [[], [], [{'name': 'forecast_staffing', 'argum...   \n",
       "2276  2276  [[{'name': 'water_demand_forecast', 'arguments...   \n",
       "2280  2280  [[{'name': 'find_quotes', 'arguments': {'book_...   \n",
       "2285  2285  [[{'name': 'urbanization_effects', 'arguments'...   \n",
       "\n",
       "                                                   apis  \\\n",
       "11    [{'name': 'analyze_chemical_purity', 'descript...   \n",
       "12    [{'name': 'guardianship_application_guide', 'd...   \n",
       "14    [{'name': 'index_optimizer', 'description': '分...   \n",
       "31    [{'name': 'schrodinger_solution', 'description...   \n",
       "41    [{'name': 'network_providers', 'description': ...   \n",
       "...                                                 ...   \n",
       "2262  [{'name': 'search_books_by_genre', 'descriptio...   \n",
       "2270  [{'name': 'media_contact_finder', 'description...   \n",
       "2276  [{'name': 'water_demand_forecast', 'descriptio...   \n",
       "2280  [{'name': 'find_characters', 'description': '在...   \n",
       "2285  [{'name': 'dynamic_pricing', 'description': '根...   \n",
       "\n",
       "                                          user_messages  \\\n",
       "11    [最近网络连接不太稳定，我想检查一下192.168.1.1和10.0.0.1这两个IP地址的...   \n",
       "12    [我最近在处理一些家庭法律事务，需要评估几种不同情况下的子女抚养费。第一对父母的年收入分别是...   \n",
       "14    [最近我们在优化数据库性能，特别是对user_table和order_table的索引进行了...   \n",
       "31    [Create a Tic Tac toe game in python., 我想了解一下G...   \n",
       "41    [我想借阅《The Great Gatsby》这本书，作者是F. Scott Fitzger...   \n",
       "...                                                 ...   \n",
       "2262  [最近系统运行有点慢，想看看是不是有什么进程占用了太多资源。帮我看看admin用户启动的进程...   \n",
       "2270  [Create a sentence with the following words: a...   \n",
       "2276  [我们区域的人口预计将达到100万，GDP增长率为3.5%，平均气温20.0摄氏度，降水量5...   \n",
       "2280  [在《红楼梦》里找一些关于“爱情”和“命运”的经典语录，最多返回5条。再查查《三国演义》中诸...   \n",
       "2285  [北京在2000-2020年期间的城市化进程对环境和居民生活的影响是什么？人口增长率为3%。...   \n",
       "\n",
       "                                            targets_new  \n",
       "11    [[{'name': 'network_diagnose', 'arguments': {'...  \n",
       "12    [[{'name': 'assess_child_support', 'arguments'...  \n",
       "14    [[{'name': 'index_optimizer', 'arguments': {'t...  \n",
       "31    [[], [{'name': 'climate_agreement_status', 'ar...  \n",
       "41    [[{'name': 'request_interlibrary_loan', 'argum...  \n",
       "...                                                 ...  \n",
       "2262  [[{'name': 'process_monitor', 'arguments': {'s...  \n",
       "2270  [[], [], [{'name': 'forecast_staffing', 'argum...  \n",
       "2276  [[{'name': 'water_demand_forecast', 'arguments...  \n",
       "2280  [[{'name': 'find_quotes', 'arguments': {'book_...  \n",
       "2285  [[{'name': 'urbanization_effects', 'arguments'...  \n",
       "\n",
       "[391 rows x 5 columns]"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: rows with more target sublists than user messages.\n",
    "infer_df[infer_df['user_messages'].apply(len)<infer_df['targets'].apply(len)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "aa854947-4b97-4c11-a04e-10697618cbba",
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_big_list(x):\n",
    "    \"\"\"Truncate x['targets_new'] to one sublist per user message, merging any\n",
    "    tool calls found in the overflow into the last non-empty kept sublist.\n",
    "    \"\"\"\n",
    "    n_messages = len(x['user_messages'])\n",
    "    big_list = x['targets_new']\n",
    "    # Copy each kept sublist: a plain big_list[:N] slice still shares the\n",
    "    # inner lists, so the appends/extends below would mutate the input row.\n",
    "    truncated = [list(sublist) for sublist in big_list[:n_messages]]\n",
    "    overflow = big_list[n_messages:]\n",
    "\n",
    "    # Collect every call that would otherwise be dropped by the truncation.\n",
    "    elements_to_merge = []\n",
    "    for sublist in overflow:\n",
    "        if sublist:\n",
    "            elements_to_merge.extend(sublist)\n",
    "\n",
    "    if elements_to_merge:\n",
    "        # Locate the last non-empty kept sublist to host the merged calls.\n",
    "        last_idx = None\n",
    "        for idx, sublist in enumerate(truncated):\n",
    "            if sublist:\n",
    "                last_idx = idx\n",
    "\n",
    "        if last_idx is not None:\n",
    "            for element in elements_to_merge:\n",
    "                if element not in truncated[last_idx]:\n",
    "                    print('追加')\n",
    "                    truncated[last_idx].append(element)\n",
    "        elif truncated:\n",
    "            # Every kept sublist is empty: put the calls in the last one.\n",
    "            truncated[-1].extend(elements_to_merge)\n",
    "\n",
    "    return truncated"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "id": "a39bc6ef-e47b-4b8f-8be8-8c810ac8c81e",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n",
      "追加\n"
     ]
    }
   ],
   "source": [
    "infer_df['targets_new']=infer_df.apply(lambda x:process_big_list(x),axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "1348dd01-33f2-4afc-9362-0dc01709af95",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>targets</th>\n",
       "      <th>apis</th>\n",
       "      <th>user_messages</th>\n",
       "      <th>targets_new</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>11</td>\n",
       "      <td>[[{'name': 'network_diagnose', 'arguments': {'...</td>\n",
       "      <td>[{'name': 'analyze_chemical_purity', 'descript...</td>\n",
       "      <td>[最近网络连接不太稳定，我想检查一下192.168.1.1和10.0.0.1这两个IP地址的...</td>\n",
       "      <td>[[{'name': 'network_diagnose', 'arguments': {'...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>12</td>\n",
       "      <td>[[{'name': 'assess_child_support', 'arguments'...</td>\n",
       "      <td>[{'name': 'guardianship_application_guide', 'd...</td>\n",
       "      <td>[我最近在处理一些家庭法律事务，需要评估几种不同情况下的子女抚养费。第一对父母的年收入分别是...</td>\n",
       "      <td>[[{'name': 'assess_child_support', 'arguments'...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>14</td>\n",
       "      <td>[[{'name': 'index_optimizer', 'arguments': {'t...</td>\n",
       "      <td>[{'name': 'index_optimizer', 'description': '分...</td>\n",
       "      <td>[最近我们在优化数据库性能，特别是对user_table和order_table的索引进行了...</td>\n",
       "      <td>[[{'name': 'index_optimizer', 'arguments': {'t...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>31</td>\n",
       "      <td>[[], [{'name': 'climate_agreement_status', 'ar...</td>\n",
       "      <td>[{'name': 'schrodinger_solution', 'description...</td>\n",
       "      <td>[Create a Tic Tac toe game in python., 我想了解一下G...</td>\n",
       "      <td>[[], [{'name': 'climate_agreement_status', 'ar...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>41</td>\n",
       "      <td>[[{'name': 'request_interlibrary_loan', 'argum...</td>\n",
       "      <td>[{'name': 'network_providers', 'description': ...</td>\n",
       "      <td>[我想借阅《The Great Gatsby》这本书，作者是F. Scott Fitzger...</td>\n",
       "      <td>[[{'name': 'request_interlibrary_loan', 'argum...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>388</th>\n",
       "      <td>2262</td>\n",
       "      <td>[[{'name': 'process_monitor', 'arguments': {'s...</td>\n",
       "      <td>[{'name': 'search_books_by_genre', 'descriptio...</td>\n",
       "      <td>[最近系统运行有点慢，想看看是不是有什么进程占用了太多资源。帮我看看admin用户启动的进程...</td>\n",
       "      <td>[[{'name': 'process_monitor', 'arguments': {'s...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>389</th>\n",
       "      <td>2270</td>\n",
       "      <td>[[], [{'name': 'forecast_staffing', 'arguments...</td>\n",
       "      <td>[{'name': 'media_contact_finder', 'description...</td>\n",
       "      <td>[Create a sentence with the following words: a...</td>\n",
       "      <td>[[], [{'name': 'forecast_staffing', 'arguments...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>390</th>\n",
       "      <td>2276</td>\n",
       "      <td>[[{'name': 'water_demand_forecast', 'arguments...</td>\n",
       "      <td>[{'name': 'water_demand_forecast', 'descriptio...</td>\n",
       "      <td>[我们区域的人口预计将达到100万，GDP增长率为3.5%，平均气温20.0摄氏度，降水量5...</td>\n",
       "      <td>[[{'name': 'water_demand_forecast', 'arguments...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>391</th>\n",
       "      <td>2280</td>\n",
       "      <td>[[{'name': 'find_quotes', 'arguments': {'book_...</td>\n",
       "      <td>[{'name': 'find_characters', 'description': '在...</td>\n",
       "      <td>[在《红楼梦》里找一些关于“爱情”和“命运”的经典语录，最多返回5条。再查查《三国演义》中诸...</td>\n",
       "      <td>[[{'name': 'find_quotes', 'arguments': {'book_...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>392</th>\n",
       "      <td>2285</td>\n",
       "      <td>[[{'name': 'urbanization_effects', 'arguments'...</td>\n",
       "      <td>[{'name': 'dynamic_pricing', 'description': '根...</td>\n",
       "      <td>[北京在2000-2020年期间的城市化进程对环境和居民生活的影响是什么？人口增长率为3%。...</td>\n",
       "      <td>[[{'name': 'urbanization_effects', 'arguments'...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>393 rows × 5 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "       id                                            targets  \\\n",
       "0      11  [[{'name': 'network_diagnose', 'arguments': {'...   \n",
       "1      12  [[{'name': 'assess_child_support', 'arguments'...   \n",
       "2      14  [[{'name': 'index_optimizer', 'arguments': {'t...   \n",
       "3      31  [[], [{'name': 'climate_agreement_status', 'ar...   \n",
       "4      41  [[{'name': 'request_interlibrary_loan', 'argum...   \n",
       "..    ...                                                ...   \n",
       "388  2262  [[{'name': 'process_monitor', 'arguments': {'s...   \n",
       "389  2270  [[], [{'name': 'forecast_staffing', 'arguments...   \n",
       "390  2276  [[{'name': 'water_demand_forecast', 'arguments...   \n",
       "391  2280  [[{'name': 'find_quotes', 'arguments': {'book_...   \n",
       "392  2285  [[{'name': 'urbanization_effects', 'arguments'...   \n",
       "\n",
       "                                                  apis  \\\n",
       "0    [{'name': 'analyze_chemical_purity', 'descript...   \n",
       "1    [{'name': 'guardianship_application_guide', 'd...   \n",
       "2    [{'name': 'index_optimizer', 'description': '分...   \n",
       "3    [{'name': 'schrodinger_solution', 'description...   \n",
       "4    [{'name': 'network_providers', 'description': ...   \n",
       "..                                                 ...   \n",
       "388  [{'name': 'search_books_by_genre', 'descriptio...   \n",
       "389  [{'name': 'media_contact_finder', 'description...   \n",
       "390  [{'name': 'water_demand_forecast', 'descriptio...   \n",
       "391  [{'name': 'find_characters', 'description': '在...   \n",
       "392  [{'name': 'dynamic_pricing', 'description': '根...   \n",
       "\n",
       "                                         user_messages  \\\n",
       "0    [最近网络连接不太稳定，我想检查一下192.168.1.1和10.0.0.1这两个IP地址的...   \n",
       "1    [我最近在处理一些家庭法律事务，需要评估几种不同情况下的子女抚养费。第一对父母的年收入分别是...   \n",
       "2    [最近我们在优化数据库性能，特别是对user_table和order_table的索引进行了...   \n",
       "3    [Create a Tic Tac toe game in python., 我想了解一下G...   \n",
       "4    [我想借阅《The Great Gatsby》这本书，作者是F. Scott Fitzger...   \n",
       "..                                                 ...   \n",
       "388  [最近系统运行有点慢，想看看是不是有什么进程占用了太多资源。帮我看看admin用户启动的进程...   \n",
       "389  [Create a sentence with the following words: a...   \n",
       "390  [我们区域的人口预计将达到100万，GDP增长率为3.5%，平均气温20.0摄氏度，降水量5...   \n",
       "391  [在《红楼梦》里找一些关于“爱情”和“命运”的经典语录，最多返回5条。再查查《三国演义》中诸...   \n",
       "392  [北京在2000-2020年期间的城市化进程对环境和居民生活的影响是什么？人口增长率为3%。...   \n",
       "\n",
       "                                           targets_new  \n",
       "0    [[{'name': 'network_diagnose', 'arguments': {'...  \n",
       "1    [[{'name': 'assess_child_support', 'arguments'...  \n",
       "2    [[{'name': 'index_optimizer', 'arguments': {'t...  \n",
       "3    [[], [{'name': 'climate_agreement_status', 'ar...  \n",
       "4    [[{'name': 'request_interlibrary_loan', 'argum...  \n",
       "..                                                 ...  \n",
       "388  [[{'name': 'process_monitor', 'arguments': {'s...  \n",
       "389  [[], [{'name': 'forecast_staffing', 'arguments...  \n",
       "390  [[{'name': 'water_demand_forecast', 'arguments...  \n",
       "391  [[{'name': 'find_quotes', 'arguments': {'book_...  \n",
       "392  [[{'name': 'urbanization_effects', 'arguments'...  \n",
       "\n",
       "[393 rows x 5 columns]"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tmp_df=infer_df[(infer_df['targets'].apply(lambda x:json.dumps(x, sort_keys=True,ensure_ascii=False))!=infer_df['targets_new'].apply(lambda x:json.dumps(x, sort_keys=True,ensure_ascii=False)))].reset_index(drop=True)\n",
    "tmp_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "31c6320b-02c5-451a-9bc7-57f38269abb0",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Cursor for manually stepping through the changed rows in the cells below\n",
     "idx=0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "id": "eac7b892-9cdb-454a-a563-6ed5420e900e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['我最近在处理一些家庭法律事务，需要评估几种不同情况下的子女抚养费。第一对父母的年收入分别是50000.0和40000.0，有两个孩子，每月生活成本是1000.0。第二对父母年收入是75000.0和60000.0，有一个孩子，每月生活成本1200.0。第三对父母年收入是90000.0和45000.0，有三个孩子，每月生活成本1500.0。另外，我还需要了解如何申请成为法定监护人，申请人是被监护人的parent，年龄35岁，被监护人10岁，健康状况healthy，申请人有稳定的经济来源。']"
      ]
     },
     "execution_count": 53,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tmp_df['user_messages'][idx]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "0c65078d-f860-4d21-88f4-be0bd34dd07f",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[[{'name': 'assess_child_support',\n",
       "   'arguments': {'income_1': 50000.0,\n",
       "    'income_2': 40000.0,\n",
       "    'children_count': 2,\n",
       "    'living_costs': 1000.0}},\n",
       "  {'name': 'assess_child_support',\n",
       "   'arguments': {'income_1': 75000.0,\n",
       "    'income_2': 60000.0,\n",
       "    'children_count': 1,\n",
       "    'living_costs': 1200.0}},\n",
       "  {'name': 'assess_child_support',\n",
       "   'arguments': {'income_1': 90000.0,\n",
       "    'income_2': 45000.0,\n",
       "    'children_count': 3,\n",
       "    'living_costs': 1500.0}},\n",
       "  {'name': 'guardianship_application_guide',\n",
       "   'arguments': {'applicant_relation': 'parent',\n",
       "    'applicant_age': 35,\n",
       "    'ward_age': 10,\n",
       "    'ward_health_condition': 'healthy',\n",
       "    'financial_stability': True}}],\n",
       " [{'name': 'assess_child_support',\n",
       "   'arguments': {'income_1': 75000.0,\n",
       "    'income_2': 60000.0,\n",
       "    'children_count': 1,\n",
       "    'living_costs': 1200.0}}],\n",
       " [{'name': 'assess_child_support',\n",
       "   'arguments': {'income_1': 90000.0,\n",
       "    'income_2': 45000.0,\n",
       "    'children_count': 3,\n",
       "    'living_costs': 1500.0}}],\n",
       " [{'name': 'guardianship_application_guide',\n",
       "   'arguments': {'applicant_relation': 'parent',\n",
       "    'applicant_age': 35,\n",
       "    'ward_age': 10,\n",
       "    'ward_health_condition': 'healthy',\n",
       "    'financial_stability': True}}]]"
      ]
     },
     "execution_count": 54,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tmp_df['targets'][idx]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "id": "99fcf5dd-2cee-477b-973c-f1d0f89189b0",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[[{'name': 'assess_child_support',\n",
       "   'arguments': {'income_1': 50000.0,\n",
       "    'income_2': 40000.0,\n",
       "    'children_count': 2,\n",
       "    'living_costs': 1000.0}},\n",
       "  {'name': 'assess_child_support',\n",
       "   'arguments': {'income_1': 75000.0,\n",
       "    'income_2': 60000.0,\n",
       "    'children_count': 1,\n",
       "    'living_costs': 1200.0}},\n",
       "  {'name': 'assess_child_support',\n",
       "   'arguments': {'income_1': 90000.0,\n",
       "    'income_2': 45000.0,\n",
       "    'children_count': 3,\n",
       "    'living_costs': 1500.0}},\n",
       "  {'name': 'guardianship_application_guide',\n",
       "   'arguments': {'applicant_relation': 'parent',\n",
       "    'applicant_age': 35,\n",
       "    'ward_age': 10,\n",
       "    'ward_health_condition': 'healthy',\n",
       "    'financial_stability': True}}]]"
      ]
     },
     "execution_count": 55,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tmp_df['targets_new'][idx]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "id": "b0d5084d-adf6-42d4-bb34-da5ca204fbfb",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Advance the manual inspection cursor to the next changed row\n",
     "idx+=1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "id": "445e5978-e9b8-4008-940a-8b6940c58744",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import time\n",
    "with open(path+f'submissions/submission_{version}_'+time.strftime(\"%Y-%m-%d_%H_%M_%S\",time.localtime())+'.jsonl', 'w',  encoding='utf-8') as fw:\n",
    "    for idx,id in enumerate(infer_df['id']):\n",
    "        user_messages=infer_df['user_messages'][idx]\n",
    "        apis=infer_df['apis'][idx]\n",
    "        targets=infer_df['targets_new'][idx]\n",
    "        s = json.dumps({\n",
    "            'id': str(id),\n",
    "            'apis': apis,\n",
    "            'user_messages': user_messages,\n",
    "            'targets': targets\n",
    "        }, ensure_ascii=False)\n",
    "        fw.write(s + '\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4b0e5cd9-cef0-4e6b-91f0-2ae8b78d9f86",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
