{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Step1：清洗数据（已有清洗数据，可直接跳至Step2）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# %pip install openpyxl\n",
    "from datetime import datetime, timedelta\n",
    "import pandas as pd\n",
    "\n",
    "df = pd.read_excel('rate18-24.xlsx')\n",
    "df = df.drop('CNY/TRY', axis=1)\n",
    "df = df.drop('CNY/MOP', axis=1)\n",
    "df = df.drop('CNY/RUB', axis=1)\n",
    "print(df.head()) \n",
    "\n",
    "titles = df.columns\n",
    "data_without_titles = df.values\n",
    "raw_data=list(data_without_titles)\n",
    "\n",
    "def is_invalid(el):\n",
    "    return el is None or el==\"---\"\n",
    "\n",
    "def clean_data(raw_data):\n",
    "    for i in range(len(raw_data)):\n",
    "        for j in range(2, len(raw_data[0])):\n",
    "            if is_invalid(raw_data[i][j]):\n",
    "                last, next = None, None\n",
    "                for k in range(i-1, -1, -1):\n",
    "                    if not is_invalid(raw_data[k][j]):\n",
    "                        next=raw_data[k][j]\n",
    "                for k in range(i+1, len(raw_data)):\n",
    "                    if not is_invalid(raw_data[k][j]):\n",
    "                        last=raw_data[k][j]\n",
    "\n",
    "                if last==None and next==None:\n",
    "                    # raw_data[i][j] = \"0\"\n",
    "                    raise Exception(\"last==None and next==None\")\n",
    "                elif last==None:\n",
    "                    raw_data[i][j]=next\n",
    "                elif next==None:\n",
    "                    raw_data[i][j]=last\n",
    "                else:\n",
    "                    raw_data[i][j]=str((float(last)+float(next))/2) "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.1 加入当天新闻"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datetime import datetime\n",
    "with open(\"news_merge.txt\", \"r\", encoding=\"UTF8\") as f:\n",
    "    news_items = eval(f.read())\n",
    "\n",
    "\n",
    "date_news_map = {}\n",
    "for news_item in news_items:\n",
    "    date_format = datetime.strptime(news_item[2], \"%Y年%m月%d日\").strftime(\"%Y-%m-%d\")\n",
    "    news_list = None\n",
    "    if date_format not in date_news_map:\n",
    "        news_list = []\n",
    "    else:\n",
    "        news_list = date_news_map[date_format]\n",
    "    news_list.append(news_item[0])\n",
    "    date_news_map[date_format] = news_list "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
     "clean_data(raw_data)\n",
     "import numpy as np\n",
     "data_list = [] \n",
     "for row in raw_data:\n",
     "    row_list = list(row)\n",
     "    # row[0] is the 'YYYY-MM-DD' date; attach that day's news titles\n",
     "    # (empty list when no news exists for the date).\n",
     "    if row[0] not in date_news_map:\n",
     "        news_list = []\n",
     "    else:\n",
     "        news_list = date_news_map[row[0]]\n",
     "    row_list.append(str(news_list))\n",
     "    data_list.append(np.array(row_list))\n",
     "\n",
     "df_output = pd.DataFrame(data_list, columns=df.columns.append(pd.Index([\"news\"])))\n",
     "# Write the cleaned rates plus the new 'news' column to Excel.\n",
     "df_output.to_excel('rate18-24-cleaned-with-news.xlsx', index=False)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.2 依据汇率数据生成汇报结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datetime import datetime, timedelta\n",
    "\n",
    "RATE_NAMES = ['', '', '美元', '欧元', '日元', '港元', '英镑', '澳元', '新西兰币', '新加坡币', '瑞士法郎', '加拿大币']\n",
    "\n",
    "def find_nextdays_x(rates_data, now_row, next_day_dist):\n",
    "    now_date = rates_data[now_row][0]\n",
    "    now_date_obj = datetime.strptime(now_date, \"%Y-%m-%d\")\n",
    "    next_dates_obj = now_date_obj + timedelta(days=next_day_dist)\n",
    "    next_date = next_dates_obj.strftime(\"%Y-%m-%d\")\n",
    "    i=now_row\n",
    "    while i>=0:\n",
    "        if rates_data[i][0]>=next_date:\n",
    "            return rates_data[i]\n",
    "        i-=1\n",
    "\n",
    "    return None\n",
    "\n",
    "def smart_analysis(rate_name, current_rate, future_rate, period_days_num):\n",
    "    period_times = str(period_days_num) + \"天后\"\n",
    "    if (future_rate-current_rate) / current_rate > 0.1:\n",
    "        return rate_name + period_times + \"涨幅大于10%\"\n",
    "    elif (future_rate-current_rate) / current_rate > 0.05:\n",
    "        return rate_name + period_times + \"涨幅大于5%\"\n",
    "    elif (future_rate-current_rate) / current_rate > 0.03:\n",
    "        return rate_name + period_times + \"涨幅大于3%\"\n",
    "    elif (future_rate-current_rate) / current_rate > 0.01:\n",
    "        return rate_name + period_times + \"涨幅大于1%\"\n",
    "    elif (future_rate-current_rate) / current_rate < -0.1:\n",
    "        return rate_name + period_times + \"跌幅大于10%\"\n",
    "    elif (future_rate-current_rate) / current_rate < -0.05:\n",
    "        return rate_name + period_times + \"跌幅大于5%\"\n",
    "    elif (future_rate-current_rate) / current_rate < -0.03:\n",
    "        return rate_name + period_times + \"跌幅大于3%\"\n",
    "    elif (future_rate-current_rate) / current_rate < -0.01:\n",
    "        return rate_name + period_times + \"跌幅大于1%\"\n",
    "    else:\n",
    "        # return rate_name + period_times+\"平稳\"\n",
    "        return None\n",
    "\n",
    "def make_reports(rates_data):\n",
    "    row_num, column_num = len(rates_data), 12\n",
    "    r=row_num-1\n",
    "    next_days = [7, 30, 180]\n",
    "    reports = []\n",
    "    for r in range(0, row_num):\n",
    "        report_text = ''\n",
    "        for i in range(len(next_days)):        \n",
    "            sell_day_rates=find_nextdays_x(rates_data,r, next_days[i])\n",
    "            if sell_day_rates is None:\n",
    "                continue\n",
    "            for j in range(2, column_num):\n",
    "                report = smart_analysis(RATE_NAMES[j], rates_data[r][j], sell_day_rates[j], next_days[i])\n",
    "                if report != None:\n",
    "                    report_text+=report+\"。\"\n",
    "        if len(report_text)>0:\n",
    "            reports.append(report_text)\n",
    "        else:\n",
    "            reports.append(\"目前而言，各货币较稳定\")\n",
    "\n",
    "    return reports\n",
    "\n",
    "df = pd.read_excel('rate18-24-cleaned-with-news.xlsx')\n",
    "data_without_titles = df.values\n",
    "rates_data=list(data_without_titles)\n",
    "reports = make_reports(rates_data)\n",
    "\n",
    "import numpy as np\n",
    "data_list = [] \n",
    "for i in range(len(rates_data)):\n",
    "    row_list = list(rates_data[i])\n",
    "    row_list.append(reports[i])\n",
    "    data_list.append(np.array(row_list))\n",
    "\n",
    "df_output = pd.DataFrame(data_list, columns=df.columns.append(pd.Index([\"report\"])))\n",
    "# # 将DataFrame写入Excel文件\n",
    "df_output.to_excel('rate18-24-cleaned-with-news-and-report.xlsx', index=False)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Step2：训练GPT2模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# %pip install openpyxl -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from transformers import GPT2LMHeadModel, GPT2Tokenizer\n",
    "\n",
    "# 1. 加载预训练模型和分词器\n",
    "my_first_train = True # 若已有模型的话改为False\n",
    "if (my_first_train):\n",
    "    model_name = 'gpt2'\n",
    "else:\n",
    "    model_name = './model/rate_gpt2'\n",
    "model = GPT2LMHeadModel.from_pretrained(model_name)  # 加载预训练的GPT-2语言模型\n",
    "tokenizer = GPT2Tokenizer.from_pretrained(model_name)  # 加载对应的GPT-2分词器\n",
    "tokenizer.pad_token = tokenizer.eos_token\n",
    "QUESTION_TOKEN_END = '<|endofquestion|>'\n",
    "special_tokens_dict = {'end_of_question': [QUESTION_TOKEN_END]}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.1 准备待训练数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "import pandas as pd\n",
    "\n",
    "def join_news(news_list):\n",
    "    return ';'.join(news_list)\n",
    "\n",
    "def squeeze_train_datas(train_datas):\n",
    "    squeeze_datas = []\n",
    "    for data in train_datas:\n",
    "        news_list = eval(data[0])\n",
    "        if len(news_list)==0:\n",
    "            continue\n",
    "        report = data[1]\n",
    "        #squeeze_datas.append(tokenizer.bos_token + join_news(news_list) + tokenizer.eos_token)\n",
    "        #squeeze_datas.append(tokenizer.bos_token + report + tokenizer.eos_token)\n",
    "        squeeze_datas.append(tokenizer.bos_token + join_news(news_list) + QUESTION_TOKEN_END +\n",
    "                                report + tokenizer.eos_token)\n",
    "    random.shuffle(squeeze_datas)\n",
    "    return squeeze_datas\n",
    "\n",
    "df = pd.read_excel('rate18-24-cleaned-with-news-and-report.xlsx', usecols=['news', 'report'])\n",
    "data_without_titles = df.values\n",
    "train_datas = list(data_without_titles)\n",
    "squeeze_datas = squeeze_train_datas(train_datas)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['<|endoftext|>外汇欧市：美元指数于五周高位附近徘徊 美国通胀降温速度恐放缓;英镑多头小心！八大机构预估英国经济，整体仍不乐观;国际金价跌势受限，先导指标利好空头，但待CPI数据加油;美国1月CPI报告即将出炉，或成金价下一个重要催化剂！;金价承压专家仍给市场打气，称若1850支撑能守住就是买点！;利好澳元！澳洲联储3月料加息25基点，掉期交易员认为概率高达80%;2月13日汇市观潮：欧元、英镑和日元技术分析;黄金交易提醒：金价承压等待美国CPI数据，多数分析师看空后市;黄金市场分析：若反弹突破1878.80美元，将推动多头复苏;外汇交易提醒：美元试探55日均线阻力，投资者等待美国通胀数据<|endofquestion|>日元7天后跌幅大于1%。欧元30天后涨幅大于1%。英镑30天后涨幅大于1%。澳元30天后跌幅大于1%。瑞士法郎30天后涨幅大于1%。加拿大币30天后跌幅大于1%。美元180天后涨幅大于5%。欧元180天后涨幅大于5%。日元180天后跌幅大于3%。港元180天后涨幅大于5%。英镑180天后涨幅大于10%。新加坡币180天后涨幅大于3%。瑞士法郎180天后涨幅大于10%。加拿大币180天后涨幅大于5%。<|endoftext|>', '<|endoftext|>COT报告：日元交易员转为净多头敞口;两组值得关注的资产：欧元/美元、WTI原油期货;关注日本央行行长植田和男国会讲话 日元升值趋势或将延续;欧元大反弹！日元汇率转跌，本周全看鲍威尔?【外汇周报】;非农报告本周要下修100万？！黄金与美元会如何反应？;现货黄金技术分析：黄金飙升至历史新高;鲍威尔全球央行年会演讲 FOMC会议纪要 塔吉特、梅西百货财报引关注【一周前瞻】;日元涨超1%！投资者押注日本加息，本周美日演讲能否送惊喜？;日元暴涨近240点，分析师：“投资者还没有放弃对日本央行加息的押注”;美国金融专家：做到五点，才能从美联储多年来的首次降息中获益;8月19日汇市观潮：日元和澳元技术分析;鲍威尔料确认美联储将降息，但不透露降息幅度！;里程碑时刻！对冲基金3年来首次净看多日元;澳元兑美元技术面看涨，若突破0.6740将进一步升向0.6798！;美元兑日元延续上周五跌势，市场焦点转向本周鲍威尔讲话;芝加哥联储主席质疑特朗普、哈里斯的通胀观点，强调美联储仍在考虑其利率决定;杰克森霍尔会议来袭，鲍威尔不太可能释放降息50基点的信号！;金融分析师：以当前金价支持美元发行总量，可以避免美元崩溃;美银看涨英镑汇率，预测年底镑美升至1.35！;本周展望：三大风险因素笼罩市场，黄金后市前景看好;黄金交易提醒：金价攻克2500关口，刷新历史高点，本周将迎杰克逊霍尔年会<|endofquestion|>欧元7天后涨幅大于1%。日元7天后涨幅大于1%。英镑7天后涨幅大于1%。澳元7天后涨幅大于1%。新西兰币7天后涨幅大于1%。瑞士法郎7天后涨幅大于1%。日元30天后涨幅大于3%。新西兰币30天后涨幅大于1%。瑞士法郎30天后涨幅大于1%。<|endoftext|>', '<|endoftext|>欧银二把手意外放鹰欧元欲再攻1.10，晚间美联储纪要或送暖意;金银T+D纷纷大涨，多重风险因素共振；金价欲摆脱千五关口束缚，需要它来加点料;INE原油一度飙升6%，四大利多因素发酵；但收盘回吐逾半数涨幅，需求前景仍悲观;脱欧期限即使延长至明年6月，英镑回升或仍仅昙花一现，后市继续看空情理之中;外汇欧盘：分析师料美联储年内还将降息两次 美元回吐周二全部涨幅;重磅！欧盟或就脱欧协商作出重大让步，英镑快拉60逾点直逼1.23;国际金价坚守上日升势，贸易前景不佳打压风险偏好；美联储宽松或再出新牌;两大地缘风险来袭，重质原油供应告急！美国炼厂或成最大受害者，短线油价看涨？;欧市盘前：美联储纪要料为10月降息定向！避险情绪升温，黄金或正酝酿大行情;美联储年内降息预期再度升温，金价受多重利好推升;默克尔给脱欧谈判泼冷水不可怕！欧盟峰会前维持谈判，英镑或仍有翻身之机;外汇亚盘：脱欧谈判遇阻拖累英镑低位徘徊 
约翰逊甩锅给默克尔;10月9日现货黄金、白银、原油、外汇短线交易策略;无协议脱欧成定局？英国、欧盟相互推诿塞责，投资者顺势加码看空英镑;美国9月PPI表现远低于预期，现货金价短线攀升3美元站稳1500关口;纽市盘前：有序脱欧希望渺茫，英镑创逾一个月新低；贸易风险不减，黄金反弹近20美元;现货金价重新站上1500美元，英国硬脱欧的担忧情绪爆发，稍后关注鲍威尔讲话;10月9日外汇交易提醒;10月9日财经早餐：贸易忧虑重燃，美元坚挺黄金多头反击，英镑创近五周新低，关注美联储会议纪要<|endofquestion|>日元7天后跌幅大于1%。英镑7天后涨幅大于3%。新西兰币7天后跌幅大于1%。瑞士法郎7天后跌幅大于1%。美元30天后跌幅大于1%。欧元30天后跌幅大于1%。日元30天后跌幅大于3%。港元30天后跌幅大于1%。英镑30天后涨幅大于1%。新西兰币30天后跌幅大于1%。瑞士法郎30天后跌幅大于1%。加拿大币30天后跌幅大于1%。美元180天后跌幅大于1%。欧元180天后跌幅大于1%。日元180天后跌幅大于3%。澳元180天后跌幅大于10%。新西兰币180天后跌幅大于5%。新加坡币180天后跌幅大于3%。加拿大币180天后跌幅大于5%。<|endoftext|>', '<|endoftext|>美加谈判暂无果加元续萎靡，坐等加银决议及美国非农审判;英国制造业PMI创逾两年新低，英镑急挫后市仍需聚焦脱欧;欧市盘前：非农来袭或暗示12月加息可能性，美加僵局令非美全线承压;全球金市：金价下跌，因贸易紧张关系提振美元;外汇市场本周展望：贸易局势阴云不散 美国8月非农来袭;埃尔多安誓言抛弃美元，指责美国的行为像“野狼”;外汇 - USD/JPY在亚洲盘口下跌;《全球汇市》美元持坚，受全球贸易紧张局势升级的支撑;9月3日现货黄金、白银、原油、外汇短线交易策略;黄金交易提醒：风向突变！美元热或再度兴起，黄金恐又要坠落深崖;《全球汇市》美元小幅上涨，因全球贸易紧张局势升温;外汇 - EUR/JPY在亚洲盘口下跌;外汇 - NZD/USD在亚洲盘口下跌;9月3日财经早餐：美国非农来袭，澳加将公布利率决议;外汇 - USD/JPY在亚洲盘口上升<|endofquestion|>日元30天后跌幅大于1%。港元30天后涨幅大于1%。英镑30天后涨幅大于1%。新西兰币30天后跌幅大于1%。瑞士法郎30天后跌幅大于1%。加拿大币30天后涨幅大于1%。美元180天后跌幅大于1%。欧元180天后跌幅大于3%。日元180天后跌幅大于1%。港元180天后跌幅大于1%。澳元180天后跌幅大于3%。新西兰币180天后涨幅大于1%。瑞士法郎180天后跌幅大于3%。加拿大币180天后跌幅大于3%。<|endoftext|>', '<|endoftext|>纽市盘前：英银排斥负利率，英镑创五个月新高；美国就业市场吹暖风，美油探底回升逾2%;国际金价续创纪录新高，日线五连升在即！美元周期性弱势形成，甚至可能面临历史性逆转;英镑刷新近五个月高位，短线有望上攻1.32；但英银8月决议鹰鸽参半，多头心腹大患未消;黄金T+D再创逾16年新高，白银T+D暴涨逾7%！美元指数持续疲弱，美国两党有根难啃的骨头;英国央行行长贝利重申：“负利率”政策不在考虑动用范围内，英镑短线上浮近20点;外汇欧盘：英镑大涨超0.5%创5个月新高 
警惕脱欧不确定性拖累后市;现货金价持稳于历史高位附近，疫情下美国经济放缓担忧升温;INE原油收涨，EIA库存超预期下降，且弱势美元利多大宗商品，但多头仍有理由保持谨慎;英银决议维持利率不变，但经济预期较为乐观+未提及负利率，英镑急拉50点刷新5个月高点;欧市盘前：英银或按兵不动，日本无意重启紧急状态，黄金利多因素不断强化;原油交易提醒：美油自五个月高点冲高回落，因需求前景仍是多头心病，短线关注200日均线压力;黄金交易提醒：近期基调是继续走高！美国就业市场疲软为多头再添筹码，日内关注失业金数据;英银本周料维稳，但或下调经济预估！疫情和脱欧谈判令人忧，英镑短期进一步上行难度大;英银决议前瞻：8月只是“过渡”？或在11月加大购债规模；英镑涨势恐是强弩之末;8月6日外汇交易提醒;8月6日财经早餐：美元接近逾二年低位，黄金升上2050续刷历史新高，英银利率决议来袭<|endofquestion|>日元7天后跌幅大于1%。新西兰币7天后跌幅大于1%。美元30天后跌幅大于1%。欧元30天后跌幅大于1%。日元30天后跌幅大于1%。港元30天后跌幅大于1%。新加坡币30天后跌幅大于1%。瑞士法郎30天后跌幅大于1%。美元180天后跌幅大于5%。欧元180天后跌幅大于5%。日元180天后跌幅大于10%。港元180天后跌幅大于5%。英镑180天后跌幅大于1%。澳元180天后跌幅大于5%。新西兰币180天后跌幅大于1%。新加坡币180天后跌幅大于5%。瑞士法郎180天后跌幅大于5%。加拿大币180天后跌幅大于1%。<|endoftext|>']\n"
     ]
    }
   ],
   "source": [
    "print(squeeze_datas[:5])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "from torch.utils.data import Dataset, DataLoader\n",
     "class CustomDataset(Dataset):\n",
     "    \"\"\"Wraps the tokenized training strings as a tensor of input_ids.\"\"\"\n",
     "    def __init__(self, squeeze_datas, train=True):\n",
     "        self.squeeze_datas = squeeze_datas\n",
     "        self.train = train  # NOTE(review): stored but never read — confirm intent\n",
     "        # Tokenize everything up front; padding=True pads to the longest sample.\n",
     "        self.input_ids = tokenizer(squeeze_datas, truncation=True, padding=True, return_tensors=\"pt\").input_ids\n",
     " \n",
     "    def __len__(self):\n",
     "        return self.input_ids.shape[0]\n",
     " \n",
     "    def __getitem__(self, idx):\n",
     "        return self.input_ids[idx]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
     "dataset1 = CustomDataset(squeeze_datas, train=True)\n",
     "# num_workers=0: data is already tokenized in memory; no worker processes needed.\n",
     "loader_train = DataLoader(dataset1, batch_size=32, shuffle=True, num_workers=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "use cuda\n"
     ]
    }
   ],
   "source": [
    "USE_GPU = True\n",
    "dtype = torch.long # We will be using float throughout this tutorial.\n",
    "print_every = 100\n",
    "\n",
    "if USE_GPU and torch.cuda.is_available():\n",
    "    device = torch.device('cuda')\n",
    "    print(\"use cuda\")\n",
    "else:\n",
    "    device = torch.device('cpu')\n",
    "    print(\"use cpu\")\n",
    "model = model.to(device=device) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/100, 45 , Loss: 0.9074509143829346\n",
      "Epoch 2/100, 45 , Loss: 0.8633137941360474\n",
      "Epoch 3/100, 45 , Loss: 0.944281816482544\n",
      "Epoch 4/100, 45 , Loss: 0.8731291890144348\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[7], line 8\u001b[0m\n\u001b[1;32m      6\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m t, (input_ids) \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(loader_train):\n\u001b[1;32m      7\u001b[0m     optimizer\u001b[38;5;241m.\u001b[39mzero_grad()  \u001b[38;5;66;03m# 清空梯度\u001b[39;00m\n\u001b[0;32m----> 8\u001b[0m     input_ids \u001b[38;5;241m=\u001b[39m \u001b[43minput_ids\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mto\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdevice\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdtype\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m      9\u001b[0m     att \u001b[38;5;241m=\u001b[39m model(input_ids, labels\u001b[38;5;241m=\u001b[39minput_ids)  \u001b[38;5;66;03m# 前向传播计算损失\u001b[39;00m\n\u001b[1;32m     10\u001b[0m     att\u001b[38;5;241m.\u001b[39mloss\u001b[38;5;241m.\u001b[39mbackward()  \u001b[38;5;66;03m# 反向传播计算梯度\u001b[39;00m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
     "\n",
     "optimizer = optim.Adam(model.parameters(), lr=5e-5)  # Adam optimizer, lr=5e-5\n",
     "\n",
     "num_epochs = 100  # number of training epochs\n",
     "model.train()  # switch to training mode (enables dropout etc.)\n",
     "for epoch in range(num_epochs):\n",
     "    for t, (input_ids) in enumerate(loader_train):\n",
     "        optimizer.zero_grad()  # clear accumulated gradients\n",
     "        input_ids = input_ids.to(device=device, dtype=dtype)\n",
     "        # labels=input_ids gives the standard causal-LM loss (shifted internally).\n",
     "        # NOTE(review): no attention_mask is passed, so padding (= EOS) tokens\n",
     "        # also contribute to the loss — confirm this is acceptable.\n",
     "        att = model(input_ids, labels=input_ids)  # forward pass, computes loss\n",
     "        att.loss.backward()  # backward pass, computes gradients\n",
     "        optimizer.step()  # update parameters\n",
     "    # Prints only the LAST batch's loss of the epoch, not an epoch average.\n",
     "    print(f\"Epoch {epoch + 1}/{num_epochs}, {t} , Loss: {att.loss.item()}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.2 保存模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "('./model/rate_gpt2/tokenizer_config.json',\n",
       " './model/rate_gpt2/special_tokens_map.json',\n",
       " './model/rate_gpt2/vocab.json',\n",
       " './model/rate_gpt2/merges.txt',\n",
       " './model/rate_gpt2/added_tokens.json')"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# safe_serialization=False writes the legacy pytorch_model.bin format.\n",
     "model.save_pretrained('./model/rate_gpt2',safe_serialization=False)  # save trained model\n",
     "tokenizer.save_pretrained('./model/rate_gpt2')  # save tokenizer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.3 预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "The attention mask is not set and cannot be inferred from input because pad token is same as eos token. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "美元/加元价格预测：刷新两周高点至1.4350附近;美元反弹，致澳元逼近0.6300支撑;还是未能上破1.05，欧元/美元会继续震荡吗？;技术面震荡，纽元/美元短期或下行;美元指数持续疲软！106.52成为关键生死线？能否突破阻力开启反弹？;WTI原油跌破70美元，因关税担忧进一步恶化市场情绪;日元进一步下行风险显现，关键支撑位考验;欧元/美元分析：1.05关口是本季度交易区间的上限？;黄金多空生死线2918近在咫尺！今夜能否突破引爆行情？;特朗普要被印上美钞？网友扎心评论：250可不是什么好数字;英国首相斯塔默豪掷千亿，国防开支飙升，向特朗普传递强硬信号！<|endofquestion|>美元30天后涨幅大于1%。日元30天后跌幅大于1%。港元30天后涨幅大于1%。英镑30天后涨幅大于1%。澳元30天后跌幅大于1%。新西兰币30天后跌幅大于1%。美元180天后涨幅大于5%。欧元180天后涨幅大于5%。日元180天后跌幅大于5%。港元180天后涨幅大于5%。英镑180天后涨幅大于1%。澳元180天后跌幅大于1%。新西兰币180天后跌幅大于3%。新加坡币180天后涨幅大于3%。瑞士法郎180天后涨幅大于5%。加拿大币180天后涨幅大于1%。\n"
     ]
    }
   ],
   "source": [
     "# Example inference: build the prompt (news + separator) and generate a report.\n",
     "news_list = ['美元/加元价格预测：刷新两周高点至1.4350附近', \n",
     "    '美元反弹，致澳元逼近0.6300支撑', \n",
     "    '还是未能上破1.05，欧元/美元会继续震荡吗？', \n",
     "    '技术面震荡，纽元/美元短期或下行', \n",
     "    '美元指数持续疲软！106.52成为关键生死线？能否突破阻力开启反弹？',\n",
     "    'WTI原油跌破70美元，因关税担忧进一步恶化市场情绪',\n",
     "    '日元进一步下行风险显现，关键支撑位考验',\n",
     "    '欧元/美元分析：1.05关口是本季度交易区间的上限？',\n",
     "    '黄金多空生死线2918近在咫尺！今夜能否突破引爆行情？',\n",
     "    '特朗普要被印上美钞？网友扎心评论：250可不是什么好数字',\n",
     "    '英国首相斯塔默豪掷千亿，国防开支飙升，向特朗普传递强硬信号！']\n",
     "input_text = tokenizer.bos_token + join_news(news_list) + QUESTION_TOKEN_END\n",
     "inputs = tokenizer.encode(input_text, return_tensors='pt')\n",
     "inputs = inputs.to(device=device)\n",
     "# NOTE(review): max_length=10240 exceeds GPT-2's 1024-token position limit;\n",
     "# generation only succeeds because EOS is produced early — consider capping\n",
     "# at 1024.  No attention_mask is passed, hence the warning in the output.\n",
     "output = model.generate(inputs, max_length=10240, num_return_sequences=1)\n",
     "\n",
     "generated_text = tokenizer.decode(output[0], skip_special_tokens=True)\n",
     "print(generated_text)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
