{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# import sys\n",
    "# sys.path # python导入package时自动搜索的路径\n",
    "# sys.path.append(\"/root/Docqa\")\n",
    "import sys\n",
    "sys.path\n",
    "sys.path.append(\"/root/\")  \n",
    "from langchain.text_splitter import CharacterTextSplitter,RecursiveCharacterTextSplitter\n",
    "from langchain.document_loaders import Docx2txtLoader\n",
    "from langchain.document_loaders import PyPDFLoader, ArxivLoader,PyPDFDirectoryLoader\n",
    "from Docqa.text_spliter import ChineseTextSplitter\n",
    "import PyPDF2 # pdf reader\n",
    "from pypdf import PdfReader\n",
    "from langchain.chains import RetrievalQA, ConversationalRetrievalChain\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "# from langchain.chains import ConversationalRetrievalChain\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "loader = Docx2txtLoader('/data/documents/分布式计算系统.docx')\n",
    "input_doc = loader.load()\n",
    "text_splitter = CharacterTextSplitter(chunk_size=650, chunk_overlap=0)\n",
    "texts = text_splitter.split_documents(input_doc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "loader = PyPDFLoader(\"/data/documents/深度学习_582660_231113.pdf\")\n",
    "pages = loader.load()\n",
    "pdf_splitter = ChineseTextSplitter(chunk_size=650, chunk_overlap=0, pdf=True)\n",
    "pdf_text = pdf_splitter.split_documents(pages)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "6"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "loader = PyPDFLoader(\"/data/documents/深度学习_582660_231113.pdf\")\n",
    "pages = loader.load()\n",
    "pdf_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=165)\n",
    "pdf_text = pdf_splitter.split_documents(pages)\n",
    "len(pdf_text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/llm-tutorial/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "2024-01-21 07:27:30,256 - WARNING - BigdlNativeEmbeddings has been deprecated, please switch to the new LMEmbeddings API for sepcific models.\n",
      "The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization. \n",
      "The tokenizer class you load from this checkpoint is 'ChatGLMTokenizer'. \n",
      "The class this function is called from is 'LlamaTokenizer'.\n",
      "You are using the legacy behaviour of the <class 'transformers.models.llama.tokenization_llama.LlamaTokenizer'>. This means that tokens that come after special tokens will not be properly handled. We recommend you to read the related pull request available at https://github.com/huggingface/transformers/pull/24565\n",
      "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n",
      "You are using a model of type chatglm3 to instantiate a model of type chatglm. This is not supported for all configurations of models and can yield errors.\n",
      "Loading checkpoint shards: 100%|██████████| 7/7 [00:06<00:00,  1.16it/s]\n",
      "2024-01-21 07:27:40,406 - INFO - Converting the current model to sym_int4 format......\n"
     ]
    }
   ],
   "source": [
    "from bigdl.llm.langchain.embeddings import TransformersEmbeddings\n",
    "\n",
    "embeddings = TransformersEmbeddings.from_model_id(model_id=\"/data/chatglm3-6B/\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-01-21 07:28:20,356 - INFO - Loading faiss with AVX2 support.\n",
      "2024-01-21 07:28:20,388 - INFO - Successfully loaded faiss with AVX2 support.\n"
     ]
    }
   ],
   "source": [
    "from langchain.vectorstores import FAISS\n",
    "\n",
    "vectorstore_db = FAISS.from_documents(pdf_text, embeddings)\n",
    "retriever = vectorstore_db.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 6})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.prompts import PromptTemplate\n",
    "custom_prompt_template = \"\"\"\n",
    "You are an assistant for question-answering tasks. Use the retrieved {context} and answer the question at the end.\n",
    "If you don't know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\n",
    "History = {history}\n",
    "Question= {question}\n",
    "Don't add text above in the result.\n",
    "Helpful Answer:\n",
    "\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(template=custom_prompt_template,\n",
    "                        input_variables=[\"question\", \"context\", \"history\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-01-21 07:28:27,771 - WARNING - BigdlNativeLLM has been deprecated, please switch to the new LLM API for sepcific models.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization. \n",
      "The tokenizer class you load from this checkpoint is 'ChatGLMTokenizer'. \n",
      "The class this function is called from is 'LlamaTokenizer'.\n",
      "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n",
      "You are using a model of type chatglm3 to instantiate a model of type chatglm. This is not supported for all configurations of models and can yield errors.\n",
      "Loading checkpoint shards: 100%|██████████| 7/7 [00:07<00:00,  1.13s/it]\n",
      "2024-01-21 07:28:35,847 - INFO - Converting the current model to sym_int4 format......\n"
     ]
    }
   ],
   "source": [
    "from langchain.chains.chat_vector_db.prompts import QA_PROMPT\n",
    "from langchain.chains.question_answering import load_qa_chain\n",
    "\n",
    "from bigdl.llm.langchain.llms import TransformersLLM\n",
    "\n",
    "llm = TransformersLLM.from_model_id(\n",
    "        model_id=\"/data/chatglm3-6B/\",\n",
    "        model_kwargs={\"temperature\": 0, \"max_length\": 4096, \"trust_remote_code\": True},\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "qqqqqqqqqqq\n",
      "context\n"
     ]
    }
   ],
   "source": [
    "qa_chain_with_memory = RetrievalQA.from_chain_type(llm=llm, chain_type='stuff',\n",
    "                                                   retriever = vectorstore_db.as_retriever(),\n",
    "                                                   return_source_documents = False,\n",
    "                                                   chain_type_kwargs = {\"verbose\": False,\n",
    "                                                                        \"prompt\": prompt,\n",
    "                                                                        \"memory\": ConversationBufferMemory(\n",
    "                                                                            input_key=\"question\",\n",
    "                                                                            memory_key=\"history\",\n",
    "                                                                            return_messages=False)})\n",
    "\n",
    "# memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
    "# qa_chain_with_memory = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff', retriever=retriever, memory=memory)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/llm-tutorial/lib/python3.9/site-packages/langchain_core/_api/deprecation.py:117: LangChainDeprecationWarning: The function `__call__` was deprecated in LangChain 0.1.0 and will be removed in 0.2.0. Use invoke instead.\n",
      "  warn_deprecated(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "pppppppppppp\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/llm-tutorial/lib/python3.9/site-packages/transformers/generation/utils.py:1369: UserWarning: Using `max_length`'s default (4096) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "文章提出了基于深度学习的文章摘要生成方法。该方法通过训练深度神经网络来学习文章的特征表示，并使用该表示来生成摘要。\n",
      "\n",
      "\n",
      "\n",
      "-------------------------\n",
      "文章提出了基于深度学习的文章摘要生成方法。该方法通过训练深度神经网络来学习文章的特征表示，并使用该表示来生成摘要。\n",
      "\n",
      " \n",
      "\n",
      "++++++++++++++\n",
      "文章提出了基于深度学习的文章摘要生成方法。该方法通过训练深度神经网络来学习文章的特征表示，并使用该表示来生成摘要。\n",
      "\n",
      " \n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'query': '这篇文章提出了什么方法?',\n",
       " 'result': '文章提出了基于深度学习的文章摘要生成方法。该方法通过训练深度神经网络来学习文章的特征表示，并使用该表示来生成摘要。\\n\\n \\n'}"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "query = \"这篇文章提出了什么方法?\"\n",
    "qa_chain_with_memory({\"query\": query})\n",
    "# ret = qa_chain_with_memory({\"question\": query})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/llm-tutorial/lib/python3.9/site-packages/transformers/generation/utils.py:1369: UserWarning: Using `max_length`'s default (4096) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "文章使用的数据集是什么？\n"
     ]
    },
    {
     "ename": "",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31mThe Kernel crashed while executing code in the the current cell or a previous cell. Please review the code in the cell(s) to identify a possible cause of the failure. Click <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. View Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details."
     ]
    }
   ],
   "source": [
    "query = \"介绍一下论文使用的数据集\"\n",
    "ret = qa_chain_with_memory({\"question\": query})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new StuffDocumentsChain chain...\u001b[0m\n",
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mYou are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\n",
      "If you don't know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\n",
      "Context= are often correct, thus thresholding ﬁlters out erroneous low-\n",
      "conﬁdence predictions, leading to a boost in the sign accu-\n",
      "racy.\n",
      "Another highlight is the signiﬁcant gap that CTTS has\n",
      "achieved for thresholded 3-class∗accuracy compared to\n",
      "other baselines. This can be harnessed in the future for trad-\n",
      "ing decision-making. For example, a straightforward trad-\n",
      "ing decision can be buy/sell/hold stocks when the predicted\n",
      "class is up/down/ﬂat, respectively. Given that CTTS’s pre-\n",
      "dicted probablitlies are reliable as discussed earlier, the\n",
      "amount of stock shares to buy/sell/hold can depend on the\n",
      "\n",
      "and 4) naive constant class predictions. We benchmarked the\n",
      "performance using sign prediction accuracy and thresholded\n",
      "version of the sign prediction accuracy (discussed later) us-\n",
      "ing our model prediction probabilities. We ran our experi-\n",
      "ments on a Linux machine with 8 16GB NVIDIA T4 Ten-\n",
      "sor Core GPUs, and using PyTorch v1.0.0 DL platform in\n",
      "Python 3.6. In all models, we set a ﬁxed random seed for\n",
      "reproducible results.\n",
      "Experimental setup\n",
      "Data We used the intraday stock prices of S&P 500 con-\n",
      "stituent stocks obtained from licensed Bloomberg data ser-\n",
      "vice (blo 2022). The data was sampled at 1 minute interval\n",
      "for the year 2019 (52 weeks, each week has 5 trading days).\n",
      "For every stock, we sampled 7 time series for each week.\n",
      "Data from the ﬁrst three quarters (weeks 1 to 39) were used\n",
      "for training and validation. Data was randomly split and 80%\n",
      "was used for training and the remaining 20% for validation.\n",
      "\n",
      "patience of 15. The base learning rate was 1e-3, adjusted by\n",
      "learning rate scheduler with decay factor 0.1 and a patience\n",
      "of 5. We also used dropout with probability 0.1. DeepAR\n",
      "generated multiple (200) samples of the prediction target for\n",
      "each time series and we deﬁned the prediction probability\n",
      "over the three classes as the proportion of samples predicted\n",
      "per class.\n",
      "ARIMA Autoregressive Integrated Moving Average\n",
      "(ARIMA) models capture autocorrelations in the data\n",
      "using a combination approach of autoregressive model,\n",
      "moving average model, and differencing (Wilks 2011). We\n",
      "compared the continuous valued ARIMA forecasts with the\n",
      "last known price in the input data to deﬁne the predicted\n",
      "sign, and the corresponding prediction probability was\n",
      "deﬁned as the percentage of the absolute delta between\n",
      "the predicted and the last known price with respect to the\n",
      "standard deviation of the past 80 data points, capped by 1.\n",
      "EMA An exponential moving average (EMA) is a type of\n",
      "\n",
      "with the performance evaluation metrics and report the ex-\n",
      "perimental results. Finally, we highlight some concluding re-\n",
      "marks and future directions for this study.\n",
      "Method\n",
      "Our proposed method is called CNN and Transformer based\n",
      "timeseries modeling (CTTS) as shown in overview Figure 1.\n",
      "Preprocessing\n",
      "We used standard min-max scaling to standardize each input\n",
      "time series within [0,1]. Given a raw stock prices time series\n",
      "x, the standardized time series were calculated as\n",
      "xstandardized =x−min(x)\n",
      "max( x)−min(x)\n",
      "Thisxstandardized is then passed into our model to learn the\n",
      "sign of the change in the very next time step, which is de-\n",
      "ﬁned as our prediction target.\n",
      "Forecasting\n",
      "As shown in Figure 1, we use 1D CNN kernels to convo-\n",
      "lute through a time series, projecting each local window intoan embedding vector that we call a token. Each token car-\n",
      "ries the short-term patterns of the time series. we then add\n",
      "positional embedding (Vaswani et al. 2017) to the token and\n",
      "History = [HumanMessage(content='what is the main theory of the paper?', additional_kwargs={}, example=False), AIMessage(content=\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\\nIf you don't know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\\nContext= and long-term dependencies within a time series, and forecast\\nif the price would go up, down or remain the same (ﬂat) in\\nthe future. In our experiments, we demonstrated the success\\nof the proposed method in comparison to commonly adopted\\nstatistical and deep learning methods on forecasting intraday\\nstock price change of S&P 500 constituents.\\nIntroduction\\nTime series forecasting is challenging, especially in the ﬁ-\\nnancial industry (Pedersen 2019). It involves statistically\\nunderstanding complex linear and non-linear interactions\\nwithin historical data to predict the future. In the ﬁnan-\\ncial industry, common applications for forecasting include\\npredicting buy/sell or positive/negative price changes for\\ncompany stocks traded on the market. Traditional statis-\\ntical approaches commonly adapt linear regression, expo-\\nnential smoothing (Holt 2004; Winters 1960; Gardner Jr\\nand McKenzie 1985) and autoregression models (Makri-\\ndakis, Spiliotis, and Assimakopoulos 2020). With the ad-\\n\\ntistical tools, such as, exponential smoothing (ETS) (Holt\\n2004; Winters 1960; Gardner Jr and McKenzie 1985)\\nand autoregressive integrated moving average (ARIMA)\\n(Makridakis, Spiliotis, and Assimakopoulos 2020), on nu-\\nmerical time series data for making one-step-ahead predic-\\ntions. These predictions are then recursively fed into the\\nfuture inputs to obtain multi-step forecasts. Multi-horizon\\nforecasting methods such as (Taieb, Sorjamaa, and Bon-\\ntempi 2010; Marcellino, Stock, and Watson 2006) directly\\ngenerate simultaneous predictions for multiple pre-deﬁned\\nfuture time steps.\\nMachine learning and deep learning based approaches\\nMachine learning (ML) approaches have shown to improve\\nperformance by addressing high-dimensional and non-linear\\nfeature interactions in a model-free way. These methods in-\\nclude tree-based algorithms, ensemble methods, neural net-\\nwork, autoregression and recurrent neural networks (Hastie,\\nTibshirani, and Friedman 2001). More recent works have ap-\\n\\nFigure 1: Overview of the proposed approach. Quick peak of the transformer encoder architecture on the right (Dosovitskiy\\net al. 2020)\\nprove the performance of NLP applications. The commonly\\nused approach is to pre-train on a large dataset and then ﬁne-\\ntune on a smaller task-speciﬁc dataset (Devlin et al. 2018).\\nTransformers leverage from multi-headed self-attention and\\nreplace the recurrent layers most commonly used in encoder-\\ndecoder architectures. In contrast to RNNs and LSTMs\\nwhere the input data is sequentially processed, transformers\\nbypass the recursion to ingest all inputs at once; thus, trans-\\nformers allow for parallel computations to reduce training\\ntime and do not suffer from long-term memory dependency\\nissues.\\nThe remainder of the paper is organized as follows. We\\ndiscuss our proposed methodology for time series modeling\\nFurther, we discuss our benchmark baseline models along\\nwith the performance evaluation metrics and report the ex-\\nperimental results. Finally, we highlight some concluding re-\\n\\nare often correct, thus thresholding ﬁlters out erroneous low-\\nconﬁdence predictions, leading to a boost in the sign accu-\\nracy.\\nAnother highlight is the signiﬁcant gap that CTTS has\\nachieved for thresholded 3-class∗accuracy compared to\\nother baselines. This can be harnessed in the future for trad-\\ning decision-making. For example, a straightforward trad-\\ning decision can be buy/sell/hold stocks when the predicted\\nclass is up/down/ﬂat, respectively. Given that CTTS’s pre-\\ndicted probablitlies are reliable as discussed earlier, the\\namount of stock shares to buy/sell/hold can depend on the\\nHistory = []\\nQuestion= what is the main theory of the paper?\\nHelpful Answer:\\nThe main theory of the paper is the use of a transformer-based approach for time series forecasting, specifically for intraday stock price change prediction of S&P 500 constituents. The proposed method demonstrates success in comparison to traditional statistical and deep learning methods. The transformer architecture allows for parallel computations and bypasses the recursion issue in RNNs and LSTMs, making it suitable for handling long-term dependencies within a time series.\", additional_kwargs={}, example=False)]\n",
      "Question= simply introduce the data sets the paper used.\n",
      "Helpful Answer:\n",
      "\u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/llm-tutorial/lib/python3.9/site-packages/transformers/generation/utils.py:1369: UserWarning: Using `max_length`'s default (4096) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The paper used licensed Bloomberg data service for intraday stock prices of S&P 500 constituents for the year 2019, sampled at 1-minute interval for 52 weeks with 7 time series for each week. The data was randomly split into training (80%) and validation (20%) sets.\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'query': 'simply introduce the data sets the paper used.',\n",
       " 'result': 'You are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\\nIf you don\\'t know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\\nContext= are often correct, thus thresholding ﬁlters out erroneous low-\\nconﬁdence predictions, leading to a boost in the sign accu-\\nracy.\\nAnother highlight is the signiﬁcant gap that CTTS has\\nachieved for thresholded 3-class∗accuracy compared to\\nother baselines. This can be harnessed in the future for trad-\\ning decision-making. For example, a straightforward trad-\\ning decision can be buy/sell/hold stocks when the predicted\\nclass is up/down/ﬂat, respectively. Given that CTTS’s pre-\\ndicted probablitlies are reliable as discussed earlier, the\\namount of stock shares to buy/sell/hold can depend on the\\n\\nand 4) naive constant class predictions. We benchmarked the\\nperformance using sign prediction accuracy and thresholded\\nversion of the sign prediction accuracy (discussed later) us-\\ning our model prediction probabilities. We ran our experi-\\nments on a Linux machine with 8 16GB NVIDIA T4 Ten-\\nsor Core GPUs, and using PyTorch v1.0.0 DL platform in\\nPython 3.6. In all models, we set a ﬁxed random seed for\\nreproducible results.\\nExperimental setup\\nData We used the intraday stock prices of S&P 500 con-\\nstituent stocks obtained from licensed Bloomberg data ser-\\nvice (blo 2022). The data was sampled at 1 minute interval\\nfor the year 2019 (52 weeks, each week has 5 trading days).\\nFor every stock, we sampled 7 time series for each week.\\nData from the ﬁrst three quarters (weeks 1 to 39) were used\\nfor training and validation. Data was randomly split and 80%\\nwas used for training and the remaining 20% for validation.\\n\\npatience of 15. The base learning rate was 1e-3, adjusted by\\nlearning rate scheduler with decay factor 0.1 and a patience\\nof 5. We also used dropout with probability 0.1. DeepAR\\ngenerated multiple (200) samples of the prediction target for\\neach time series and we deﬁned the prediction probability\\nover the three classes as the proportion of samples predicted\\nper class.\\nARIMA Autoregressive Integrated Moving Average\\n(ARIMA) models capture autocorrelations in the data\\nusing a combination approach of autoregressive model,\\nmoving average model, and differencing (Wilks 2011). We\\ncompared the continuous valued ARIMA forecasts with the\\nlast known price in the input data to deﬁne the predicted\\nsign, and the corresponding prediction probability was\\ndeﬁned as the percentage of the absolute delta between\\nthe predicted and the last known price with respect to the\\nstandard deviation of the past 80 data points, capped by 1.\\nEMA An exponential moving average (EMA) is a type of\\n\\nwith the performance evaluation metrics and report the ex-\\nperimental results. Finally, we highlight some concluding re-\\nmarks and future directions for this study.\\nMethod\\nOur proposed method is called CNN and Transformer based\\ntimeseries modeling (CTTS) as shown in overview Figure 1.\\nPreprocessing\\nWe used standard min-max scaling to standardize each input\\ntime series within [0,1]. Given a raw stock prices time series\\nx, the standardized time series were calculated as\\nxstandardized =x−min(x)\\nmax( x)−min(x)\\nThisxstandardized is then passed into our model to learn the\\nsign of the change in the very next time step, which is de-\\nﬁned as our prediction target.\\nForecasting\\nAs shown in Figure 1, we use 1D CNN kernels to convo-\\nlute through a time series, projecting each local window intoan embedding vector that we call a token. Each token car-\\nries the short-term patterns of the time series. we then add\\npositional embedding (Vaswani et al. 2017) to the token and\\nHistory = [HumanMessage(content=\\'what is the main theory of the paper?\\', additional_kwargs={}, example=False), AIMessage(content=\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\\\\nIf you don\\'t know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\\\\nContext= and long-term dependencies within a time series, and forecast\\\\nif the price would go up, down or remain the same (ﬂat) in\\\\nthe future. In our experiments, we demonstrated the success\\\\nof the proposed method in comparison to commonly adopted\\\\nstatistical and deep learning methods on forecasting intraday\\\\nstock price change of S&P 500 constituents.\\\\nIntroduction\\\\nTime series forecasting is challenging, especially in the ﬁ-\\\\nnancial industry (Pedersen 2019). It involves statistically\\\\nunderstanding complex linear and non-linear interactions\\\\nwithin historical data to predict the future. In the ﬁnan-\\\\ncial industry, common applications for forecasting include\\\\npredicting buy/sell or positive/negative price changes for\\\\ncompany stocks traded on the market. Traditional statis-\\\\ntical approaches commonly adapt linear regression, expo-\\\\nnential smoothing (Holt 2004; Winters 1960; Gardner Jr\\\\nand McKenzie 1985) and autoregression models (Makri-\\\\ndakis, Spiliotis, and Assimakopoulos 2020). With the ad-\\\\n\\\\ntistical tools, such as, exponential smoothing (ETS) (Holt\\\\n2004; Winters 1960; Gardner Jr and McKenzie 1985)\\\\nand autoregressive integrated moving average (ARIMA)\\\\n(Makridakis, Spiliotis, and Assimakopoulos 2020), on nu-\\\\nmerical time series data for making one-step-ahead predic-\\\\ntions. These predictions are then recursively fed into the\\\\nfuture inputs to obtain multi-step forecasts. Multi-horizon\\\\nforecasting methods such as (Taieb, Sorjamaa, and Bon-\\\\ntempi 2010; Marcellino, Stock, and Watson 2006) directly\\\\ngenerate simultaneous predictions for multiple pre-deﬁned\\\\nfuture time steps.\\\\nMachine learning and deep learning based approaches\\\\nMachine learning (ML) approaches have shown to improve\\\\nperformance by addressing high-dimensional and non-linear\\\\nfeature interactions in a model-free way. These methods in-\\\\nclude tree-based algorithms, ensemble methods, neural net-\\\\nwork, autoregression and recurrent neural networks (Hastie,\\\\nTibshirani, and Friedman 2001). More recent works have ap-\\\\n\\\\nFigure 1: Overview of the proposed approach. Quick peak of the transformer encoder architecture on the right (Dosovitskiy\\\\net al. 2020)\\\\nprove the performance of NLP applications. The commonly\\\\nused approach is to pre-train on a large dataset and then ﬁne-\\\\ntune on a smaller task-speciﬁc dataset (Devlin et al. 2018).\\\\nTransformers leverage from multi-headed self-attention and\\\\nreplace the recurrent layers most commonly used in encoder-\\\\ndecoder architectures. In contrast to RNNs and LSTMs\\\\nwhere the input data is sequentially processed, transformers\\\\nbypass the recursion to ingest all inputs at once; thus, trans-\\\\nformers allow for parallel computations to reduce training\\\\ntime and do not suffer from long-term memory dependency\\\\nissues.\\\\nThe remainder of the paper is organized as follows. We\\\\ndiscuss our proposed methodology for time series modeling\\\\nFurther, we discuss our benchmark baseline models along\\\\nwith the performance evaluation metrics and report the ex-\\\\nperimental results. Finally, we highlight some concluding re-\\\\n\\\\nare often correct, thus thresholding ﬁlters out erroneous low-\\\\nconﬁdence predictions, leading to a boost in the sign accu-\\\\nracy.\\\\nAnother highlight is the signiﬁcant gap that CTTS has\\\\nachieved for thresholded 3-class∗accuracy compared to\\\\nother baselines. This can be harnessed in the future for trad-\\\\ning decision-making. For example, a straightforward trad-\\\\ning decision can be buy/sell/hold stocks when the predicted\\\\nclass is up/down/ﬂat, respectively. Given that CTTS’s pre-\\\\ndicted probablitlies are reliable as discussed earlier, the\\\\namount of stock shares to buy/sell/hold can depend on the\\\\nHistory = []\\\\nQuestion= what is the main theory of the paper?\\\\nHelpful Answer:\\\\nThe main theory of the paper is the use of a transformer-based approach for time series forecasting, specifically for intraday stock price change prediction of S&P 500 constituents. The proposed method demonstrates success in comparison to traditional statistical and deep learning methods. The transformer architecture allows for parallel computations and bypasses the recursion issue in RNNs and LSTMs, making it suitable for handling long-term dependencies within a time series.\", additional_kwargs={}, example=False)]\\nQuestion= simply introduce the data sets the paper used.\\nHelpful Answer:\\nThe paper used licensed Bloomberg data service for intraday stock prices of S&P 500 constituents for the year 2019, sampled at 1-minute interval for 52 weeks with 7 time series for each week. The data was randomly split into training (80%) and validation (20%) sets.',\n",
       " 'source_documents': [Document(page_content='are often correct, thus thresholding ﬁlters out erroneous low-\\nconﬁdence predictions, leading to a boost in the sign accu-\\nracy.\\nAnother highlight is the signiﬁcant gap that CTTS has\\nachieved for thresholded 3-class∗accuracy compared to\\nother baselines. This can be harnessed in the future for trad-\\ning decision-making. For example, a straightforward trad-\\ning decision can be buy/sell/hold stocks when the predicted\\nclass is up/down/ﬂat, respectively. Given that CTTS’s pre-\\ndicted probablitlies are reliable as discussed earlier, the\\namount of stock shares to buy/sell/hold can depend on the', metadata={'source': '/data/documents/2304.04912.pdf', 'page': 2}),\n",
       "  Document(page_content='and 4) naive constant class predictions. We benchmarked the\\nperformance using sign prediction accuracy and thresholded\\nversion of the sign prediction accuracy (discussed later) us-\\ning our model prediction probabilities. We ran our experi-\\nments on a Linux machine with 8 16GB NVIDIA T4 Ten-\\nsor Core GPUs, and using PyTorch v1.0.0 DL platform in\\nPython 3.6. In all models, we set a ﬁxed random seed for\\nreproducible results.\\nExperimental setup\\nData We used the intraday stock prices of S&P 500 con-\\nstituent stocks obtained from licensed Bloomberg data ser-\\nvice (blo 2022). The data was sampled at 1 minute interval\\nfor the year 2019 (52 weeks, each week has 5 trading days).\\nFor every stock, we sampled 7 time series for each week.\\nData from the ﬁrst three quarters (weeks 1 to 39) were used\\nfor training and validation. Data was randomly split and 80%\\nwas used for training and the remaining 20% for validation.', metadata={'source': '/data/documents/2304.04912.pdf', 'page': 1}),\n",
       "  Document(page_content='patience of 15. The base learning rate was 1e-3, adjusted by\\nlearning rate scheduler with decay factor 0.1 and a patience\\nof 5. We also used dropout with probability 0.1. DeepAR\\ngenerated multiple (200) samples of the prediction target for\\neach time series and we deﬁned the prediction probability\\nover the three classes as the proportion of samples predicted\\nper class.\\nARIMA Autoregressive Integrated Moving Average\\n(ARIMA) models capture autocorrelations in the data\\nusing a combination approach of autoregressive model,\\nmoving average model, and differencing (Wilks 2011). We\\ncompared the continuous valued ARIMA forecasts with the\\nlast known price in the input data to deﬁne the predicted\\nsign, and the corresponding prediction probability was\\ndeﬁned as the percentage of the absolute delta between\\nthe predicted and the last known price with respect to the\\nstandard deviation of the past 80 data points, capped by 1.\\nEMA An exponential moving average (EMA) is a type of', metadata={'source': '/data/documents/2304.04912.pdf', 'page': 2}),\n",
       "  Document(page_content='with the performance evaluation metrics and report the ex-\\nperimental results. Finally, we highlight some concluding re-\\nmarks and future directions for this study.\\nMethod\\nOur proposed method is called CNN and Transformer based\\ntimeseries modeling (CTTS) as shown in overview Figure 1.\\nPreprocessing\\nWe used standard min-max scaling to standardize each input\\ntime series within [0,1]. Given a raw stock prices time series\\nx, the standardized time series were calculated as\\nxstandardized =x−min(x)\\nmax( x)−min(x)\\nThisxstandardized is then passed into our model to learn the\\nsign of the change in the very next time step, which is de-\\nﬁned as our prediction target.\\nForecasting\\nAs shown in Figure 1, we use 1D CNN kernels to convo-\\nlute through a time series, projecting each local window intoan embedding vector that we call a token. Each token car-\\nries the short-term patterns of the time series. we then add\\npositional embedding (Vaswani et al. 2017) to the token and', metadata={'source': '/data/documents/2304.04912.pdf', 'page': 1})]}"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "query = \"simply introduce the data sets the paper used.\"\n",
    "qa_chain_with_memory({\"query\": query})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Token indices sequence length is longer than the specified maximum sequence length for this model (4873 > 4096). Running this sequence through the model will result in indexing errors\n",
      "/root/miniconda3/envs/llm-tutorial/lib/python3.9/site-packages/transformers/generation/utils.py:1369: UserWarning: Using `max_length`'s default (4096) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation.\n",
      "  warnings.warn(\n",
      "Input length of input_ids is 4873, but `max_length` is set to 4096. This can lead to unexpected behavior. You should consider increasing `max_new_tokens`.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new StuffDocumentsChain chain...\u001b[0m\n",
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mYou are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\n",
      "If you don't know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\n",
      "Context= work, autoregression and recurrent neural networks (Hastie,\n",
      "Tibshirani, and Friedman 2001). More recent works have ap-\n",
      "plied Deep learning (DL) methods on numeric time series\n",
      "data (Bao, Yue, and Rao 2017; Gensler et al. 2016; Romeu\n",
      "et al. 2015; Sagheer and Kotb 2019; Sutskever, Vinyals, and\n",
      "Le 2014). DL automates the process of feature extraction\n",
      "and eliminates the need for domain expertise.\n",
      "Since the introduction of transformers (Vaswani et al.\n",
      "2017), they have become the state of the art model to im-arXiv:2304.04912v1  [cs.LG]  11 Apr 2023\n",
      "\n",
      "and long-term dependencies within a time series, and forecast\n",
      "if the price would go up, down or remain the same (ﬂat) in\n",
      "the future. In our experiments, we demonstrated the success\n",
      "of the proposed method in comparison to commonly adopted\n",
      "statistical and deep learning methods on forecasting intraday\n",
      "stock price change of S&P 500 constituents.\n",
      "Introduction\n",
      "Time series forecasting is challenging, especially in the ﬁ-\n",
      "nancial industry (Pedersen 2019). It involves statistically\n",
      "understanding complex linear and non-linear interactions\n",
      "within historical data to predict the future. In the ﬁnan-\n",
      "cial industry, common applications for forecasting include\n",
      "predicting buy/sell or positive/negative price changes for\n",
      "company stocks traded on the market. Traditional statis-\n",
      "tical approaches commonly adapt linear regression, expo-\n",
      "nential smoothing (Holt 2004; Winters 1960; Gardner Jr\n",
      "and McKenzie 1985) and autoregression models (Makri-\n",
      "dakis, Spiliotis, and Assimakopoulos 2020). With the ad-\n",
      "\n",
      "information processing systems , 27.\n",
      "Taieb, S. B.; Sorjamaa, A.; and Bontempi, G. 2010.\n",
      "Multiple-output modeling for multi-step-ahead time series\n",
      "forecasting. Neurocomputing , 73(10-12): 1950–1957.\n",
      "Vaswani, A.; Shazeer, N.; Parmar, N.; Uszkoreit, J.; Jones,\n",
      "L.; Gomez, A. N.; Kaiser, Ł.; and Polosukhin, I. 2017. At-\n",
      "tention is all you need. Advances in neural information pro-\n",
      "cessing systems , 30.\n",
      "Wilks, D. S. 2011. Statistical methods in the atmospheric\n",
      "sciences , volume 100. Academic press.\n",
      "Winters, P. R. 1960. Forecasting sales by exponentially\n",
      "weighted moving averages. Management science , 6(3): 324–\n",
      "342.\n",
      "\n",
      "tistical tools, such as, exponential smoothing (ETS) (Holt\n",
      "2004; Winters 1960; Gardner Jr and McKenzie 1985)\n",
      "and autoregressive integrated moving average (ARIMA)\n",
      "(Makridakis, Spiliotis, and Assimakopoulos 2020), on nu-\n",
      "merical time series data for making one-step-ahead predic-\n",
      "tions. These predictions are then recursively fed into the\n",
      "future inputs to obtain multi-step forecasts. Multi-horizon\n",
      "forecasting methods such as (Taieb, Sorjamaa, and Bon-\n",
      "tempi 2010; Marcellino, Stock, and Watson 2006) directly\n",
      "generate simultaneous predictions for multiple pre-deﬁned\n",
      "future time steps.\n",
      "Machine learning and deep learning based approaches\n",
      "Machine learning (ML) approaches have shown to improve\n",
      "performance by addressing high-dimensional and non-linear\n",
      "feature interactions in a model-free way. These methods in-\n",
      "clude tree-based algorithms, ensemble methods, neural net-\n",
      "work, autoregression and recurrent neural networks (Hastie,\n",
      "Tibshirani, and Friedman 2001). More recent works have ap-\n",
      "History = [HumanMessage(content='what is the main theory of the paper?', additional_kwargs={}, example=False), AIMessage(content=\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\\nIf you don't know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\\nContext= and long-term dependencies within a time series, and forecast\\nif the price would go up, down or remain the same (ﬂat) in\\nthe future. In our experiments, we demonstrated the success\\nof the proposed method in comparison to commonly adopted\\nstatistical and deep learning methods on forecasting intraday\\nstock price change of S&P 500 constituents.\\nIntroduction\\nTime series forecasting is challenging, especially in the ﬁ-\\nnancial industry (Pedersen 2019). It involves statistically\\nunderstanding complex linear and non-linear interactions\\nwithin historical data to predict the future. In the ﬁnan-\\ncial industry, common applications for forecasting include\\npredicting buy/sell or positive/negative price changes for\\ncompany stocks traded on the market. Traditional statis-\\ntical approaches commonly adapt linear regression, expo-\\nnential smoothing (Holt 2004; Winters 1960; Gardner Jr\\nand McKenzie 1985) and autoregression models (Makri-\\ndakis, Spiliotis, and Assimakopoulos 2020). With the ad-\\n\\ntistical tools, such as, exponential smoothing (ETS) (Holt\\n2004; Winters 1960; Gardner Jr and McKenzie 1985)\\nand autoregressive integrated moving average (ARIMA)\\n(Makridakis, Spiliotis, and Assimakopoulos 2020), on nu-\\nmerical time series data for making one-step-ahead predic-\\ntions. These predictions are then recursively fed into the\\nfuture inputs to obtain multi-step forecasts. Multi-horizon\\nforecasting methods such as (Taieb, Sorjamaa, and Bon-\\ntempi 2010; Marcellino, Stock, and Watson 2006) directly\\ngenerate simultaneous predictions for multiple pre-deﬁned\\nfuture time steps.\\nMachine learning and deep learning based approaches\\nMachine learning (ML) approaches have shown to improve\\nperformance by addressing high-dimensional and non-linear\\nfeature interactions in a model-free way. These methods in-\\nclude tree-based algorithms, ensemble methods, neural net-\\nwork, autoregression and recurrent neural networks (Hastie,\\nTibshirani, and Friedman 2001). More recent works have ap-\\n\\nFigure 1: Overview of the proposed approach. Quick peak of the transformer encoder architecture on the right (Dosovitskiy\\net al. 2020)\\nprove the performance of NLP applications. The commonly\\nused approach is to pre-train on a large dataset and then ﬁne-\\ntune on a smaller task-speciﬁc dataset (Devlin et al. 2018).\\nTransformers leverage from multi-headed self-attention and\\nreplace the recurrent layers most commonly used in encoder-\\ndecoder architectures. In contrast to RNNs and LSTMs\\nwhere the input data is sequentially processed, transformers\\nbypass the recursion to ingest all inputs at once; thus, trans-\\nformers allow for parallel computations to reduce training\\ntime and do not suffer from long-term memory dependency\\nissues.\\nThe remainder of the paper is organized as follows. We\\ndiscuss our proposed methodology for time series modeling\\nFurther, we discuss our benchmark baseline models along\\nwith the performance evaluation metrics and report the ex-\\nperimental results. Finally, we highlight some concluding re-\\n\\nare often correct, thus thresholding ﬁlters out erroneous low-\\nconﬁdence predictions, leading to a boost in the sign accu-\\nracy.\\nAnother highlight is the signiﬁcant gap that CTTS has\\nachieved for thresholded 3-class∗accuracy compared to\\nother baselines. This can be harnessed in the future for trad-\\ning decision-making. For example, a straightforward trad-\\ning decision can be buy/sell/hold stocks when the predicted\\nclass is up/down/ﬂat, respectively. Given that CTTS’s pre-\\ndicted probablitlies are reliable as discussed earlier, the\\namount of stock shares to buy/sell/hold can depend on the\\nHistory = []\\nQuestion= what is the main theory of the paper?\\nHelpful Answer:\\nThe main theory of the paper is the use of a transformer-based approach for time series forecasting, specifically for intraday stock price change prediction of S&P 500 constituents. The proposed method demonstrates success in comparison to traditional statistical and deep learning methods. The transformer architecture allows for parallel computations and bypasses the recursion issue in RNNs and LSTMs, making it suitable for handling long-term dependencies within a time series.\", additional_kwargs={}, example=False), HumanMessage(content='simply introduce the data sets the paper used.', additional_kwargs={}, example=False), AIMessage(content='You are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\\nIf you don\\'t know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\\nContext= are often correct, thus thresholding ﬁlters out erroneous low-\\nconﬁdence predictions, leading to a boost in the sign accu-\\nracy.\\nAnother highlight is the signiﬁcant gap that CTTS has\\nachieved for thresholded 3-class∗accuracy compared to\\nother baselines. This can be harnessed in the future for trad-\\ning decision-making. For example, a straightforward trad-\\ning decision can be buy/sell/hold stocks when the predicted\\nclass is up/down/ﬂat, respectively. Given that CTTS’s pre-\\ndicted probablitlies are reliable as discussed earlier, the\\namount of stock shares to buy/sell/hold can depend on the\\n\\nand 4) naive constant class predictions. We benchmarked the\\nperformance using sign prediction accuracy and thresholded\\nversion of the sign prediction accuracy (discussed later) us-\\ning our model prediction probabilities. We ran our experi-\\nments on a Linux machine with 8 16GB NVIDIA T4 Ten-\\nsor Core GPUs, and using PyTorch v1.0.0 DL platform in\\nPython 3.6. In all models, we set a ﬁxed random seed for\\nreproducible results.\\nExperimental setup\\nData We used the intraday stock prices of S&P 500 con-\\nstituent stocks obtained from licensed Bloomberg data ser-\\nvice (blo 2022). The data was sampled at 1 minute interval\\nfor the year 2019 (52 weeks, each week has 5 trading days).\\nFor every stock, we sampled 7 time series for each week.\\nData from the ﬁrst three quarters (weeks 1 to 39) were used\\nfor training and validation. Data was randomly split and 80%\\nwas used for training and the remaining 20% for validation.\\n\\npatience of 15. The base learning rate was 1e-3, adjusted by\\nlearning rate scheduler with decay factor 0.1 and a patience\\nof 5. We also used dropout with probability 0.1. DeepAR\\ngenerated multiple (200) samples of the prediction target for\\neach time series and we deﬁned the prediction probability\\nover the three classes as the proportion of samples predicted\\nper class.\\nARIMA Autoregressive Integrated Moving Average\\n(ARIMA) models capture autocorrelations in the data\\nusing a combination approach of autoregressive model,\\nmoving average model, and differencing (Wilks 2011). We\\ncompared the continuous valued ARIMA forecasts with the\\nlast known price in the input data to deﬁne the predicted\\nsign, and the corresponding prediction probability was\\ndeﬁned as the percentage of the absolute delta between\\nthe predicted and the last known price with respect to the\\nstandard deviation of the past 80 data points, capped by 1.\\nEMA An exponential moving average (EMA) is a type of\\n\\nwith the performance evaluation metrics and report the ex-\\nperimental results. Finally, we highlight some concluding re-\\nmarks and future directions for this study.\\nMethod\\nOur proposed method is called CNN and Transformer based\\ntimeseries modeling (CTTS) as shown in overview Figure 1.\\nPreprocessing\\nWe used standard min-max scaling to standardize each input\\ntime series within [0,1]. Given a raw stock prices time series\\nx, the standardized time series were calculated as\\nxstandardized =x−min(x)\\nmax( x)−min(x)\\nThisxstandardized is then passed into our model to learn the\\nsign of the change in the very next time step, which is de-\\nﬁned as our prediction target.\\nForecasting\\nAs shown in Figure 1, we use 1D CNN kernels to convo-\\nlute through a time series, projecting each local window intoan embedding vector that we call a token. Each token car-\\nries the short-term patterns of the time series. we then add\\npositional embedding (Vaswani et al. 2017) to the token and\\nHistory = [HumanMessage(content=\\'what is the main theory of the paper?\\', additional_kwargs={}, example=False), AIMessage(content=\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\\\\nIf you don\\'t know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\\\\nContext= and long-term dependencies within a time series, and forecast\\\\nif the price would go up, down or remain the same (ﬂat) in\\\\nthe future. In our experiments, we demonstrated the success\\\\nof the proposed method in comparison to commonly adopted\\\\nstatistical and deep learning methods on forecasting intraday\\\\nstock price change of S&P 500 constituents.\\\\nIntroduction\\\\nTime series forecasting is challenging, especially in the ﬁ-\\\\nnancial industry (Pedersen 2019). It involves statistically\\\\nunderstanding complex linear and non-linear interactions\\\\nwithin historical data to predict the future. In the ﬁnan-\\\\ncial industry, common applications for forecasting include\\\\npredicting buy/sell or positive/negative price changes for\\\\ncompany stocks traded on the market. Traditional statis-\\\\ntical approaches commonly adapt linear regression, expo-\\\\nnential smoothing (Holt 2004; Winters 1960; Gardner Jr\\\\nand McKenzie 1985) and autoregression models (Makri-\\\\ndakis, Spiliotis, and Assimakopoulos 2020). With the ad-\\\\n\\\\ntistical tools, such as, exponential smoothing (ETS) (Holt\\\\n2004; Winters 1960; Gardner Jr and McKenzie 1985)\\\\nand autoregressive integrated moving average (ARIMA)\\\\n(Makridakis, Spiliotis, and Assimakopoulos 2020), on nu-\\\\nmerical time series data for making one-step-ahead predic-\\\\ntions. These predictions are then recursively fed into the\\\\nfuture inputs to obtain multi-step forecasts. Multi-horizon\\\\nforecasting methods such as (Taieb, Sorjamaa, and Bon-\\\\ntempi 2010; Marcellino, Stock, and Watson 2006) directly\\\\ngenerate simultaneous predictions for multiple pre-deﬁned\\\\nfuture time steps.\\\\nMachine learning and deep learning based approaches\\\\nMachine learning (ML) approaches have shown to improve\\\\nperformance by addressing high-dimensional and non-linear\\\\nfeature interactions in a model-free way. These methods in-\\\\nclude tree-based algorithms, ensemble methods, neural net-\\\\nwork, autoregression and recurrent neural networks (Hastie,\\\\nTibshirani, and Friedman 2001). More recent works have ap-\\\\n\\\\nFigure 1: Overview of the proposed approach. Quick peak of the transformer encoder architecture on the right (Dosovitskiy\\\\net al. 2020)\\\\nprove the performance of NLP applications. The commonly\\\\nused approach is to pre-train on a large dataset and then ﬁne-\\\\ntune on a smaller task-speciﬁc dataset (Devlin et al. 2018).\\\\nTransformers leverage from multi-headed self-attention and\\\\nreplace the recurrent layers most commonly used in encoder-\\\\ndecoder architectures. In contrast to RNNs and LSTMs\\\\nwhere the input data is sequentially processed, transformers\\\\nbypass the recursion to ingest all inputs at once; thus, trans-\\\\nformers allow for parallel computations to reduce training\\\\ntime and do not suffer from long-term memory dependency\\\\nissues.\\\\nThe remainder of the paper is organized as follows. We\\\\ndiscuss our proposed methodology for time series modeling\\\\nFurther, we discuss our benchmark baseline models along\\\\nwith the performance evaluation metrics and report the ex-\\\\nperimental results. Finally, we highlight some concluding re-\\\\n\\\\nare often correct, thus thresholding ﬁlters out erroneous low-\\\\nconﬁdence predictions, leading to a boost in the sign accu-\\\\nracy.\\\\nAnother highlight is the signiﬁcant gap that CTTS has\\\\nachieved for thresholded 3-class∗accuracy compared to\\\\nother baselines. This can be harnessed in the future for trad-\\\\ning decision-making. For example, a straightforward trad-\\\\ning decision can be buy/sell/hold stocks when the predicted\\\\nclass is up/down/ﬂat, respectively. Given that CTTS’s pre-\\\\ndicted probablitlies are reliable as discussed earlier, the\\\\namount of stock shares to buy/sell/hold can depend on the\\\\nHistory = []\\\\nQuestion= what is the main theory of the paper?\\\\nHelpful Answer:\\\\nThe main theory of the paper is the use of a transformer-based approach for time series forecasting, specifically for intraday stock price change prediction of S&P 500 constituents. The proposed method demonstrates success in comparison to traditional statistical and deep learning methods. The transformer architecture allows for parallel computations and bypasses the recursion issue in RNNs and LSTMs, making it suitable for handling long-term dependencies within a time series.\", additional_kwargs={}, example=False)]\\nQuestion= simply introduce the data sets the paper used.\\nHelpful Answer:\\nThe paper used licensed Bloomberg data service for intraday stock prices of S&P 500 constituents for the year 2019, sampled at 1-minute interval for 52 weeks with 7 time series for each week. The data was randomly split into training (80%) and validation (20%) sets.', additional_kwargs={}, example=False)]\n",
      "Question= summarize the paper, please.\n",
      "Helpful Answer:\n",
      "\u001b[0m\n",
      "\n",
      "\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'query': 'summarize the paper, please.',\n",
       " 'result': 'You are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\\nIf you don\\'t know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\\nContext= work, autoregression and recurrent neural networks (Hastie,\\nTibshirani, and Friedman 2001). More recent works have ap-\\nplied Deep learning (DL) methods on numeric time series\\ndata (Bao, Yue, and Rao 2017; Gensler et al. 2016; Romeu\\net al. 2015; Sagheer and Kotb 2019; Sutskever, Vinyals, and\\nLe 2014). DL automates the process of feature extraction\\nand eliminates the need for domain expertise.\\nSince the introduction of transformers (Vaswani et al.\\n2017), they have become the state of the art model to im-arXiv:2304.04912v1  [cs.LG]  11 Apr 2023\\n\\nand long-term dependencies within a time series, and forecast\\nif the price would go up, down or remain the same (ﬂat) in\\nthe future. In our experiments, we demonstrated the success\\nof the proposed method in comparison to commonly adopted\\nstatistical and deep learning methods on forecasting intraday\\nstock price change of S&P 500 constituents.\\nIntroduction\\nTime series forecasting is challenging, especially in the ﬁ-\\nnancial industry (Pedersen 2019). It involves statistically\\nunderstanding complex linear and non-linear interactions\\nwithin historical data to predict the future. In the ﬁnan-\\ncial industry, common applications for forecasting include\\npredicting buy/sell or positive/negative price changes for\\ncompany stocks traded on the market. Traditional statis-\\ntical approaches commonly adapt linear regression, expo-\\nnential smoothing (Holt 2004; Winters 1960; Gardner Jr\\nand McKenzie 1985) and autoregression models (Makri-\\ndakis, Spiliotis, and Assimakopoulos 2020). With the ad-\\n\\ninformation processing systems , 27.\\nTaieb, S. B.; Sorjamaa, A.; and Bontempi, G. 2010.\\nMultiple-output modeling for multi-step-ahead time series\\nforecasting. Neurocomputing , 73(10-12): 1950–1957.\\nVaswani, A.; Shazeer, N.; Parmar, N.; Uszkoreit, J.; Jones,\\nL.; Gomez, A. N.; Kaiser, Ł.; and Polosukhin, I. 2017. At-\\ntention is all you need. Advances in neural information pro-\\ncessing systems , 30.\\nWilks, D. S. 2011. Statistical methods in the atmospheric\\nsciences , volume 100. Academic press.\\nWinters, P. R. 1960. Forecasting sales by exponentially\\nweighted moving averages. Management science , 6(3): 324–\\n342.\\n\\ntistical tools, such as, exponential smoothing (ETS) (Holt\\n2004; Winters 1960; Gardner Jr and McKenzie 1985)\\nand autoregressive integrated moving average (ARIMA)\\n(Makridakis, Spiliotis, and Assimakopoulos 2020), on nu-\\nmerical time series data for making one-step-ahead predic-\\ntions. These predictions are then recursively fed into the\\nfuture inputs to obtain multi-step forecasts. Multi-horizon\\nforecasting methods such as (Taieb, Sorjamaa, and Bon-\\ntempi 2010; Marcellino, Stock, and Watson 2006) directly\\ngenerate simultaneous predictions for multiple pre-deﬁned\\nfuture time steps.\\nMachine learning and deep learning based approaches\\nMachine learning (ML) approaches have shown to improve\\nperformance by addressing high-dimensional and non-linear\\nfeature interactions in a model-free way. These methods in-\\nclude tree-based algorithms, ensemble methods, neural net-\\nwork, autoregression and recurrent neural networks (Hastie,\\nTibshirani, and Friedman 2001). More recent works have ap-\\nHistory = [HumanMessage(content=\\'what is the main theory of the paper?\\', additional_kwargs={}, example=False), AIMessage(content=\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\\\\nIf you don\\'t know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\\\\nContext= and long-term dependencies within a time series, and forecast\\\\nif the price would go up, down or remain the same (ﬂat) in\\\\nthe future. In our experiments, we demonstrated the success\\\\nof the proposed method in comparison to commonly adopted\\\\nstatistical and deep learning methods on forecasting intraday\\\\nstock price change of S&P 500 constituents.\\\\nIntroduction\\\\nTime series forecasting is challenging, especially in the ﬁ-\\\\nnancial industry (Pedersen 2019). It involves statistically\\\\nunderstanding complex linear and non-linear interactions\\\\nwithin historical data to predict the future. In the ﬁnan-\\\\ncial industry, common applications for forecasting include\\\\npredicting buy/sell or positive/negative price changes for\\\\ncompany stocks traded on the market. Traditional statis-\\\\ntical approaches commonly adapt linear regression, expo-\\\\nnential smoothing (Holt 2004; Winters 1960; Gardner Jr\\\\nand McKenzie 1985) and autoregression models (Makri-\\\\ndakis, Spiliotis, and Assimakopoulos 2020). With the ad-\\\\n\\\\ntistical tools, such as, exponential smoothing (ETS) (Holt\\\\n2004; Winters 1960; Gardner Jr and McKenzie 1985)\\\\nand autoregressive integrated moving average (ARIMA)\\\\n(Makridakis, Spiliotis, and Assimakopoulos 2020), on nu-\\\\nmerical time series data for making one-step-ahead predic-\\\\ntions. These predictions are then recursively fed into the\\\\nfuture inputs to obtain multi-step forecasts. Multi-horizon\\\\nforecasting methods such as (Taieb, Sorjamaa, and Bon-\\\\ntempi 2010; Marcellino, Stock, and Watson 2006) directly\\\\ngenerate simultaneous predictions for multiple pre-deﬁned\\\\nfuture time steps.\\\\nMachine learning and deep learning based approaches\\\\nMachine learning (ML) approaches have shown to improve\\\\nperformance by addressing high-dimensional and non-linear\\\\nfeature interactions in a model-free way. These methods in-\\\\nclude tree-based algorithms, ensemble methods, neural net-\\\\nwork, autoregression and recurrent neural networks (Hastie,\\\\nTibshirani, and Friedman 2001). More recent works have ap-\\\\n\\\\nFigure 1: Overview of the proposed approach. Quick peak of the transformer encoder architecture on the right (Dosovitskiy\\\\net al. 2020)\\\\nprove the performance of NLP applications. The commonly\\\\nused approach is to pre-train on a large dataset and then ﬁne-\\\\ntune on a smaller task-speciﬁc dataset (Devlin et al. 2018).\\\\nTransformers leverage from multi-headed self-attention and\\\\nreplace the recurrent layers most commonly used in encoder-\\\\ndecoder architectures. In contrast to RNNs and LSTMs\\\\nwhere the input data is sequentially processed, transformers\\\\nbypass the recursion to ingest all inputs at once; thus, trans-\\\\nformers allow for parallel computations to reduce training\\\\ntime and do not suffer from long-term memory dependency\\\\nissues.\\\\nThe remainder of the paper is organized as follows. We\\\\ndiscuss our proposed methodology for time series modeling\\\\nFurther, we discuss our benchmark baseline models along\\\\nwith the performance evaluation metrics and report the ex-\\\\nperimental results. Finally, we highlight some concluding re-\\\\n\\\\nare often correct, thus thresholding ﬁlters out erroneous low-\\\\nconﬁdence predictions, leading to a boost in the sign accu-\\\\nracy.\\\\nAnother highlight is the signiﬁcant gap that CTTS has\\\\nachieved for thresholded 3-class∗accuracy compared to\\\\nother baselines. This can be harnessed in the future for trad-\\\\ning decision-making. For example, a straightforward trad-\\\\ning decision can be buy/sell/hold stocks when the predicted\\\\nclass is up/down/ﬂat, respectively. Given that CTTS’s pre-\\\\ndicted probablitlies are reliable as discussed earlier, the\\\\namount of stock shares to buy/sell/hold can depend on the\\\\nHistory = []\\\\nQuestion= what is the main theory of the paper?\\\\nHelpful Answer:\\\\nThe main theory of the paper is the use of a transformer-based approach for time series forecasting, specifically for intraday stock price change prediction of S&P 500 constituents. The proposed method demonstrates success in comparison to traditional statistical and deep learning methods. The transformer architecture allows for parallel computations and bypasses the recursion issue in RNNs and LSTMs, making it suitable for handling long-term dependencies within a time series.\", additional_kwargs={}, example=False), HumanMessage(content=\\'simply introduce the data sets the paper used.\\', additional_kwargs={}, example=False), AIMessage(content=\\'You are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\\\\nIf you don\\\\\\'t know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\\\\nContext= are often correct, thus thresholding ﬁlters out erroneous low-\\\\nconﬁdence predictions, leading to a boost in the sign accu-\\\\nracy.\\\\nAnother highlight is the signiﬁcant gap that CTTS has\\\\nachieved for thresholded 3-class∗accuracy compared to\\\\nother baselines. This can be harnessed in the future for trad-\\\\ning decision-making. For example, a straightforward trad-\\\\ning decision can be buy/sell/hold stocks when the predicted\\\\nclass is up/down/ﬂat, respectively. Given that CTTS’s pre-\\\\ndicted probablitlies are reliable as discussed earlier, the\\\\namount of stock shares to buy/sell/hold can depend on the\\\\n\\\\nand 4) naive constant class predictions. We benchmarked the\\\\nperformance using sign prediction accuracy and thresholded\\\\nversion of the sign prediction accuracy (discussed later) us-\\\\ning our model prediction probabilities. We ran our experi-\\\\nments on a Linux machine with 8 16GB NVIDIA T4 Ten-\\\\nsor Core GPUs, and using PyTorch v1.0.0 DL platform in\\\\nPython 3.6. In all models, we set a ﬁxed random seed for\\\\nreproducible results.\\\\nExperimental setup\\\\nData We used the intraday stock prices of S&P 500 con-\\\\nstituent stocks obtained from licensed Bloomberg data ser-\\\\nvice (blo 2022). The data was sampled at 1 minute interval\\\\nfor the year 2019 (52 weeks, each week has 5 trading days).\\\\nFor every stock, we sampled 7 time series for each week.\\\\nData from the ﬁrst three quarters (weeks 1 to 39) were used\\\\nfor training and validation. Data was randomly split and 80%\\\\nwas used for training and the remaining 20% for validation.\\\\n\\\\npatience of 15. The base learning rate was 1e-3, adjusted by\\\\nlearning rate scheduler with decay factor 0.1 and a patience\\\\nof 5. We also used dropout with probability 0.1. DeepAR\\\\ngenerated multiple (200) samples of the prediction target for\\\\neach time series and we deﬁned the prediction probability\\\\nover the three classes as the proportion of samples predicted\\\\nper class.\\\\nARIMA Autoregressive Integrated Moving Average\\\\n(ARIMA) models capture autocorrelations in the data\\\\nusing a combination approach of autoregressive model,\\\\nmoving average model, and differencing (Wilks 2011). We\\\\ncompared the continuous valued ARIMA forecasts with the\\\\nlast known price in the input data to deﬁne the predicted\\\\nsign, and the corresponding prediction probability was\\\\ndeﬁned as the percentage of the absolute delta between\\\\nthe predicted and the last known price with respect to the\\\\nstandard deviation of the past 80 data points, capped by 1.\\\\nEMA An exponential moving average (EMA) is a type of\\\\n\\\\nwith the performance evaluation metrics and report the ex-\\\\nperimental results. Finally, we highlight some concluding re-\\\\nmarks and future directions for this study.\\\\nMethod\\\\nOur proposed method is called CNN and Transformer based\\\\ntimeseries modeling (CTTS) as shown in overview Figure 1.\\\\nPreprocessing\\\\nWe used standard min-max scaling to standardize each input\\\\ntime series within [0,1]. Given a raw stock prices time series\\\\nx, the standardized time series were calculated as\\\\nxstandardized =x−min(x)\\\\nmax( x)−min(x)\\\\nThisxstandardized is then passed into our model to learn the\\\\nsign of the change in the very next time step, which is de-\\\\nﬁned as our prediction target.\\\\nForecasting\\\\nAs shown in Figure 1, we use 1D CNN kernels to convo-\\\\nlute through a time series, projecting each local window intoan embedding vector that we call a token. Each token car-\\\\nries the short-term patterns of the time series. we then add\\\\npositional embedding (Vaswani et al. 2017) to the token and\\\\nHistory = [HumanMessage(content=\\\\\\'what is the main theory of the paper?\\\\\\', additional_kwargs={}, example=False), AIMessage(content=\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context and answer the question at the end.\\\\\\\\nIf you don\\\\\\'t know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\\\\\\\\nContext= and long-term dependencies within a time series, and forecast\\\\\\\\nif the price would go up, down or remain the same (ﬂat) in\\\\\\\\nthe future. In our experiments, we demonstrated the success\\\\\\\\nof the proposed method in comparison to commonly adopted\\\\\\\\nstatistical and deep learning methods on forecasting intraday\\\\\\\\nstock price change of S&P 500 constituents.\\\\\\\\nIntroduction\\\\\\\\nTime series forecasting is challenging, especially in the ﬁ-\\\\\\\\nnancial industry (Pedersen 2019). It involves statistically\\\\\\\\nunderstanding complex linear and non-linear interactions\\\\\\\\nwithin historical data to predict the future. In the ﬁnan-\\\\\\\\ncial industry, common applications for forecasting include\\\\\\\\npredicting buy/sell or positive/negative price changes for\\\\\\\\ncompany stocks traded on the market. Traditional statis-\\\\\\\\ntical approaches commonly adapt linear regression, expo-\\\\\\\\nnential smoothing (Holt 2004; Winters 1960; Gardner Jr\\\\\\\\nand McKenzie 1985) and autoregression models (Makri-\\\\\\\\ndakis, Spiliotis, and Assimakopoulos 2020). With the ad-\\\\\\\\n\\\\\\\\ntistical tools, such as, exponential smoothing (ETS) (Holt\\\\\\\\n2004; Winters 1960; Gardner Jr and McKenzie 1985)\\\\\\\\nand autoregressive integrated moving average (ARIMA)\\\\\\\\n(Makridakis, Spiliotis, and Assimakopoulos 2020), on nu-\\\\\\\\nmerical time series data for making one-step-ahead predic-\\\\\\\\ntions. These predictions are then recursively fed into the\\\\\\\\nfuture inputs to obtain multi-step forecasts. Multi-horizon\\\\\\\\nforecasting methods such as (Taieb, Sorjamaa, and Bon-\\\\\\\\ntempi 2010; Marcellino, Stock, and Watson 2006) directly\\\\\\\\ngenerate simultaneous predictions for multiple pre-deﬁned\\\\\\\\nfuture time steps.\\\\\\\\nMachine learning and deep learning based approaches\\\\\\\\nMachine learning (ML) approaches have shown to improve\\\\\\\\nperformance by addressing high-dimensional and non-linear\\\\\\\\nfeature interactions in a model-free way. These methods in-\\\\\\\\nclude tree-based algorithms, ensemble methods, neural net-\\\\\\\\nwork, autoregression and recurrent neural networks (Hastie,\\\\\\\\nTibshirani, and Friedman 2001). More recent works have ap-\\\\\\\\n\\\\\\\\nFigure 1: Overview of the proposed approach. Quick peak of the transformer encoder architecture on the right (Dosovitskiy\\\\\\\\net al. 2020)\\\\\\\\nprove the performance of NLP applications. The commonly\\\\\\\\nused approach is to pre-train on a large dataset and then ﬁne-\\\\\\\\ntune on a smaller task-speciﬁc dataset (Devlin et al. 2018).\\\\\\\\nTransformers leverage from multi-headed self-attention and\\\\\\\\nreplace the recurrent layers most commonly used in encoder-\\\\\\\\ndecoder architectures. In contrast to RNNs and LSTMs\\\\\\\\nwhere the input data is sequentially processed, transformers\\\\\\\\nbypass the recursion to ingest all inputs at once; thus, trans-\\\\\\\\nformers allow for parallel computations to reduce training\\\\\\\\ntime and do not suffer from long-term memory dependency\\\\\\\\nissues.\\\\\\\\nThe remainder of the paper is organized as follows. We\\\\\\\\ndiscuss our proposed methodology for time series modeling\\\\\\\\nFurther, we discuss our benchmark baseline models along\\\\\\\\nwith the performance evaluation metrics and report the ex-\\\\\\\\nperimental results. Finally, we highlight some concluding re-\\\\\\\\n\\\\\\\\nare often correct, thus thresholding ﬁlters out erroneous low-\\\\\\\\nconﬁdence predictions, leading to a boost in the sign accu-\\\\\\\\nracy.\\\\\\\\nAnother highlight is the signiﬁcant gap that CTTS has\\\\\\\\nachieved for thresholded 3-class∗accuracy compared to\\\\\\\\nother baselines. This can be harnessed in the future for trad-\\\\\\\\ning decision-making. For example, a straightforward trad-\\\\\\\\ning decision can be buy/sell/hold stocks when the predicted\\\\\\\\nclass is up/down/ﬂat, respectively. Given that CTTS’s pre-\\\\\\\\ndicted probablitlies are reliable as discussed earlier, the\\\\\\\\namount of stock shares to buy/sell/hold can depend on the\\\\\\\\nHistory = []\\\\\\\\nQuestion= what is the main theory of the paper?\\\\\\\\nHelpful Answer:\\\\\\\\nThe main theory of the paper is the use of a transformer-based approach for time series forecasting, specifically for intraday stock price change prediction of S&P 500 constituents. The proposed method demonstrates success in comparison to traditional statistical and deep learning methods. The transformer architecture allows for parallel computations and bypasses the recursion issue in RNNs and LSTMs, making it suitable for handling long-term dependencies within a time series.\", additional_kwargs={}, example=False)]\\\\nQuestion= simply introduce the data sets the paper used.\\\\nHelpful Answer:\\\\nThe paper used licensed Bloomberg data service for intraday stock prices of S&P 500 constituents for the year 2019, sampled at 1-minute interval for 52 weeks with 7 time series for each week. The data was randomly split into training (80%) and validation (20%) sets.\\', additional_kwargs={}, example=False)]\\nQuestion= summarize the paper, please.\\nHelpful Answer:\\n\\n',\n",
       " 'source_documents': [Document(page_content='work, autoregression and recurrent neural networks (Hastie,\\nTibshirani, and Friedman 2001). More recent works have ap-\\nplied Deep learning (DL) methods on numeric time series\\ndata (Bao, Yue, and Rao 2017; Gensler et al. 2016; Romeu\\net al. 2015; Sagheer and Kotb 2019; Sutskever, Vinyals, and\\nLe 2014). DL automates the process of feature extraction\\nand eliminates the need for domain expertise.\\nSince the introduction of transformers (Vaswani et al.\\n2017), they have become the state of the art model to im-arXiv:2304.04912v1  [cs.LG]  11 Apr 2023', metadata={'source': '/data/documents/2304.04912.pdf', 'page': 0}),\n",
       "  Document(page_content='and long-term dependencies within a time series, and forecast\\nif the price would go up, down or remain the same (ﬂat) in\\nthe future. In our experiments, we demonstrated the success\\nof the proposed method in comparison to commonly adopted\\nstatistical and deep learning methods on forecasting intraday\\nstock price change of S&P 500 constituents.\\nIntroduction\\nTime series forecasting is challenging, especially in the ﬁ-\\nnancial industry (Pedersen 2019). It involves statistically\\nunderstanding complex linear and non-linear interactions\\nwithin historical data to predict the future. In the ﬁnan-\\ncial industry, common applications for forecasting include\\npredicting buy/sell or positive/negative price changes for\\ncompany stocks traded on the market. Traditional statis-\\ntical approaches commonly adapt linear regression, expo-\\nnential smoothing (Holt 2004; Winters 1960; Gardner Jr\\nand McKenzie 1985) and autoregression models (Makri-\\ndakis, Spiliotis, and Assimakopoulos 2020). With the ad-', metadata={'source': '/data/documents/2304.04912.pdf', 'page': 0}),\n",
       "  Document(page_content='information processing systems , 27.\\nTaieb, S. B.; Sorjamaa, A.; and Bontempi, G. 2010.\\nMultiple-output modeling for multi-step-ahead time series\\nforecasting. Neurocomputing , 73(10-12): 1950–1957.\\nVaswani, A.; Shazeer, N.; Parmar, N.; Uszkoreit, J.; Jones,\\nL.; Gomez, A. N.; Kaiser, Ł.; and Polosukhin, I. 2017. At-\\ntention is all you need. Advances in neural information pro-\\ncessing systems , 30.\\nWilks, D. S. 2011. Statistical methods in the atmospheric\\nsciences , volume 100. Academic press.\\nWinters, P. R. 1960. Forecasting sales by exponentially\\nweighted moving averages. Management science , 6(3): 324–\\n342.', metadata={'source': '/data/documents/2304.04912.pdf', 'page': 3}),\n",
       "  Document(page_content='tistical tools, such as, exponential smoothing (ETS) (Holt\\n2004; Winters 1960; Gardner Jr and McKenzie 1985)\\nand autoregressive integrated moving average (ARIMA)\\n(Makridakis, Spiliotis, and Assimakopoulos 2020), on nu-\\nmerical time series data for making one-step-ahead predic-\\ntions. These predictions are then recursively fed into the\\nfuture inputs to obtain multi-step forecasts. Multi-horizon\\nforecasting methods such as (Taieb, Sorjamaa, and Bon-\\ntempi 2010; Marcellino, Stock, and Watson 2006) directly\\ngenerate simultaneous predictions for multiple pre-deﬁned\\nfuture time steps.\\nMachine learning and deep learning based approaches\\nMachine learning (ML) approaches have shown to improve\\nperformance by addressing high-dimensional and non-linear\\nfeature interactions in a model-free way. These methods in-\\nclude tree-based algorithms, ensemble methods, neural net-\\nwork, autoregression and recurrent neural networks (Hastie,\\nTibshirani, and Friedman 2001). More recent works have ap-', metadata={'source': '/data/documents/2304.04912.pdf', 'page': 0})]}"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "query = \"summarize the paper, please.\"\n",
    "qa_chain_with_memory({\"query\": query})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--------------------number of relevant documents--------------------\n",
      "4\n"
     ]
    }
   ],
   "source": [
    "# query = \"这篇文章的主题是什么\\n\"\n",
    "# docs = docsearch.get_relevant_documents(query)\n",
    "# print(\"-\"*20+\"number of relevant documents\"+\"-\"*20)\n",
    "# print(len(docs))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.callbacks.manager import CallbackManagerForLLMRun\n",
    "from langchain.schema.output import LLMResult\n",
    "\n",
    "class ChainStreamHandler(CallbackManagerForLLMRun):\n",
    "    def __init__(self):\n",
    "        self.tokens = []\n",
    "        self.num = 0\n",
    "        # 记得结束后这里置true\n",
    "        self.finish = False\n",
    "\n",
    "    def on_llm_new_token(self, token: str, **kwargs):\n",
    "        print(token)\n",
    "        self.num += 1\n",
    "        self.tokens.append(token)\n",
    "\n",
    "    def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
    "        self.finish = 1\n",
    "\n",
    "    def on_llm_error(self, error: Exception, **kwargs) -> None:\n",
    "        print(str(error))\n",
    "        self.tokens.append(str(error))\n",
    "\n",
    "    def generate_tokens(self):\n",
    "        while not self.finish or self.tokens:\n",
    "            if self.tokens:\n",
    "                data = self.tokens.pop(0)\n",
    "                yield data\n",
    "            else:\n",
    "                pass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "mycallbackmanager = ChainStreamHandler()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "ename": "AttributeError",
     "evalue": "'ChainStreamHandler' object has no attribute 'dict'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[24], line 4\u001b[0m\n\u001b[1;32m      1\u001b[0m llm_stream \u001b[38;5;241m=\u001b[39m TransformersLLM\u001b[38;5;241m.\u001b[39mfrom_model_id(\n\u001b[1;32m      2\u001b[0m         model_id\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m/data/chatglm3-6B/\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m      3\u001b[0m         model_kwargs\u001b[38;5;241m=\u001b[39m{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtemperature\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;241m0\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmax_length\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;241m4096\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtrust_remote_code\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28;01mTrue\u001b[39;00m},\n\u001b[0;32m----> 4\u001b[0m         run_manager \u001b[38;5;241m=\u001b[39m \u001b[43mmycallbackmanager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdict\u001b[49m()\n\u001b[1;32m      5\u001b[0m         \n\u001b[1;32m      6\u001b[0m     )\n",
      "\u001b[0;31mAttributeError\u001b[0m: 'ChainStreamHandler' object has no attribute 'dict'"
     ]
    }
   ],
   "source": [
    "llm_stream = TransformersLLM.from_model_id(\n",
    "        model_id=\"/data/chatglm3-6B/\",\n",
    "        model_kwargs={\"temperature\": 0, \"max_length\": 4096, \"trust_remote_code\": True},\n",
    "        run_manager = mycallbackmanager\n",
    "        \n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/llm-tutorial/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization. \n",
      "The tokenizer class you load from this checkpoint is 'ChatGLMTokenizer'. \n",
      "The class this function is called from is 'LlamaTokenizer'.\n",
      "You are using the legacy behaviour of the <class 'transformers.models.llama.tokenization_llama.LlamaTokenizer'>. This means that tokens that come after special tokens will not be properly handled. We recommend you to read the related pull request available at https://github.com/huggingface/transformers/pull/24565\n",
      "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n"
     ]
    }
   ],
   "source": [
    "from transformers import AutoTokenizer, LlamaTokenizer\n",
    "tokenizer = LlamaTokenizer.from_pretrained('/data/chatglm3-6B/')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "100"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "input_ids = tokenizer([\"\"\"\n",
    "You are an assistant for question-answering tasks. Use the retrieved {context} and answer the question at the end.\n",
    "If you don't know the answer just say you d not know an do not try to make up the answer nor try to use outside souces to answer. Keep the answer as concise as possible.\n",
    "History = {history}\n",
    "Question= {question}\n",
    "Don't add text above in the result.\n",
    "Helpful Answer:\n",
    "\"\"\"], return_tensors=\"pt\")\n",
    "input_ids['input_ids'].shape[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': tensor([[64789, 30910,    13,  3651,   383,   284,  9319,   332,  2021, 30941,\n",
       "         64717,  1521,  7760, 30930,  5165,   267, 29604,   729, 11355, 30983,\n",
       "           293,  3238,   267,  2021,   388,   267,   934, 30930,    13,  4906,\n",
       "           344,   883, 30953, 30912,   683,   267,  3238,   680,  1105,   344,\n",
       "           292,   430,   683,   284,   480,   430,  1961,   289,   794,   541,\n",
       "           267,  3238,  5030,  1961,   289,   792,  2726, 16969,   974,   289,\n",
       "          3238, 30930,  6936,   267,  3238,   362, 30130,   362,  1809, 30930,\n",
       "            13, 21441,   542,   729, 19973, 30983,    13,  5792, 23483, 30980,\n",
       "           729, 22980, 30983,    13,  6676, 30953, 30912,   784,  2254,  2129,\n",
       "           291,   267,  1169, 30930,    13, 28658,   759, 20115, 30954,    13]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
       "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
       "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
       "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
       "         1, 1, 1, 1]])}"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "input_ids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "You are using a model of type chatglm3 to instantiate a model of type chatglm. This is not supported for all configurations of models and can yield errors.\n",
      "Loading checkpoint shards: 100%|██████████| 7/7 [00:06<00:00,  1.07it/s]\n",
      "2024-01-21 13:57:55,545 - INFO - Converting the current model to sym_int4 format......\n"
     ]
    }
   ],
   "source": [
    "from bigdl.llm.transformers import (\n",
    "                AutoModel,\n",
    "                AutoModelForCausalLM,\n",
    "                # AutoModelForSeq2SeqLM,\n",
    "            )\n",
    "\n",
    "model = AutoModelForCausalLM.from_pretrained('/data/chatglm3-6B/', load_in_4bit=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[]"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "History:\n",
      "{history}\n",
      "\n",
      "Question:\n",
      "{question}\n",
      "\n",
      "Answer:\n",
      "{answer}\n",
      "\n",
      "{additional_info}\n",
      "\n",
      "Answer: {answer}\n",
      "\n",
      "{additional_info}\n",
      "\n",
      "Answer: {answer}\n",
      "\n",
      "{additional_info}\n",
      "\n",
      "Answer: {answer}\n",
      "\n",
      "{additional_info}\n",
      "\n",
      "Answer: {answer}\n",
      "\n",
      "{additional_info}\n",
      "\n",
      "Answer: {answer}\n",
      "\n",
      "{additional_info}\n",
      "\n",
      "Answer: {answer}\n",
      "\n",
      "{additional_info}\n",
      "\n",
      "Answer: {answer}\n",
      "\n",
      "{additional_info}\n",
      "\n",
      "Answer: {answer}\n",
      "\n",
      "{additional_info}\n",
      "\n",
      "Answer: {answer}\n",
      "\n",
      "{additional_info}\n",
      "\n",
      "An\n"
     ]
    }
   ],
   "source": [
    "from transformers import TextStreamer,TextIteratorStreamer\n",
    "import threading\n",
    "# input_ids = tokenizer([custom_prompt_template], return_tensors=\"pt\")\n",
    "input_length = input_ids['input_ids'].shape[1]\n",
    "# streamer = TextIteratorStreamer(tokenizer, timeout=60, skip_prompt=True, skip_special_tokens=True)\n",
    "streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n",
    "# output = model.generate(**input_ids, streamer=streamer, max_new_tokens=200)\n",
    "generate_kwargs = {\n",
    "    \"input_ids\": input_ids['input_ids'],\n",
    "    \"attention_mask\": input_ids['attention_mask'],\n",
    "    \"streamer\" : streamer,\n",
    "    \"max_new_tokens\": 200\n",
    "}\n",
    "t = threading.Thread(target=model.generate, kwargs=generate_kwargs)\n",
    "# text = tokenizer.batch_decode(output[:, input_length:], skip_special_tokens=True)[0]\n",
    "\n",
    "t.start()\n",
    "history = []\n",
    "for new_token in streamer.token_cache:\n",
    "    history += new_token\n",
    "\n",
    "history\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<transformers.generation.streamers.TextStreamer at 0x7efafd5fbac0>"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "streamer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "history = []\n",
    "for new_token in streamer.token_cache:\n",
    "    if new_token != '':\n",
    "            history += new_token\n",
    "            \n",
    "history\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[]"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "history"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llm-tutorial",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
