# #@title Установка библиотек. Сервисные функции
# !pip -q install --upgrade tiktoken
# !pip -q install langchain openai chromadb
# !pip -q install gspread oauth2client
#
# from langchain.llms import OpenAI
# from langchain.docstore.document import Document
# import requests
# from langchain.embeddings.openai import OpenAIEmbeddings
# from langchain.vectorstores import Chroma
# from langchain.text_splitter import CharacterTextSplitter
# from langchain.prompts import PromptTemplate
# import pathlib
# import subprocess
# import tempfile
# import ipywidgets as widgets
# import os
# import gspread
# from oauth2client.service_account import ServiceAccountCredentials
# import re
#
# import os
# import openai
# import tiktoken
# import re
#
#
# NOTE(review): dead code — the class below is commented out. Annotation only;
# no executable change. If re-enabled: ANSI escape-code constants for colored
# terminal/notebook output (ENDC resets, BOLD/UNDERLINE are text attributes).
# class bcolors:
#     HEADER = '\033[95m'
#     OKBLUE = '\033[94m'
#     OKCYAN = '\033[96m'
#     OKGREEN = '\033[92m'
#     WARNING = '\033[93m'
#     FAIL = '\033[91m'
#     ENDC = '\033[0m'
#     BOLD = '\033[1m'
#     UNDERLINE = '\033[4m'
#
# NOTE(review): dead code — the entire GPT class below is commented out. The
# comments added here describe what it would do if re-enabled and flag defects
# to fix before uncommenting; no executable line has been changed.
# NOTE(review): uses the legacy `openai.ChatCompletion` interface, which was
# removed in openai>=1.0 — pin openai<1.0 or migrate before re-enabling.
# class GPT():
#   def __init__(self):
#     pass
#
# NOTE(review): set_key shows an ipywidgets password prompt and, on click,
# stores the value into openai.api_key and the OPENAI_API_KEY env var.
# Relies on the notebook-only `display` builtin (not imported here).
#   @classmethod
#   def set_key(cls):
#       password_input = widgets.Password(
#           description='Введите пароль:',
#           layout=widgets.Layout(width='500px'),
#           style={'description_width': 'initial', 'white-space': 'pre-wrap', 'overflow': 'auto'})
#       login_button = widgets.Button(description='Авторизация')
#       output = widgets.Output()
#
#       def on_button_clicked(_):
#           with output:
#               openai.api_key = password_input.value
#               os.environ["OPENAI_API_KEY"] = openai.api_key
#               print(f'{bcolors.OKGREEN}{bcolors.BOLD}Ключ сохранен!{bcolors.ENDC}')
#               password_input.layout.display = 'none'
#               login_button.layout.display = 'none'
#
#       login_button.on_click(on_button_clicked)
#       display(widgets.VBox([password_input, login_button, output]))
#
# NOTE(review): extracts a Google Docs id from the URL, downloads the doc's
# plain-text export, and builds a Chroma index via create_embedding. The
# `-> str` annotation is wrong — it actually returns a Chroma index object.
#   def load_search_indexes(self, url: str) -> str:
#     # Extract the document ID from the URL
#     match_ = re.search('/document/d/([a-zA-Z0-9-_]+)', url)
#     if match_ is None:
#         raise ValueError('Invalid Google Docs URL')
#     doc_id = match_.group(1)
#
#     # Download the document as plain text
#     response = requests.get(f'https://docs.google.com/document/d/{doc_id}/export?format=txt')
#     response.raise_for_status()
#     text = response.text
#     return self.create_embedding(text)
#
# NOTE(review): same download logic as load_search_indexes but returns the raw
# text (the f'{text}' wrapper is a no-op — duplicated URL parsing could be
# factored into a shared helper when this code is revived).
#   def load_prompt(self,
#                   url: str) -> str:
#     # Extract the document ID from the URL
#     match_ = re.search('/document/d/([a-zA-Z0-9-_]+)', url)
#     if match_ is None:
#         raise ValueError('Invalid Google Docs URL')
#     doc_id = match_.group(1)
#
#     # Download the document as plain text
#     response = requests.get(f'https://docs.google.com/document/d/{doc_id}/export?format=txt')
#     response.raise_for_status()
#     text = response.text
#     return f'{text}'
#
#
# NOTE(review): splits text into <=1024-char chunks on newlines, embeds them
# with OpenAIEmbeddings into an in-memory Chroma store, and prints the token
# count plus a HARD-CODED price ($0.0004/1K tokens — stale; parameterize).
#   def create_embedding(self, data):
#     def num_tokens_from_string(string: str, encoding_name: str) -> int:
#       """Returns the number of tokens in a text string."""
#       encoding = tiktoken.get_encoding(encoding_name)
#       num_tokens = len(encoding.encode(string))
#       return num_tokens
#
#     source_chunks = []
#     splitter = CharacterTextSplitter(separator="\n", chunk_size=1024, chunk_overlap=0)
#
#     for chunk in splitter.split_text(data):
#       source_chunks.append(Document(page_content=chunk, metadata={}))
#
#     # Build the search index over the document chunks
#     search_index = Chroma.from_documents(source_chunks, OpenAIEmbeddings(), )
#
#     count_token = num_tokens_from_string(' '.join([x.page_content for x in source_chunks]), "cl100k_base")
#     print('\n ===========================================: ')
#     print('Количество токенов в документе :', count_token)
#     print('ЦЕНА запроса:', 0.0004*(count_token/1000), ' $')
#     return search_index
#
# NOTE(review): single-turn chat completion (system + user message) against
# gpt-3.5-turbo; returns the assistant message text.
#   def answer(self, system, topic, temp = 1):
#     messages = [
#       {"role": "system", "content": system},
#       {"role": "user", "content": topic}
#       ]
#
#     completion = openai.ChatCompletion.create(
#       model="gpt-3.5-turbo",
#       messages=messages,
#       temperature=temp
#       )
#
#     return completion.choices[0].message.content
#
# NOTE(review): token accounting copied from the OpenAI cookbook; only supports
# the (now retired) gpt-3.5-turbo-0301 message format and raises for any other
# model.
#   def num_tokens_from_messages(self, messages, model="gpt-3.5-turbo-0301"):
#     """Returns the number of tokens used by a list of messages."""
#     try:
#         encoding = tiktoken.encoding_for_model(model)
#     except KeyError:
#         encoding = tiktoken.get_encoding("cl100k_base")
#     if model == "gpt-3.5-turbo-0301":  # note: future models may deviate from this
#         num_tokens = 0
#         for message in messages:
#             num_tokens += 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
#             for key, value in message.items():
#                 num_tokens += len(encoding.encode(value))
#                 if key == "name":  # if there's a name, the role is omitted
#                     num_tokens += -1  # role is always required and always 1 token
#         num_tokens += 2  # every reply is primed with <im_start>assistant
#         return num_tokens
#     else:
#         raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
# See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
#
# NOTE(review): greedy word wrap at ~max_len chars. As written, every emitted
# line (including the first) starts with a leading space because the word is
# appended as " " + word even onto an empty current_line — fix when reviving.
#   def insert_newlines(self, text: str, max_len: int = 170) -> str:
#     words = text.split()
#     lines = []
#     current_line = ""
#     for word in words:
#         if len(current_line + " " + word) > max_len:
#             lines.append(current_line)
#             current_line = ""
#         current_line += " " + word
#     lines.append(current_line)
#     return "\n".join(lines)
#
#
# NOTE(review): interactive console sales-dialog loop. BUGS to fix before
# re-enabling: (1) it calls module-level `gpt`, `expert_prompt`,
# `validation_prompt`, `action_prompt`, none of which are defined in this file
# — `gpt.answer(...)` should presumably be `self.answer(...)` and the prompts
# should be parameters; (2) the `if (user == 'Stop')` break is case-sensitive
# and redundant with the lowercase check in the while condition.
#   def dialog(self):
#     user = ''
#     dialog = ''
#
#     print(f'{bcolors.OKBLUE}{bcolors.BOLD}С чем связан ваш интерес к искусственному интеллекту?{bcolors.ENDC}')
#
#     while user.lower() not in ['stop', 'exit', 'выход']:
#       user = input('Клиент: ')
#       if (user == 'Stop'): break
#
#       dialog += '\n\n' + 'Клиент: ' + user
#       add_dialog = gpt.answer(expert_prompt, user)
#
#       dialog += '\n\n' + 'Менеджер: ' + add_dialog
#       print(f'\n{bcolors.OKBLUE}{bcolors.BOLD}Менеджер:{bcolors.ENDC} {self.insert_newlines(add_dialog)}')
#       report = gpt.answer(validation_prompt, dialog)
#       answer = gpt.answer(action_prompt, report)
#
#       print(f'\n{bcolors.OKGREEN}{bcolors.BOLD}Отчёт системы:\n {bcolors.ENDC}{report}')
#       print(f'\n{bcolors.HEADER}{bcolors.BOLD}Менеджер: {bcolors.ENDC}{self.insert_newlines(answer)}\n\n')
#
#     return dialog
#
# NOTE(review): RAG-style answer — retrieves the k=5 most similar chunks from
# the index, stuffs them into the system message, and prints the completion.
# The `return completion` at the bottom is itself commented out, so the method
# returns None; price constant $0.002/1K tokens is hard-coded and stale.
#   def answer_index(self, system, topic, search_index, temp = 1, verbose = 0):
#
#     # Select documents by similarity to the question
#     docs = search_index.similarity_search(topic, k=5)
#     if (verbose): print('\n ===========================================: ')
#     message_content = re.sub(r'\n{2}', ' ', '\n '.join([f'\nОтрывок документа №{i+1}\n=====================' + doc.page_content + '\n' for i, doc in enumerate(docs)]))
#     if (verbose): print('message_content :\n ======================================== \n', message_content)
#
#     messages = [
#       {"role": "system", "content": system + f"{message_content}"},
#       {"role": "user", "content": topic}
#       ]
#
#     # example token count from the function defined above
#     if (verbose): print('\n ===========================================: ')
#     if (verbose): print(f"{self.num_tokens_from_messages(messages, 'gpt-3.5-turbo-0301')} токенов использовано на вопрос")
#
#     completion = openai.ChatCompletion.create(
#     model="gpt-3.5-turbo",
#     messages=messages,
#     temperature=temp
#     )
#     if (verbose): print('\n ===========================================: ')
#     if (verbose): print(f'{completion["usage"]["total_tokens"]} токенов использовано всего (вопрос-ответ).')
#     if (verbose): print('\n ===========================================: ')
#     if (verbose): print('ЦЕНА запроса с ответом :', 0.002*(completion["usage"]["total_tokens"]/1000), ' $')
#     if (verbose): print('\n ===========================================: ')
#     print('ОТВЕТ : \n', self.insert_newlines(completion.choices[0].message.content))
#
#     # return completion
#
# NOTE(review): near-duplicate of answer() that prints instead of returning.
# Name has a typo ("ansver"), and the `search_index` parameter is accepted but
# never used — likely meant to mirror answer_index. Consolidate when reviving.
#   def get_chatgpt_ansver3(self, system, topic, search_index, temp = 1):
#
#     messages = [
#       {"role": "system", "content": system},
#       {"role": "user", "content": topic}
#       ]
#
#     completion = openai.ChatCompletion.create(
#       model="gpt-3.5-turbo",
#       messages=messages,
#       temperature=temp
#       )
#     print('ОТВЕТ : \n', self.insert_newlines(completion.choices[0].message.content))