# import requests
# from bs4 import BeautifulSoup
# from threading import Thread, Lock
# import time
#
# # 豆瓣电影TOP250的基础URL
# BASE_URL = 'https://movie.douban.com/top250'
# HEADERS = {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
#
# # 创建一个锁对象，用于线程间同步
# lock = Lock()
#
# # 存储电影信息的列表
# movies = []
#
#
# def fetch_page(url, page_num):
#     """
#     获取指定页面的内容
#     """
#     try:
#         response = requests.get(url, headers=HEADERS, timeout=10)
#         response.raise_for_status()
#         return response.text
#     except requests.RequestException as e:
#         print(f"Error fetching page {page_num}: {e}")
#         return None
#
#
# def parse_movie_info(html_content, start_index):
#     """
#     解析HTML内容，提取电影信息
#     """
#     soup = BeautifulSoup(html_content, 'html.parser')
#
#     # 提取电影信息
#     movie_list = soup.find_all('div', class_='item')
#     print(movie_list)
#     for movie in movie_list:
#         rank = movie.find('em').text
#         title = movie.find('span', class_='title').text
#         rating = movie.find('span', class_='rating_num').text
#         link = movie.find('a')['href']
#
#         with lock:
#             movies.append({
#                 'rank': rank,
#                 'title': title,
#                 'rating': rating,
#                 'link': link
#             })
#
#
# def crawl_douban_top250():
#     """
#     多线程爬取豆瓣电影TOP250
#     """
#     threads = []
#     for i in range(10):  # 豆瓣TOP250分为10页，每页25条记录
#         url = f'{BASE_URL}?start={i * 25}&filter='
#         html_content = fetch_page(url, i + 1)
#         if html_content:
#             # 为每个页面创建一个线程解析电影信息
#             thread = Thread(target=parse_movie_info, args=(html_content, i * 25))
#             threads.append(thread)
#             thread.start()
#
#     # 等待所有线程完成 (wait for all parser threads to finish)
#     for thread in threads:
#         thread.join()
#
#
# if __name__ == '__main__':
#     start_time = time.time()
#     crawl_douban_top250()
#     end_time = time.time()
#
#     # 打印爬取到的电影信息
#     for movie in movies:
#         print(movie)
#
#     print(f"爬取完成，共耗时: {end_time - start_time:.2f} 秒")



# from langchain_community.llms import Tongyi
# from langchain.memory import ConversationBufferMemory
# from langchain_core.runnables.history import RunnableWithMessageHistory
# from langchain.prompts import PromptTemplate
#
# # 创建 Tongyi LLM 实例（假设你已经设置了正确的 Tongyi 配置）
# llm = Tongyi()
#
# # 创建内存，用于存储历史对话
# memory = ConversationBufferMemory(memory_key="history", return_messages=True)
#
# # 创建一个函数用于获取会话历史
# def get_session_history():
#
#     return memory.buffer
#
# # 使用 RunnableWithMessageHistory 创建对话处理对象
# conversation = RunnableWithMessageHistory(
#     runnable=llm,
#     get_session_history=get_session_history,
#     verbose=True
# )
#
# # 进行多轮对话
# def start_conversation():
#     print("系统: 你好！我可以帮助你解答问题。")
#     while True:
#         user_input = input("用户: ")
#         if user_input.lower() in ['exit', 'quit']:
#             print("系统: 对话结束！")
#             break
#         # 获取模型的响应
#         response = conversation.invoke(input=user_input)
#         print(f"系统: {response['text']}")
#
# if __name__ == "__main__":
#     start_conversation()




