import requests
from langchain.tools import tool
import praw
import time


class BrowserTool:
    """Web-scraping tools: Reddit (via PRAW) and CSDN (via requests + BeautifulSoup)."""

    @tool("Scrape reddit content")
    def scrape_reddit(max_comments_per_post=7):
        """Scrape hot posts and their top-level comments from r/LocalLLaMA.

        Args:
            max_comments_per_post: cap on comments kept per post;
                pass None to keep every top-level comment.

        Returns:
            A list of dicts with keys 'title', 'url', and 'comments'
            (list of comment body strings).
        """
        reddit = praw.Reddit(
            client_id="client-id",
            client_secret="client-secret",
            user_agent="user-agent",
        )
        subreddit = reddit.subreddit("LocalLLaMA")
        scraped_data = []

        for post in subreddit.hot(limit=12):
            post_data = {"title": post.title, "url": post.url, "comments": []}

            try:
                post.comments.replace_more(limit=0)  # Load top-level comments only
                comments = post.comments.list()
                # Fix: the slice previously hard-coded 7, silently ignoring
                # the max_comments_per_post parameter.
                if max_comments_per_post is not None:
                    comments = comments[:max_comments_per_post]

                for comment in comments:
                    post_data["comments"].append(comment.body)

                scraped_data.append(post_data)

            except praw.exceptions.APIException as e:
                # Best-effort: log and back off, then continue with the next post.
                print(f"API Exception: {e}")
                time.sleep(60)  # Sleep for 1 minute before retrying

        return scraped_data

    @tool("Search Article on CSDN")
    def search(word):
        """Search Article on CSDN.

        Args:
            word: the search query string.

        Returns:
            Up to 10 dicts of the form {'title': ..., 'link': ...}.
        """
        url = 'https://so.csdn.net/api/v3/search'
        params = {
            'q': word,
            'p': 1,  # first results page only
        }
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36'
        }
        response = requests.get(url=url, params=params, headers=headers)
        search_list = []
        # Fix: the old counter broke at num == 10 after starting from 1,
        # returning only 9 results; slice to a full 10 instead. The unused
        # 'author' computation was also removed.
        for item in response.json()['result_vos'][:10]:
            # The API highlights matches with <em> tags; strip them from titles.
            title = item["title"].replace('<em>', '').replace('</em>', '')
            search_list.append({
                'title': title,
                'link': item['url'],
            })
        return search_list

    @tool("查看CSDN上的文章")
    def view_article(link):
        """View an article on CSDN: fetch the page and return its plain-text body.

        Args:
            link: URL of the CSDN article.

        Returns:
            The article text, or '' if the content div cannot be found.
        """
        response = requests.get(url=link)
        # The article body lives in the div with class 'blog-content-box'.
        from bs4 import BeautifulSoup
        soup = BeautifulSoup(response.text, 'lxml')
        article = soup.find('div', class_='blog-content-box')
        # Fix: previously this raised AttributeError on a missing div
        # (blocked request or layout change); degrade gracefully instead.
        if article is None:
            return ""
        return article.text

