import requests
import time
from w3lib.html import (remove_tags, 
                        remove_comments, 
                        remove_tags_with_content,
                        replace_escape_chars,
                        replace_entities)
from urllib.parse import urljoin
from parsel import Selector
import html
from html.parser import HTMLParser

class Crawler(HTMLParser):
    """Depth-limited web crawler that also extracts plain text from HTML.

    Extends ``html.parser.HTMLParser``: every document fed to the parser
    contributes its text nodes to ``self.fed``, which accumulates for the
    lifetime of the instance (see :meth:`get_data`).
    """

    def __init__(self, max_depth=2):
        """Initialize the parser state.

        Args:
            max_depth: maximum recursion depth for :meth:`crawl_recursive`.
        """
        # HTMLParser.__init__ already calls self.reset(); no explicit call needed.
        super().__init__()
        self.fed = []              # text chunks collected by handle_data
        self.visited_urls = set()  # guards against re-crawling the same URL
        self.max_depth = max_depth

    def crawl(self, url, retry=3, timeout=1):
        """Fetch *url*, retrying with exponential backoff.

        Args:
            url: absolute URL to fetch.
            retry: maximum number of attempts.
            timeout: per-request timeout in seconds; also the backoff base.

        Returns:
            The response body on HTTP 200, otherwise ``None``.
        """
        for attempt in range(retry):
            try:
                response = requests.get(url, timeout=timeout)
                if response.status_code == 200:
                    return response.text
            except requests.RequestException as e:
                print(f"Attempt {attempt + 1} failed: {e}")
            # Back off before the next attempt — but not after the last one,
            # where sleeping would only delay the None return.
            if attempt < retry - 1:
                time.sleep(timeout * (2 ** attempt))  # Exponential backoff
        return None

    def crawl_recursive(self, url, depth=0):
        """Crawl *url* and follow its links up to ``self.max_depth`` levels.

        Visited URLs are tracked in ``self.visited_urls`` so each page is
        fetched at most once. Text from every fetched page accumulates in
        ``self.fed`` via the feed()/handle_data() callbacks.
        """
        if depth > self.max_depth or url in self.visited_urls:
            return
        self.visited_urls.add(url)
        content = self.crawl(url)
        if content:
            self.feed(content)
            selector = Selector(text=content)
            links = selector.css('a::attr(href)').getall()
            for link in links:
                # Resolve relative hrefs against the current page's URL.
                full_url = urljoin(url, link)
                self.crawl_recursive(full_url, depth + 1)

    def handle_data(self, d):
        """HTMLParser callback: collect each text node."""
        self.fed.append(d)

    def get_data(self):
        """Return all text collected so far, joined with single spaces."""
        return ' '.join(self.fed)

    def parse_html(self, html_content):
        """Feed *html_content* into the parser and return the collected text.

        Note: text accumulates across calls on the same instance, so the
        return value includes text from any previously fed documents.

        Args:
            html_content: raw HTML string (may be empty or None).

        Returns:
            The accumulated plain text, or ``""`` for falsy input.
        """
        if not html_content:
            return ""
        # Do NOT pre-unescape here: HTMLParser (convert_charrefs=True by
        # default) already decodes character references during feed().
        # Calling html.unescape first decoded entities twice, turning e.g.
        # "&amp;lt;" into "<" instead of "&lt;".
        self.feed(html_content)
        return self.get_data()

    def clean_html(self, html_content):
        """Strip markup from *html_content* and normalize whitespace.

        Removes non-content elements wholesale (scripts, styles, chrome),
        drops comments and remaining tags, decodes entities, and collapses
        all runs of whitespace to single spaces.
        """
        text = remove_tags_with_content(html_content, which_ones=('script', 'style', 'noscript', 'header', 'footer', 'svg', 'img', 'figure', 'figcaption', 'meta', 'link', 'iframe'))
        text = remove_comments(text)
        text = remove_tags(text)
        text = replace_entities(text)
        text = replace_escape_chars(text, which_ones=('\n', '\t', '\r'), replace_by=' ')
        # Collapse any remaining whitespace runs into single spaces.
        text = ' '.join(text.split())
        return text

    def extract_links(self, html_content):
        """Return all href values of ``<a>`` elements in *html_content*."""
        selector = Selector(text=html_content)
        links = selector.css('a::attr(href)').getall()
        return links


if __name__ == "__main__":
    # Smoke test: fetch one page and report the outcome.
    crawler = Crawler()
    url = "https://baike.baidu.com/item/%E4%B8%89%E5%9B%BD%E6%BC%94%E4%B9%89/5782"
    content = crawler.crawl(url)
    if content:
        # Print the body only on success; previously a failed crawl
        # printed a bare "None" before the failure message.
        print(content)
        print("Crawled content successfully.")
    else:
        print("Failed to crawl content.")