import datetime
import re
import time
import urllib.parse
import urllib.request
import urllib3
from urllib.parse import urlparse
from urllib.parse import urljoin
from urllib.request import urlopen
import urllib.robotparser
import csv

# Module-level urllib3 connection pool, shared by download() for all
# non-proxy requests (reuses sockets across calls to the same host).
http = urllib3.PoolManager()



# Rate limiter: enforces a minimum delay between requests to the same domain.
class Throttle:
    '''
    Throttles crawler access to URLs on the same domain.

    Sleeps just long enough so that at least ``delay`` seconds pass
    between two consecutive ``wait()`` calls for the same netloc.
    '''
    def __init__(self, delay):
        # Minimum number of seconds between requests to one domain.
        # A delay <= 0 disables throttling.
        self.delay = delay
        # Maps domain (netloc) -> datetime of the last access.
        self.domains = {}

    def wait(self, url):
        """Block until ``self.delay`` seconds have passed since the last
        access to ``url``'s domain, then record the current access time."""
        # Extract the domain portion of the URL.
        domain = urlparse(url).netloc
        # When was this domain last accessed (None on first visit)?
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            # Remaining time before the delay requirement is satisfied.
            # NOTE: total_seconds() is required here — timedelta.seconds
            # ignores the days component and truncates fractions.
            sleep_secs = self.delay - (datetime.datetime.now() - last_accessed).total_seconds()
            if sleep_secs > 0:
                # Not enough time has passed yet: sleep out the remainder.
                time.sleep(sleep_secs)
        # Record this access time for the domain.
        self.domains[domain] = datetime.datetime.now()

# Download a single web page, with optional proxy and retry on server errors.
def download(url, user_agent='wswp', request_max=3, proxy=None):
    """Fetch ``url`` with a GET request and return the body as bytes.

    Parameters:
        url:         the URL to fetch.
        user_agent:  value for the User-agent request header.
        request_max: number of retries remaining for 5xx server errors.
        proxy:       optional proxy URL; when given, the request goes
                     through a urllib3.ProxyManager instead of the shared
                     module-level pool.

    Returns the response body (bytes) on HTTP 200, or None on any error.
    """
    print("Starting Download, The URL is: {}".format(url))
    headers = {'User-agent': user_agent}
    # Use one consistent urllib3 code path for both cases; the original
    # mixed urllib and urllib3 and always issued the non-proxy request
    # even when a proxy was configured.
    pool = urllib3.ProxyManager(proxy) if proxy else http
    try:
        response = pool.request('GET', url, headers=headers)
    except Exception as e:
        # Connection-level failure (DNS, refused, timeout, ...).
        print("Download error:{}".format(e))
        return None
    try:
        if response.status == 200:
            return response.data
        print("遇到了错误，状态码是：{}".format(response.status))
        # Retry only on server-side (5xx) errors; 4xx won't improve.
        # request_max counts down on each recursive attempt.
        if request_max > 0 and 500 <= response.status < 600:
            return download(url, user_agent, request_max - 1, proxy)
        return None
    finally:
        # Return the connection to the pool in every code path.
        response.release_conn()

def link_crawl_do(seed_url, link_regex, max_deepth=5, save_dir='D:/Crawl_Results/htmls'):
    """Crawl outward from ``seed_url``, saving each page as an .html file.

    Follows only links matching ``link_regex``, up to ``max_deepth`` hops
    from the seed, throttled to one request per 5 seconds per domain.
    ``save_dir`` (new, backward-compatible parameter) is where page files
    are written; it is created if missing.
    """
    import os

    # Throttle: at most one request to the same domain every 5 seconds.
    throttler = Throttle(5)
    user_agent_list = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:128.0) Gecko/20100101 Firefox/128.0', 'BadCrawler', 'GoodCrawler']

    # Fetch robots.txt from the SITE ROOT — the original appended
    # /robots.txt to the full seed URL (including its path), which is wrong.
    seed_parts = urlparse(seed_url)
    rp = urllib.robotparser.RobotFileParser()
    rp.set_url("{}://{}/robots.txt".format(seed_parts.scheme, seed_parts.netloc))
    rp.read()

    # Pick the first user agent the site permits; fall back to a browser UA.
    current_user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:128.0) Gecko/20100101 Firefox/128.0'
    for user_agent in user_agent_list:
        if rp.can_fetch(user_agent, seed_url):
            current_user_agent = user_agent
            break
        else:
            print("Blocked by robots.txt")

    # Make sure the output directory exists before the first write.
    os.makedirs(save_dir, exist_ok=True)
    # Compile the link filter once instead of re-matching the raw string
    # for every discovered link.
    link_pattern = re.compile(link_regex)

    # Work list of URLs still to fetch (LIFO -> depth-first order).
    crawl_url_queue = [seed_url]
    # URL -> depth at which it was discovered; doubles as the "seen" set
    # so the same page is never queued twice.
    have_crawl_url_queue = {seed_url: 0}

    while crawl_url_queue:
        print(len(crawl_url_queue))
        url = crawl_url_queue.pop()
        deepth = have_crawl_url_queue[url]
        if deepth > max_deepth:
            continue

        # Respect the per-domain delay before hitting the network.
        throttler.wait(url)

        # Derive a filesystem-safe filename from the URL path.
        safe_path = urlparse(url).path.lstrip('/').replace('/', '_')
        target_url = "{}/{}.html".format(save_dir, safe_path)

        html = download(url, user_agent=current_user_agent)
        if html is None:
            print("html==>{}是空的，所以不保存，继续解析".format(target_url))
            continue

        with open(target_url, 'wb') as html_file_download:
            html_file_download.write(html)

        # Harvest links from the page and queue any new matching ones.
        links_from_html = get_links(html)
        if links_from_html is None:
            continue
        for link in links_from_html:
            link = urljoin(seed_url, link)
            if link_pattern.match(link) and link not in have_crawl_url_queue:
                have_crawl_url_queue[link] = deepth + 1
                crawl_url_queue.append(link)

def get_links(html):
    """Return a list of href values from all <a> tags in ``html``.

    ``html`` is the raw page bytes; returns None when ``html`` is None.
    Non-UTF-8 byte sequences are replaced rather than raising, so a
    single badly-encoded page cannot crash the crawl.
    """
    if html is None:
        return None
    # Match href="..." or href='...' inside anchor tags, case-insensitively.
    webpage_regex = re.compile(r'<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    html_string = html.decode("utf-8", errors="replace")
    return webpage_regex.findall(html_string)


# Demo entry point: crawl the Douban Top-250 movie list, following only
# individual movie-subject pages. Guarded so importing this module does
# not start a crawl as a side effect.
if __name__ == "__main__":
    seed_url = "https://movie.douban.com/top250"
    link_regex = "^https://(?!music\\.douban\\.com/subject/)movie\\.douban\\.com/subject/(\\d+)/$"
    link_crawl_do(seed_url, link_regex)
