import datetime
import os
import pathlib as path_util
import re
import time
import urllib.parse
import urllib.request
import urllib3
from urllib.parse import urlparse
from urllib.parse import urljoin
from urllib.request import urlopen
import urllib.robotparser
from lxml import html as lhtml
import csv

# Module-level urllib3 connection pool manager, shared by download() below.
http = urllib3.PoolManager()
# Callback object that stores parsed data into a CSV file.
class scrape_callback:
    """Extract a movie title from a Douban page and append it to a CSV file.

    NOTE(review): the class name should be PascalCase per PEP 8; it is kept
    lowercase because link_crawl_do() references it by this name.
    """

    def __init__(self, csv_path='D:/Crawl_Results/htmls/downloaded_data.csv'):
        """Open csv_path for writing and emit the header row.

        csv_path: output file; its directory is created if missing. The
        default preserves the original hard-coded location.
        """
        target_dir = os.path.dirname(csv_path)
        if target_dir:
            # Create missing parent directories so open() cannot fail on them.
            os.makedirs(target_dir, exist_ok=True)
        # Keep the handle so the file can be closed explicitly; explicit utf-8
        # keeps the Chinese header portable across locales (Windows default
        # encoding would otherwise be used).
        self._file = open(csv_path, 'w', newline='', encoding='utf-8')
        self.writer = csv.writer(self._file)
        self.fields = ('中文名', '外文名')
        self.writer.writerow(self.fields)

    def close(self):
        """Flush and close the underlying CSV file."""
        self._file.close()

    def __call__(self, html):
        """Parse html (bytes) and return [chinese, english] title parts.

        Returns None when html is None or when parsing fails (errors are
        printed, not raised, so the crawl loop keeps going).
        """
        # download() returns None on failure; bail out instead of letting
        # None.decode() surface as an AttributeError.
        if html is None:
            return None
        try:
            root = lhtml.fromstring(html.decode("utf-8"))
            div_content = root.cssselect("div#content")[0]
            span_info = div_content.cssselect('span[property="v:itemreviewed"]')[0]
            # Douban titles look like "中文名 English Name": split on the
            # first space to get at most two parts.
            title_text = span_info.text_content().split(" ", 1)
            result_list = list(title_text)
            print("电影的标题是==>{}".format(title_text))
            self.writer.writerow(result_list)
            return result_list
        except IndexError:
            # cssselect() found no matching element on this page.
            print("未找到指定的元素")
        except Exception as e:
            print(f"处理过程中发生错误: {e}")

# Class that rate-limits requests to the same domain.
class Throttle:
    """Enforce a minimum delay between successive requests to one domain."""

    def __init__(self, delay):
        # Minimum seconds between two requests to the same domain
        # (<= 0 disables throttling).
        self.delay = delay
        # Maps domain -> datetime of the most recent request to it.
        self.domains = {}

    def wait(self, url):
        """Sleep if needed so url's domain is not hit too fast, then record the visit."""
        domain = urlparse(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            # BUGFIX: use total_seconds(), not .seconds — .seconds is only the
            # seconds *component* of the timedelta (truncates sub-second
            # precision and wraps around for gaps longer than a day).
            elapsed = (datetime.datetime.now() - last_accessed).total_seconds()
            sleep_secs = self.delay - elapsed
            if sleep_secs > 0:
                print("正在休眠，将等待{}秒后再次连接".format(sleep_secs))
                time.sleep(sleep_secs)
        # Record this visit for the next wait() call.
        self.domains[domain] = datetime.datetime.now()

# Download one web page and return its raw bytes.
def download(url, user_agent='wswp', request_max=3, proxy=None):
    """GET url via urllib3 and return the body bytes, or None on failure.

    url         : address to fetch.
    user_agent  : User-agent header value.
    request_max : number of retries remaining (decremented on each retry).
    proxy       : optional proxy URL; requests are routed through a
                  urllib3.ProxyManager when given.

    Fixes over the original:
    - the original issued the GET unconditionally and then a *second*
      request in the proxy branch;
    - `urlparse.urlparse(url)` raised AttributeError (urlparse is already
      the function);
    - one retry call passed `proxy` into the `request_max` slot;
    - retry results were discarded instead of returned;
    - urllib.request opener responses were used with the urllib3 API
      (.data / .release_conn do not exist there).
    """
    print("正在下载, URL==>{}".format(url))
    headers = {'User-agent': user_agent}
    # Route through a proxy pool when requested, otherwise the shared pool.
    pool = urllib3.ProxyManager(proxy) if proxy else http
    response = None
    try:
        response = pool.request('GET', url, headers=headers)
        if response.status == 200:
            return response.data
        print("遇到了错误，状态码是：{}".format(response.status))
        # Retry only server-side (5xx) errors; a 4xx would fail identically.
        if request_max > 0 and 500 <= response.status < 600:
            return download(url, user_agent, request_max - 1, proxy)
        return None
    except Exception as e:
        print("下载遇到了错误,错误代码是==>{}".format(e))
        if request_max > 0:
            return download(url, user_agent, request_max - 1, proxy)
        return None
    finally:
        # Return the connection to the pool even on error paths.
        if response is not None:
            response.release_conn()

# Crawl pages reachable from seed_url, following links matching link_regex.
_NO_CALLBACK_GIVEN = object()          # sentinel: "argument omitted" vs an explicit falsy value
_default_callback_cls = scrape_callback  # captured before the parameter name shadows it

def link_crawl_do(seed_url, link_regex, max_deepth=5, scrape_callback=_NO_CALLBACK_GIVEN):
    """Depth-limited crawl starting at seed_url.

    seed_url        : starting page; also the base for resolving relative links.
    link_regex      : only absolute links matching this pattern are followed.
    max_deepth      : maximum link depth from the seed (seed is depth 0).
    scrape_callback : callable(html_bytes) -> list | None; when omitted a
                      default scrape_callback() is created lazily (the
                      original default `scrape_callback()` opened its CSV
                      file as an import-time side effect). Pass a falsy
                      value to disable scraping.

    Returns the accumulated list of scraped results (the original built
    this list but never returned it).
    """
    if scrape_callback is _NO_CALLBACK_GIVEN:
        scrape_callback = _default_callback_cls()
    results = []
    # Throttle requests to the same domain to a 5-second spacing.
    throttler = Throttle(5)
    user_agent_list = ['BadCrawler', 'GoodCrawler']
    # Parse the site's robots.txt. BUGFIX: build the robots URL from the
    # scheme+host only — "{seed_url}/robots.txt" is wrong when the seed
    # has a path (e.g. .../top250/robots.txt).
    parsed_seed = urlparse(seed_url)
    rp = urllib.robotparser.RobotFileParser()
    rp.set_url(f"{parsed_seed.scheme}://{parsed_seed.netloc}/robots.txt")
    try:
        rp.read()
    except Exception as e:
        # An unreachable robots.txt should not abort the crawl;
        # RobotFileParser then allows everything by default.
        print("读取robots.txt失败: {}".format(e))
    current_user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:128.0) Gecko/20100101 Firefox/128.0'
    # Only when the default Firefox UA is banned, look for an allowed one.
    if not rp.can_fetch(current_user_agent, seed_url):
        for user_agent in user_agent_list:
            if rp.can_fetch(user_agent, seed_url):
                current_user_agent = user_agent
                break
        else:
            # Print once after the whole list failed (the original printed
            # once per rejected candidate).
            print("该网站的robots.txt禁止我们访问")
    # Stack of URLs still to process, and a url -> depth map that doubles
    # as the "already seen" set.
    crawl_url_queue = [seed_url]
    have_crawl_url_queue = {seed_url: 0}
    while crawl_url_queue:
        print("当前有带解析的链接共{}条".format(len(crawl_url_queue)))
        url = crawl_url_queue.pop()
        deepth = have_crawl_url_queue[url]
        if deepth > max_deepth:
            continue
        # Honour the per-domain delay before downloading.
        throttler.wait(url)
        html = download(url, user_agent=current_user_agent)
        if html is None:
            print("未获取到任何HTML，跳过...")
            continue
        # Only invoke the callback with real bytes (the original passed
        # None through and relied on the callback's broad except).
        if scrape_callback:
            results.extend(scrape_callback(html) or [])
        for link in get_links(html) or []:
            # Resolve relative links against the seed URL.
            link = urljoin(seed_url, link)
            # Enqueue only matching links we have not seen before.
            if re.match(link_regex, link) and link not in have_crawl_url_queue:
                have_crawl_url_queue[link] = deepth + 1
                crawl_url_queue.append(link)
    return results

# Extract link targets from downloaded HTML.
# Compiled once at import time instead of on every call.
_A_HREF_RE = re.compile(r'<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)

def get_links(html):
    """Return the href value of every <a> tag in html (bytes), or None.

    Returns None when html is None (download failure); otherwise a list of
    raw href strings (possibly relative — the caller resolves them).
    """
    if html is None:
        return None
    # errors="replace" so a single badly-encoded byte does not abort
    # link extraction for the whole page.
    html_string = html.decode("utf-8", errors="replace")
    return _A_HREF_RE.findall(html_string)

# Save downloaded HTML bytes to a local file named after the URL path.
def save_html(html, url, file_path="D:/Crawl_Results/htmls"):
    """Write html (bytes) to "<file_path>/<url-path-with-underscores>.html".

    Errors are printed rather than raised so a failed save cannot abort
    the crawl. The directory is created (with parents) when missing.
    """
    try:
        # Build the file name from the URL path: strip the leading '/',
        # turn the remaining separators into '_', drop a trailing '_'.
        parsed_url = urlparse(url)
        file_name = parsed_url.path.lstrip('/').replace('/', '_').rstrip('_')
        if not file_name:
            # Root-path URLs ("/") would otherwise produce ".html".
            file_name = parsed_url.netloc or "index"
        save_dir = path_util.Path(file_path)
        # BUGFIX: the original tested `if not save_dir:` which is always
        # False (Path objects are truthy), so the directory was never
        # created; mkdir(parents=True) also builds missing parents.
        save_dir.mkdir(parents=True, exist_ok=True)
        target = save_dir / "{}.html".format(file_name)
        with open(target, 'wb') as html_file_download:
            html_file_download.write(html)
    except Exception as e:
        print("保存HTML到本地失败，遇到了错误{}".format(e))


# 测试
seed_url="https://movie.douban.com/top250"
seed_url2 = "https://movie.douban.com/annual/2023/"
link_regex="^https://(?!music\\.douban\\.com/subject/)movie\\.douban\\.com/subject/(\\d+)/$"

link_crawl_do(seed_url2,link_regex)
