from concurrent.futures import ThreadPoolExecutor
from datetime import datetime,timedelta
import os
import random
import re
import threading
import time
import urllib.parse
import urllib.request
import urllib3
from urllib.parse import urlparse, urlsplit
from urllib.parse import urljoin
import urllib.robotparser
from lxml import html as lhtml
import csv
import pickle
import zlib
from bson.binary import Binary
from pymongo import MongoClient
from zipfile import ZipFile
from io import StringIO
# Multi-threaded web crawler

# MongoDB-backed cache: exposes a dict-like interface over a TTL collection
class MongoCache:
    def __init__(self, client=None, expires=timedelta(days=30)):
        """Wrap a MongoDB collection as a dict-like page cache.

        client  -- an existing MongoClient, or None to connect to localhost:27017
        expires -- how long records stay valid (enforced by a MongoDB TTL index)
        """
        # `is None` identity test instead of `== None`
        if client is None:
            self.client = MongoClient('localhost', 27017)
        else:
            self.client = client
        self.db = self.client['cache']
        self.webpage = self.db['webcrawler']
        self.expires = expires
        # TTL index: MongoDB purges documents whose `timestamp` is older than expires
        self.webpage.create_index('timestamp', expireAfterSeconds=expires.total_seconds())

    def __getitem__(self, url):
        """Return the cached result for *url*; raise KeyError on a miss."""
        record = self.webpage.find_one({'_id': url})
        if record:
            # stored as zlib-compressed pickle; reverse both steps
            return pickle.loads(zlib.decompress(record['result']))
        raise KeyError(url + "不存在")

    def __setitem__(self, url, result):
        """Store *result* for *url* and refresh its TTL timestamp."""
        record = {'result': Binary(zlib.compress(pickle.dumps(result))),
                  'timestamp': datetime.now()}
        self.webpage.update_one({'_id': url}, {'$set': record}, upsert=True)

# Downloader: throttled, cached, optionally proxied HTTP fetching
class Downloader:
    def __init__(self, delay=5, user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:128.0) Gecko/20100101 Firefox/128.0', proxies=None, request_max=3, cache=None):
        """Configure the downloader.

        delay       -- minimum seconds between requests to the same domain
        user_agent  -- User-Agent header sent with every request
        proxies     -- optional list of proxy URLs; one is picked at random per request
        request_max -- how many retries are allowed on server errors / exceptions
        cache       -- optional dict-like cache (e.g. MongoCache)
        """
        self.throttle = Throttle(delay=delay)
        self.user_agent = user_agent
        self.proxies = proxies
        self.request_max = request_max
        self.cache = cache
        # urllib3 connection pool, reused across all non-proxy requests
        self.http = urllib3.PoolManager()

    def __call__(self, url):
        """Return the page body (bytes or None) for *url*, consulting the cache first."""
        result = None
        if self.cache:
            try:
                result = self.cache[url]
            except KeyError:
                pass  # cache miss: fall through to a fresh download
            else:
                # Re-download cached failures (missing record or 5xx) while
                # retries remain.  Guards against a cached None / missing code,
                # which crashed the original with a TypeError.
                if result is None or (self.request_max > 0 and
                                      result.get('code') is not None and
                                      500 <= result['code'] < 600):
                    result = None
        if result is None:
            self.throttle.wait(url)
            proxy = random.choice(self.proxies) if self.proxies else None
            headers = {'User-agent': self.user_agent}
            result = self.download(url, headers, self.request_max, proxy)
            if self.cache:
                self.cache[url] = result
        return result['html']

    def download(self, url, headers, request_max, proxy=None):
        """Fetch *url* once, retrying up to *request_max* times on 5xx or errors.

        Always returns {'html': bytes-or-None, 'code': status-or-None} so
        callers can index the result safely (the original could return None,
        or leave `html` unbound on its error paths).
        """
        print("正在下载, URL==>{}".format(url))
        html = None
        code = None
        if proxy:
            # Proxy path goes through urllib.request with a ProxyHandler.
            # Fixes the original `urlparse.urlparse(url)`, which raised
            # AttributeError because `urlparse` is the imported function itself.
            request = urllib.request.Request(url, headers=headers)
            proxy_params = {urlparse(url).scheme: proxy}
            opener = urllib.request.build_opener(urllib.request.ProxyHandler(proxy_params))
            try:
                with opener.open(request) as response:
                    code = response.status
                    if code == 200:
                        html = response.read()
                    else:
                        print("遇到了错误，状态码是：{}".format(code))
                        # only server errors are worth retrying
                        if request_max > 0 and 500 <= code < 600:
                            return self.download(url, headers, request_max - 1, proxy)
            except Exception as e:
                print("下载遇到了错误,错误代码是==>{}".format(e))
                if request_max > 0:
                    return self.download(url, headers, request_max - 1, proxy)
        else:
            # Plain path via the pooled urllib3 manager.  The original issued
            # this request unconditionally, wasting a fetch when a proxy was set.
            try:
                response = self.http.request('GET', url, headers=headers)
                try:
                    code = response.status
                    if code == 200:
                        html = response.data
                    else:
                        print("遇到了错误，状态码是：{}".format(code))
                        if request_max > 0 and 500 <= code < 600:
                            return self.download(url, headers, request_max - 1)
                finally:
                    response.release_conn()  # return the connection to the pool
            except Exception as ex:
                print("遇到了错误，错误代码是==>{}".format(ex))
        return {'html': html, 'code': code}

# Callback object: extracts a movie title from a page and appends it to a CSV file
class scrape_callback:
    def __init__(self, csv_path='D:/Crawl_Results/downloaded_data.csv'):
        """Open the output CSV and write the header row.

        csv_path -- destination file; defaults to the original hard-coded
                    Windows path for backward compatibility.
        """
        # Keep the handle so it can be closed; the original dropped it and
        # leaked the open file for the life of the process.
        self.output_file = open(csv_path, 'w', encoding='utf-8', newline='', errors='replace')
        self.writer = csv.writer(self.output_file)
        self.fields = ('中文名', '外文名')
        self.writer.writerow(self.fields)

    def close(self):
        """Flush and close the underlying CSV file."""
        self.output_file.close()

    def __call__(self, html):
        """Parse a Douban movie page (bytes) and record its Chinese/foreign titles.

        Returns the list of title parts, or None when nothing was found or an
        error occurred.
        """
        html_string = html.decode("utf-8")
        root = lhtml.fromstring(html_string)
        try:
            div_content = root.cssselect("div#content")[0]
            span_info = div_content.cssselect('span[property="v:itemreviewed"]')[0]
            # title looks like "中文名 外文名"; split once on the first space
            title_text = span_info.text_content().split(" ", 1)
            result_list = list(title_text)
            print("电影的标题是==>{}".format(title_text))
            self.writer.writerow(result_list)
            return result_list
        except IndexError:
            print("未找到指定的元素")
        except Exception as e:
            print(f"处理过程中发生错误: {e}")

# Rate limiter: enforces a minimum delay between hits to the same domain
class Throttle:
    '''
    Throttles crawler access so requests to one domain are at least
    `delay` seconds apart.
    '''
    def __init__(self, delay):
        # minimum number of seconds between requests to a single domain
        self.delay = delay
        # domain -> datetime of the most recent access
        self.domains = {}

    def wait(self, url):
        """Sleep just long enough to respect the delay for *url*'s domain."""
        domain = urlparse(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            # total_seconds(), not .seconds: .seconds is only the seconds
            # *component* of the timedelta and is wrong across day boundaries
            # (and never negative), so the original could oversleep or
            # miscompute the remaining wait.
            sleep_secs = self.delay - (datetime.now() - last_accessed).total_seconds()
            if sleep_secs > 0:
                print("正在休眠，将等待{}秒后再次连接".format(sleep_secs))
                time.sleep(sleep_secs)
        # record this access for the next call
        self.domains[domain] = datetime.now()


# Crawl pages starting from seed_url using a pool of worker threads
def threaded_crawler(delay, request_max, seed_url, link_regex, max_deepth=5,
                     max_threads=6, scrape_callback=None, cache=None, proxies=None):
    """Multi-threaded crawler.

    delay           -- per-domain politeness delay in seconds
    request_max     -- retry budget passed to the Downloader
    seed_url        -- starting URL; its robots.txt is consulted
    link_regex      -- only links matching this pattern are followed
    max_deepth      -- maximum link depth from the seed
    max_threads     -- size of the worker-thread pool
    scrape_callback -- callable(html) -> list; defaults to a new scrape_callback()
    cache           -- dict-like page cache; defaults to a new MongoCache()
    proxies         -- optional proxy list forwarded to the Downloader

    Returns the accumulated list of scraped rows (the original built this
    list but never returned it).
    """
    # Build the defaults lazily: the original signature evaluated
    # scrape_callback() and MongoCache() at *import* time, opening a file and
    # a MongoDB connection even if this function was never called.
    if scrape_callback is None:
        # the parameter shadows the module-level class, so fetch it via globals()
        scrape_callback = globals()['scrape_callback']()
    if cache is None:
        cache = MongoCache()
    results = []
    user_agent_list = ['BadCrawler', 'GoodCrawler']
    # parse the site's robots.txt
    rp = urllib.robotparser.RobotFileParser()
    rp.set_url(f"{seed_url}/robots.txt")
    rp.read()
    current_user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:128.0) Gecko/20100101 Firefox/128.0'
    # only fall back to the alternate user agents if the default one is barred
    if not rp.can_fetch(current_user_agent, seed_url):
        for user_agent in user_agent_list:
            if rp.can_fetch(user_agent, seed_url):
                current_user_agent = user_agent
                break
            else:
                print("该网站的robots.txt禁止我们访问")
    # work queue seeded with the start URL
    crawl_url_queue = [seed_url]
    # url -> depth; doubles as the "already seen" set so pages are not revisited
    have_crawl_url_queue = {seed_url: 0}
    downloader = Downloader(delay=delay, user_agent=current_user_agent, cache=cache,
                            request_max=request_max, proxies=proxies)

    def process_queue():
        # Worker loop: pop a URL, download, scrape, enqueue matching links.
        while crawl_url_queue:
            try:
                print("当前有带解析的链接共{}条".format(len(crawl_url_queue)))
                url = crawl_url_queue.pop()
            except IndexError:
                break  # another thread drained the queue between check and pop
            deepth = have_crawl_url_queue[url]
            if deepth > max_deepth:
                continue
            html = downloader(url)
            if html is None:
                continue
            if scrape_callback:
                results.extend(scrape_callback(html) or [])
            links_from_html = get_links(html)
            if links_from_html is None:
                continue
            for link in links_from_html:
                link = urljoin(seed_url, link)
                # follow only links matching the pattern and not yet seen
                if re.match(link_regex, link) and link not in have_crawl_url_queue:
                    have_crawl_url_queue[link] = deepth + 1
                    crawl_url_queue.append(link)

    threads = []
    while threads or crawl_url_queue:
        # drop finished workers
        threads = [t for t in threads if t.is_alive()]
        while len(threads) < max_threads and crawl_url_queue:
            thread = threading.Thread(target=process_queue)
            thread.daemon = True  # setDaemon() is deprecated since Python 3.10
            thread.start()
            threads.append(thread)
        time.sleep(0.1)  # avoid a pure busy-wait while workers run
    return results


# Extract link targets from a downloaded HTML page
def get_links(html):
    """Return all href values found in *html* (bytes), or None if html is None.

    Matches both single- and double-quoted href attributes with a
    case-insensitive regex.
    """
    if html is None:
        return None
    # raw string so the escapes are taken literally by the regex engine
    webpage_regex = re.compile(r'<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    return webpage_regex.findall(html.decode("utf-8"))

# Demo: crawl the Douban Top250 movie listing
seed_url = "https://movie.douban.com/top250"
link_regex = "^https://(?!music\\.douban\\.com/subject/)movie\\.douban\\.com/subject/(\\d+)/$"

# Guarded so that importing this module does not immediately start a crawl
# (the original fired network requests as an import side effect).
if __name__ == "__main__":
    threaded_crawler(5, 5, seed_url, link_regex, 5)
