# -*- coding: utf-8 -*-
"""
Created on Sat Dec 17 13:47:27 2022

@author: DELL
"""

import os
import threading
import time

import requests
from bs4 import BeautifulSoup
class Spider:
    """Breadth-first web crawler.

    Downloads the seed page, extracts in-domain (jd.com) links, and crawls
    them breadth-first, fetching each URL on its own worker thread and
    saving every page to ./jd_data/<n>.html until `number` pages are stored.
    """

    def __init__(self, url, number):
        """url: seed URL to start from; number: total pages to save."""
        self.url = url
        self.number = number
        # All mutable state is per-instance.  (These used to be class-level
        # attributes, which would be shared between Spider instances.)
        self.filename = 1                   # next output file name
        self.url_list = []                  # BFS frontier of URLs to fetch
        self.current_pages = 0              # pages saved so far
        self.url_set = set()                # URLs already queued, prevents duplicates
        self.result_list = []               # downloaded page bodies awaiting save
        self.poll = []                      # worker threads
        self.pending = 0                    # downloads currently in flight
        self.threadLock = threading.Lock()  # guards the shared state above

    def download_content(self, url):
        """Fetch `url` and return its decoded body as text.

        Raises requests.HTTPError for non-2xx responses and
        requests.Timeout / ConnectionError on transport failures.
        """
        # Browser-like headers to get past JD's basic anti-crawling checks.
        kv = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
            "cookie": "***"
        }

        response = requests.get(url, headers=kv, timeout=100)

        # Fail loudly on non-2xx instead of silently saving error pages.
        response.raise_for_status()
        # Decode with the encoding detected from the page content itself.
        response.encoding = response.apparent_encoding
        return response.text

    def save_to_file(self, filename, content):
        """Write string `content` to `filename`, UTF-8 encoded."""
        with open(filename, mode="w", encoding="utf-8") as f:
            f.write(content)

    def create_doc_from_filename(self, filename):
        """Read the saved HTML file and return its BeautifulSoup document."""
        with open(filename, "r", encoding='utf-8') as f:
            return BeautifulSoup(f.read(), 'lxml')

    def parse(self, doc):
        """Extract unseen jd.com links from `doc` into the BFS frontier."""
        for a in doc.find_all("a"):
            a_url = a.attrs.get('href')
            # Only follow in-domain links that we have not queued before.
            if a_url and 'jd.com' in a_url and a_url not in self.url_set:
                self.url_list.append(a_url)
                self.url_set.add(a_url)

    def url_add(self, filename):
        """Read the saved page `filename` and enqueue the new URLs it contains."""
        self.parse(self.create_doc_from_filename(filename))

    def download_thread(self, url):
        """Worker: download one page and hand the body to the main loop."""
        try:
            result = self.download_content(url)
        except Exception:
            # Was a bare `except:`; keep any download failure from killing
            # the worker, but no longer swallow KeyboardInterrupt/SystemExit.
            with self.threadLock:
                print("fail")
                self.url_set.discard(url)  # allow the URL to be re-queued later
                self.pending -= 1
            return
        with self.threadLock:
            self.result_list.append(result)
            self.pending -= 1

    def run(self):
        """Crawl breadth-first from the seed until `number` pages are saved."""
        # Make sure the output directory exists before the first save.
        os.makedirs("./jd_data", exist_ok=True)

        # Fetch and save the seed page synchronously to build the first frontier.
        result = self.download_content(self.url)
        self.url_set.add(self.url)  # never re-queue the seed itself
        filename = "./jd_data/" + str(self.filename) + '.html'
        self.filename += 1
        self.save_to_file(filename, result)
        self.current_pages += 1
        if self.current_pages >= self.number:
            print("finish")
            return
        self.url_add(filename)

        # Breadth-first traversal.  `pending` tracks in-flight downloads so
        # the loop no longer exits while workers are still fetching pages
        # (the original terminated as soon as both lists were empty).
        while True:
            if self.url_list:
                # Fetch each URL on its own thread so slow responses
                # don't stall saving of already-downloaded pages.
                url = self.url_list.pop(0)
                with self.threadLock:
                    self.pending += 1
                worker = threading.Thread(target=self.download_thread, args=(url,))
                self.poll.append(worker)
                worker.start()

            wait = False
            with self.threadLock:
                if self.result_list:
                    # A download finished: persist it and mine it for new links.
                    filename = "./jd_data/" + str(self.filename) + '.html'
                    self.filename += 1
                    self.save_to_file(filename, self.result_list.pop(0))
                    self.current_pages += 1
                    if self.current_pages >= self.number:
                        break
                    self.url_add(filename)
                elif not self.url_list:
                    if self.pending == 0:
                        # Frontier empty and nothing in flight: crawl is done.
                        break
                    # Downloads still in flight; sleep outside the lock.
                    wait = True
            if wait:
                time.sleep(5)
        print("finish")
    

if __name__ == '__main__':
    # Crawl up to 15000 pages breadth-first, starting from the JD homepage.
    crawler = Spider("https://www.jd.com", 15000)
    crawler.run()
