from urllib.request import Request,urlopen
import re

# Browser-like User-Agent header so the request is not rejected by the
# site's basic anti-scraping check (bare urllib requests are often blocked).
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36"}
url = "https://read.douban.com/provider/all"
# Wrap the URL in a Request object carrying the headers above.
req = Request(url=url, headers=headers)
# Use a context manager so the HTTP response is always closed, even if
# read()/decode() raises (the original leaked the connection by never
# closing the urlopen() handle).
with urlopen(req) as resp:
    douban = resp.read().decode('utf-8')
print(f"log100017:{douban}")

# Non-greedy (.*?) captures just the inner text of each tag pair; a greedy
# .* would span from the first opening tag to the last closing tag on the
# page instead of matching each <div ...>...</div> individually.
book_name_pat = '<div class="name">(.*?)</div>'
book_count_pat = '<div class="works-num">(.*?)</div>'
# douban is already a decoded str, so the original str(...) wrapper was
# redundant; re.findall also compiles the pattern internally.
book_name = re.findall(book_name_pat, douban)
book_count = re.findall(book_count_pat, douban)
print(f"log100018:{book_name},{len(book_name)}")
print(f"log100019:{book_count} {len(book_count)}")
# Pair each publisher name with its works count. zip stops at the shorter
# list, so a length mismatch between the two scrapes no longer raises
# IndexError (the original indexed book_count with book_name's range).
book_merage = [f"{name}:{count}" for name, count in zip(book_name, book_count)]
print(f"log100020:{book_merage} {len(book_merage)}")

storge_path = "../files/txt/爬取出版社.txt"
# Append mode preserves results from earlier runs. The context manager
# guarantees the file is flushed and closed even on error (the original
# never called fh.close(), so buffered lines could be lost).
with open(storge_path, "a", encoding="utf-8") as fh:
    for line in book_merage:
        fh.write(line + "\n")

print(f"log100021:爬取内容存储在{storge_path}完成")
