#!/usr/bin/env python3
# -*- coding: utf-8 -*-

'''
Fetch all of Douban's book tags,
iterate over each tag to collect book subject IDs,
and save each subject_id to a file.
'''

from lxml import etree
import requests
import re
import random
import time

# Base URL for Douban Books; all tag and listing pages are built from this host.
HOST = 'https://book.douban.com'

def getTags():
    """Fetch the full list of book tag names from Douban's tag index page.

    Returns:
        list[str]: tag names scraped from the tag table; empty if the
        XPath matches nothing (e.g. the page layout changed).
    """
    url = HOST + '/tag/?view=type&icn=index-sorttags-all'
    # Timeout keeps the crawl from hanging forever on a stalled connection.
    res = requests.get(url, timeout=10)
    html = etree.HTML(res.text)
    # NOTE(review): the '/tbody/' step assumes the served HTML contains an
    # explicit <tbody>; lxml does not insert one -- verify against the live page.
    anchors = html.xpath('//*[@id="content"]/div/div/div/div/table/tbody/tr/td/a')
    return [a.text for a in anchors]

def getBookList(url):
    """Fetch one tag listing page and return the book detail-page URLs.

    Args:
        url: full URL of a Douban tag listing page.

    Returns:
        list[str]: href values of the book links on that page; empty when
        the page has no results (the caller uses this to stop paging).
    """
    # Timeout keeps the crawl from hanging forever on a stalled connection.
    res = requests.get(url, timeout=10)
    html = etree.HTML(res.text)
    return html.xpath('//*[@id="subject_list"]/ul/li/div/h2/a/@href')

# Tag+page markers that were already crawled; loaded at startup so an
# interrupted run can resume where it left off.
done_tag = []

def init():
    """Load previously completed tag/page markers from done_tag.txt into done_tag.

    A missing file is normal on a first run and is tolerated silently;
    any other I/O error is reported but does not abort the crawl.
    """
    global done_tag
    try:
        with open('done_tag.txt', 'r', encoding='utf-8') as f:
            # Iterate the file directly instead of a manual readline() loop.
            for line in f:
                done_tag.append(line.rstrip('\n'))
    except FileNotFoundError:
        # First run: nothing has been crawled yet.
        pass
    except OSError:
        # Narrowed from a bare `except:` that swallowed every exception.
        print('read done_tag  error')

if __name__ == '__main__':

    init()
    path = 'ids.txt'
    ids = []
    count = 0

    done_tags = []
    # Fetch every tag once up front.
    tags = getTags()
    for tag in tags:
        done_tags.append(tag)
        # Only the first 20 pages of each tag are crawled.
        for page in range(20):
            marker = tag + str(page + 1)
            # Skip tag/page combinations completed in a previous run.
            if marker in done_tag:
                continue

            print('Tags: %d/%d ---- Tag: %s(%d/20)' % (len(done_tags), len(tags), tag, page + 1))
            tag_url = 'https://book.douban.com/tag/' + tag + '?start=' + str(page * 20) + '&type=T'
            url_list = getBookList(tag_url)
            # An empty page means this tag is exhausted; move to the next tag.
            if len(url_list) == 0:
                break
            for book_url in url_list:
                # A subject URL looks like .../subject/<digits>/ -- strip every
                # non-digit to leave the subject id. Raw string fixes the
                # invalid "\D" escape; `subject_id` no longer shadows builtin id.
                subject_id = re.sub(r"\D", "", book_url)
                ids.append(subject_id)
            # Flush collected ids to disk so progress survives a crash.
            with open(path, 'a+', encoding='utf-8') as f:
                while len(ids) > 0:
                    count += 1
                    f.write(ids.pop() + '\n')
            # Record this tag/page as done, both on disk and in memory.
            with open('done_tag.txt', 'a+', encoding='utf-8') as f:
                f.write(marker + '\n')
            done_tag.append(marker)
            # Random 5-8 second delay between requests to stay polite
            # (original comment claimed 3-8 s; the code sleeps 5-8 s).
            time.sleep(random.randint(5, 8))