import requests
import os
from pyquery import PyQuery as pq
import json
import LConf


# Crawler configuration: target site and the on-disk detail-URL cache file.
conf = {
	'domain': 'https://www.610ti.com',
	'pageUrlCache': './urlList.json',
}

# 从详情页获取视频的下载地址
def getVideoUrl(url):
	"""Fetch a video detail page and return (download-link HTML, page title).

	Returns (False, '') when the request or the parse fails.  Note the
	first element may also be None when the page has no '.downurl a' node
	(PyQuery .html() on an empty selection).
	"""
	try:
		r = requests.get(url)
	except Exception:
		# Request never produced a response object; nothing to close.
		return False, ''
	try:
		r.encoding = 'utf-8'
		doc = pq(r.text)
	except Exception:
		return False, ''
	finally:
		# Original code leaked the connection: it tested `if r:` which is
		# status-based on a Response (falsy for non-2xx) and never closed
		# the response on success at all.
		r.close()
	title = doc('title').text()
	downDom = doc('.downurl a')
	return downDom.html(), title

# 从列表页获取页面的连接池
def getUrlList(url):
	"""Fetch a list page; return (detail-page hrefs, next-page href or None).

	Returns ([], None) when the request fails, which cleanly terminates the
	caller's `while nPurl` pagination loop.
	"""
	res = []
	try:
		r = requests.get(url)
	except Exception:
		# Original referenced an unbound `r` here (NameError) and then fell
		# through to pq(html) with `html` undefined; fail soft instead.
		return res, None
	try:
		r.encoding = 'utf-8'
		html = r.text
	finally:
		# Original wrote `r.close` without parens (a no-op) and never
		# closed the response on the success path.
		r.close()
	doc = pq(html)
	conDom = doc('#content')
	npDom = doc('#long-page a.next')
	nPurl = npDom.eq(0).attr.href  # None when there is no next page
	for aitem in conDom('a.video-pic').items():
		res.append(aitem.attr.href)
	return res, nPurl

# 获取绝对路径
def absUrl(domain, url):
	"""Return *url* unchanged if it is already absolute, else prefix *domain*.

	Uses startswith: the original tested `'http' in url`, which wrongly
	treated any relative path merely containing "http" as absolute.
	"""
	if url.startswith('http'):
		return url
	return domain + url
# 爬去视频
def vido(conf):
	"""Crawl the video site described by *conf*.

	Phase 1: walk the paginated list starting at /htm/mp4list6/ and collect
	detail-page URLs (reusing conf['pageUrlCache'] if it already exists).
	Phase 2: resolve each detail page to a download URL, appending them to
	./vidoUrls.txt and dumping the full result to ./vidoUrls.json.
	"""
	inUrl = '/htm/mp4list6/'  # crawl entry point
	nPurl = absUrl(conf['domain'], inUrl)  # first list-page URL
	urlList = []
	# Reuse a cached URL pool and skip the list crawl entirely.
	if os.path.exists(conf['pageUrlCache']):
		urlList = LConf.LConf(conf['pageUrlCache'])
		nPurl = False
	# Follow the "next page" chain, accumulating detail-page URLs.
	while nPurl:
		tempList, nPurl = getUrlList(absUrl(conf['domain'], nPurl))
		urlList += tempList
		if nPurl:
			print('当前页 ' + nPurl + '总量：' + str(len(urlList)))
	# Persist the URL pool so later runs can skip the list crawl.
	if not os.path.exists(conf['pageUrlCache']):
		with open(conf['pageUrlCache'], 'w') as fp:
			fp.write(json.dumps(urlList))

	vidoUrls = []
	urlCount = len(urlList)
	# `with` guarantees the txt file is closed even if a fetch raises
	# (the original left a bare open() that leaked on any exception).
	with open('./vidoUrls.txt', 'a') as f:
		# enumerate from 1: the original printed 0/N for the first item.
		for i, oneUrl in enumerate(urlList, 1):
			vidoUrl, title = getVideoUrl(absUrl(conf['domain'], oneUrl))
			if not vidoUrl:
				print("此条错误")
				continue
			vidoUrls.append({'title': title, 'url': vidoUrl})
			f.write(vidoUrl + '\n')
			print("%s=>%s (%d/%d)" % (title, vidoUrl, i, urlCount))
	with open('./vidoUrls.json', 'w') as fp:
		fp.write(json.dumps(vidoUrls))



def main(conf):
	"""Script entry point: run the crawler with the given configuration.

	The original `def main():` had an empty body (a SyntaxError that made
	the whole file unimportable) and was invoked as `main(conf)` with an
	argument it did not accept; both are fixed here.
	"""
	vido(conf)


if __name__ == '__main__':
	main(conf)