from email.policy import strict
import requests
from lxml import etree
import os
import stat
import urllib.request
import sys

# Listing pages of the program and the local directory each page downloads
# into.  Page 1 carries no page suffix in its URL; pages 2-9 do.
_PROG_URL = "https://www.ntdtv.com/gb/prog40"
fileDownList = [
    {
        'url': _PROG_URL if page == 1 else "{}/{}".format(_PROG_URL, page),
        'save': "D:/细语人生/{}".format(page),
    }
    for page in range(1, 10)
]

# Announce that the automatic download run is starting.
print('启动自动下载程序开始下载')

# Index into fileDownList for the entry currently being processed; the
# driver loop at the bottom of the file reassigns it on every iteration.
num = 0


def progressbar(cur, total=100):
    """Redraw a single-line, 100-column text progress bar on stdout.

    cur   -- amount completed so far.
    total -- the amount that represents 100% (default 100, i.e. *cur*
             is already a percentage).

    Uses a leading '\r' so successive calls repaint the same console line.
    """
    percent = '{:.2%}'.format(cur / total)
    sys.stdout.write('\r')
    # FIX: scale the bar by `total`; the original used int(cur) directly,
    # which drew a correctly-sized bar only when total == 100.
    sys.stdout.write("[%-100s] %s" % ('=' * int(cur * 100 / total), percent))
    sys.stdout.flush()


def schedule(blocknum, blocksize, totalsize):
    """Report-hook for urllib.request.urlretrieve: forwards progress to progressbar.

    blocknum  -- number of blocks transferred so far.
    blocksize -- size of each block in bytes.
    totalsize -- total file size in bytes, or 0 when the server did not say.
    """
    if totalsize == 0:
        # Unknown total size: show zero progress rather than divide by zero.
        percent = 0
    else:
        percent = blocknum * blocksize / totalsize
    # The final block usually overshoots the exact file size; clamp at 100%.
    percent = min(percent, 1.0) * 100
    # FIX: dropped the per-block print() the original did here — it emitted a
    # newline on every callback, defeating progressbar's '\r' in-place redraw.
    progressbar(percent)


class New():
    """Scrapes NTD listing pages and resolves/downloads per-episode files.

    Reads the module-level globals ``fileDownList`` and ``num`` to know which
    listing entry (URL + save directory) the current pass is working on, and
    appends progress/failure notes to the local file '1.txt'.
    """

    def __init__(self):
        self.url = ''
        # Plain desktop browser UA so the site serves its normal HTML markup.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
        }

    # Fetch one listing page and process every episode entry found on it.
    def getAuthor(self, url):
        """Parse the listing page at *url* and hand each episode to getList."""
        self.url = fileDownList[num]['url']
        res = self.commonGet(url)
        resWrapList = res.xpath('//div[@class="post_list"]')[0]
        print('resWrapList')
        print(resWrapList)
        # Record which listing index (1-based) we are on in the log file.
        with open('1.txt', mode='a') as note:
            note.write(str(num + 1) + '\n')
        resWrap = resWrapList.xpath('//div[@class="list_wrapper"]')[0]
        items = resWrap.xpath('//div[@class="one_post"]')
        for item in items:
            title = item.xpath('.//div[@class="title"]//a/text()')[0].strip()
            link = item.xpath('.//div[@class="title"]//a/@href')[0]
            self.getList(link, title)

    def getList(self, url, title):
        """Open the episode page at *url* and extract its last download link.

        On pages without a download link, logs the page URL to '1.txt' so the
        failure can be retried later.
        """
        link = url
        response = self.commonGet(url)
        anchors = response.xpath('//div[@class="download_link"]//a')
        if anchors:
            # Take the href reachable from the last anchor's parent node.
            target = anchors[-1].xpath('..//@href')[0]
            print(target)
            # self.downFile(title, target)
        else:
            print('下载失败')
            print(link)
            # Remember pages that had no download link.
            with open('1.txt', mode='a') as note:
                note.write(link + '\n')

    def downFile(self, title, url):
        """Download *url* into the current entry's save directory as *title*.

        Skips the download when the target file already exists.  Errors are
        caught and printed so one bad file does not abort the whole run.
        """
        print(fileDownList[num]['save'])
        file_path = fileDownList[num]['save']
        # Create the save directory on first use.
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        file_suffix = os.path.splitext(url)[1]
        filename = '{}{}{}{}'.format(file_path, os.sep, title, file_suffix)
        # FIX: the original tested os.path.exists(title) — the bare title, not
        # the target path — so already-downloaded files were never detected.
        if not os.path.exists(filename):
            try:
                opener = urllib.request.build_opener()
                opener.addheaders = [
                    ('User-Agent', self.headers['User-Agent'])]
                urllib.request.install_opener(opener)
                # Progress is reported through the module-level schedule hook.
                urllib.request.urlretrieve(url, filename, schedule)
                print('下载完成_'+filename)
            except Exception as e:
                print('异常')
                print(e)

    def commonGet(self, url):
        """GET *url* with the browser UA and return the parsed lxml HTML tree."""
        res = requests.get(url, headers=self.headers)
        res.encoding = "utf8"
        return etree.HTML(res.text)


# Walk every configured listing page in order.  `num` is the module-level
# index the New class reads to pick the matching save directory, so it must
# be updated before each getAuthor call.
for idx, entry in enumerate(fileDownList):
    num = idx
    url = entry['url'] + '?page=0'
    new = New()
    new.getAuthor(url)
