"""
pip 下载源
清华：https://pypi.tuna.tsinghua.edu.cn/simple
阿里云：http://mirrors.aliyun.com/pypi/simple/
豆瓣：http://pypi.douban.com/simple/

"""

import requests,os,re
from lxml.html import etree

# Mirror index URLs, keyed by the menu choice ('1'-'3') the caller passes as `flag`.
source_dic = {
    '1': 'https://pypi.tuna.tsinghua.edu.cn/simple',  # Tsinghua
    '2': 'http://mirrors.aliyun.com/pypi/simple/',    # Aliyun
    '3': 'http://pypi.douban.com/simple/',            # Douban
}

# Browser-like User-Agent so the mirror serves the normal HTML index page.
header = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.17 Safari/537.36',
}

class Pip_downs:
    """Download packages from a PyPI "simple" mirror.

    Workflow: ``get_cons(flag)`` fetches (and caches under ``res/``) the
    mirror's package index and returns the package names; ``get_files_url``
    lists the release files of one package; ``down_file`` saves a chosen
    release file to disk.
    """

    # Cache-file name for each mirror's index page; keys match source_dic.
    _INDEX_FILES = {'1': 'page_tsing.txt', '2': 'page_ali.txt', '3': 'page_douban.txt'}

    def check_file_lines(self, filepath):
        """Print each line of *filepath* with its index, then the total line count."""
        count = 0
        # 'with' guarantees the handle is closed; the original leaked it by
        # opening the file directly inside the for-statement.
        with open(filepath, 'r', encoding='utf-8') as f:
            for index, line in enumerate(f):
                count += 1
                print(index, line)
        print('count', count)

    def write_page(self, file_page, a):
        """Write text *a* to *file_page*, but only if the file does not yet exist.

        This makes the file act as a simple on-disk cache: a second call with
        the same path is a no-op.
        """
        if not os.path.exists(file_page):
            with open(file_page, 'w', encoding='utf-8') as f:
                f.write(a)

    def read_page(self, file_page):
        """Return the full text of *file_page*.

        Reads as UTF-8 explicitly to match write_page; the original relied on
        the platform default encoding and could fail on Windows.
        """
        with open(file_page, 'r', encoding='utf-8') as f:
            return f.read()

    def get_cons(self, flag):
        """Fetch the mirror's package index and return the list of package names.

        flag: '1'/'2'/'3' selecting a mirror from source_dic. The raw HTML is
        cached under ``res/`` so repeated calls do not re-download it.
        """
        os.makedirs('res/', exist_ok=True)
        # Unknown flags fall back to '' exactly as the original if/elif chain did.
        file_s = self._INDEX_FILES.get(flag, '')
        url = source_dic.get(flag, '')
        file_page = 'res/' + file_s
        res = requests.get(url, headers=header).text
        self.write_page(file_page, res)   # cache to disk (no-op if cached)
        res = self.read_page(file_page)   # always parse the cached copy
        html = etree.HTML(res)
        # On a "simple" index page every package is a bare <a> under <body>.
        lis = html.xpath('/html/body/a/text()')
        print('len_page', len(lis))
        return lis

    def get_files_url(self, flag, name):
        """Return {file_name: absolute_download_url} for package *name*.

        flag: mirror selector as in get_cons; name: package name as listed by
        the index page.
        """
        base_url = source_dic.get(flag, '')
        full_url = base_url + '/' + name
        new_res = requests.get(full_url).content.decode('utf-8')
        html = etree.HTML(new_res)
        file_names = html.xpath('//body/a/text()')
        file_urls = html.xpath('//body/a/@href')
        print(len(file_names))
        files_dic = {}
        # Loop variable renamed: the original shadowed the `name` parameter.
        for file_name, href in zip(file_names, file_urls):
            # Rebuild an absolute URL by dropping the trailing 'simple/' part of
            # the mirror root and the leading relative prefix of the href.
            # NOTE(review): assumes hrefs look like '../../packages/...' —
            # confirm this holds for every mirror in source_dic.
            files_dic[file_name] = base_url[:-7] + href[5:]
        print(files_dic)
        return files_dic

    def down_file(self, flag, name, name_s, path):
        """Download release file *name_s* of package *name* into directory *path*."""
        files_dic = self.get_files_url(flag, name)
        url = files_dic[name_s]
        content = requests.get(url).content
        with open(path + '/' + name_s, 'wb') as f:
            f.write(content)
        # Bug fix: report the file actually saved (name_s), not the package name.
        print('{}已下载完成'.format(name_s))
