# from urllib import request                                  # 获得排名前十的男星和女星图片
# from lxml import etree
# import requests
# import sys
# import time
#
# url = 'http://www.yue365.com/mingxing/list/neidinv/'
#
#
# # --------------------------------------------------------------------------------用于显示进度条------------------------------
# def Schedule(a, b, c):
#     '''
#     a:已经下载的数据块
#     b:数据块的大小
#     c:远程文件的大小
#    '''
#     # print(a , b, c)
#
#     time.sleep(0.4)
#     par = 100.0 * a * b / c
#     if par > 100:
#         par = 100
#     per = 100 - int(par)
#
#     sys.stdout.write('[' + '*' * int(par // 2) + '-' * (per // 2) + ']' + str(int(par)) + '%' + '\r')  # 这里这个\r是关键
#     # sys.stdout.flush()                      #刷新缓存
#
#
# # -----------------------------------------------------------------------------------爬虫正文---------------------------
#
# ht = request.urlopen(url).read().decode('utf-8')
# html = etree.HTML(ht)
#
# p_urls = html.xpath('//li[@class="show dis-10"]/div[1]')
# t = 0
# for l in p_urls:
#     t += 1
#     p_name = l.xpath('a/@title')
#     img_url = l.xpath('a/img/@src')
#     path = 'H:\\新建文件夹\\明星\\'
#     f_name = path + str(t) + p_name[0] + '.jpg'
#     try:
#         #         request.urlretrieve(img_url[0],f_name)
#         print("第{}张图片正在下载！".format(t))
#         request.urlretrieve(img_url[0], f_name, Schedule)
#         print("\n")
#     except:
#         print("error")
#
# ***********************************************************************************************************************
#
# ***********************************************************************************************************************

from urllib import request  # 获取女明星人气排行榜txt文件
from lxml import etree

# Second page of the female-celebrity popularity ranking; `allpage()` derives
# the other page URLs from this template by substituting the page number.
url = 'http://www.yue365.com/mingxing/list/neidinv/index_2.shtml'


# ----------------------------------------------------------------------------------
def allpage(pages=11):  # build the list of ranking-page URLs
    """Return the URLs of all pages of the popularity ranking.

    The first page lives at the bare directory URL; pages 2..pages follow
    the ``index_<n>.shtml`` pattern. The original implementation derived
    each URL with ``url.replace(url[-7], str(i))``, which replaces *every*
    occurrence of the character ``'2'`` and silently breaks if the base
    URL ever contains another ``'2'``; an explicit format string is safe.

    Args:
        pages: total number of pages to return (default 11 reproduces the
            original ``range(2, 12)`` behaviour plus the first page).

    Returns:
        list[str]: page URLs in ranking order.
    """
    base = 'http://www.yue365.com/mingxing/list/neidinv/'
    all_url = [base]
    for i in range(2, pages + 1):
        all_url.append(f'{base}index_{i}.shtml')
    return all_url


# ----------------------------------------------------------------------------------

if __name__ == '__main__':
    # Scrape every ranking page and append each star's number, name, news
    # blurb and rating to a text file.
    urls = allpage()
    out_path = 'H:\\新建文件夹\\明星榜.txt'
    # Open the output file once for the whole run instead of re-opening it
    # in append mode for every single entry (the original inner-loop `open`
    # was pure overhead).
    with open(out_path, 'a', encoding='utf-8') as f:
        for page_url in urls:
            ht = request.urlopen(page_url).read().decode('utf-8')
            html = etree.HTML(ht)
            for item in html.xpath('//li[@class="mx_xx"]'):
                p_number = item.xpath('span/text()')
                p_name = item.xpath('dt/a/text()')
                p_news = item.xpath('div/text()')
                p_r = item.xpath('i/text()')
                # Skip entries with missing markup instead of crashing with
                # IndexError on an empty xpath result.
                if not (p_number and p_name and p_news and p_r):
                    continue
                f.write("No: " + p_number[0] + '\n')
                f.write(p_name[0] + '\n')
                f.write(p_news[0] + '\n')
                f.write(p_r[0] + '\n')
                f.write('\n' * 3)
