import csv
import time
import requests
from lxml import etree
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Bar
from pyecharts.charts import Map
from pyecharts.charts import Pie


def parse_html():
    """Scrape the Douban Top-250 movie list.

    Writes one CSV row per movie (name, link, director, cast, year, country,
    genre, rating, rating count, tagline) and downloads each poster image to
    img/img<start><n>.jpg.

    Returns:
        int: 0 on completion (kept for backward compatibility).
    """
    # Request headers: a real browser UA plus a logged-in session cookie.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:94.0) Gecko/20100101 Firefox/94.0',
               'Cookie': 'bid=Cqkkd_P-tmQ; __utma=30149280.892568344.1638772489.1639286374.1639294471.7; \
    __utmz=30149280.1639282150.5.5.utmcsr=link.csdn.net|utmccn=(referral)|utmcmd=referral|utmcct=/; \
    ll="118318"; __gads=ID=e598d5a2d7f1cf08-22a4537d5ccf0053:T=1638772705:RT=1638772705:S=ALNI_MbibV13XGLC4ptZbrSRCXYBdreEkg; \
    __utmc=30149280; ap_v=0,6.0; __utmb=30149280.5.10.1639294471; dbcl2="251561912:O6jnrIMmaIQ"; ck=pAP3; push_noty_num=0; \
    push_doumail_num=0; __utmv=30149280.25156; __utmt=1'}

    # Context manager guarantees the CSV file is closed even if a request
    # raises mid-crawl (the original leaked the handle in that case).
    with open(r'C:\Users\lyc\Desktop\电影top250信息.csv', 'a+', encoding='utf-8', newline='') as files:
        cvs_writer = csv.writer(files)
        # Header row. NOTE(review): in 'a+' mode a header is appended on every
        # run; presumably the file is fresh each time -- confirm before reuse.
        cvs_writer.writerow(["电影名字", "链接", "导演", "主演", "时间", "国家", "类型", "评分", "评价", "简介"])

        for start in range(0, 226, 25):  # page offsets 0, 25, ..., 225 (10 pages x 25 movies)
            url = 'https://movie.douban.com/top250?start={}&filter='.format(start)
            # timeout so one hung request cannot stall the whole crawl
            res = requests.get(url, headers=headers, timeout=10)
            res.encoding = 'utf-8'
            if res.status_code == 200:
                tree = etree.HTML(res.text)  # parse page for XPath queries
                for n in range(1, 26):  # the 25 <li> entries on this page
                    # Common XPath prefix for the n-th list item.
                    item = '/html/body/div[3]/div[1]/div/div[1]/ol/li[{}]'.format(n)
                    name = ''.join(tree.xpath(item + '/div/div[2]/div[1]/a/span[1]/text()'))
                    line = ''.join(tree.xpath(item + '/div/div[2]/div[1]/a/@href'))  # detail-page link
                    pingfen = ''.join(tree.xpath(item + '/div/div[2]/div[2]/div/span[2]/text()'))  # rating
                    pingjia = ''.join(tree.xpath(item + '/div/div[2]/div[2]/div/span[4]/text()'))  # rating count
                    jianjie = ''.join(tree.xpath(item + '/div/div[2]/div[2]/p[2]/span/text()'))  # tagline
                    img_url = ''.join(tree.xpath(item + '/div/div[1]/a/img/@src'))  # poster URL

                    # The credits paragraph holds "director / cast" on one line
                    # and "year / country / genre" on the next; strip the
                    # non-breaking spaces and split into lines.
                    daoyan = ''.join(tree.xpath(item + '/div/div[2]/div[2]/p[1]/text()')).replace('\xa0', '').split("\n")
                    guojia = daoyan[2].split('/')[1].split(' ')[0]  # first country listed
                    if "中国大陆" in guojia or "香港" in guojia or "台湾" in guojia:
                        guojia = '中国'  # normalize Chinese regions to one label
                    ryuan = daoyan[1]  # "导演: ...   主演: ..." line
                    zhuyan_pos = ryuan.find("主演:")  # start of the cast part
                    zhuyan2 = ryuan[zhuyan_pos:]  # cast
                    daoyan2 = ryuan[:zhuyan_pos].strip()  # director (leading spaces removed)
                    year = daoyan[2].strip()[0:4]  # release year (first 4 chars)
                    leix = daoyan[2].split("/")[2]  # genre field

                    img_get = requests.get(img_url, timeout=10).content  # poster bytes
                    # Distinct name for the file handle -- the original shadowed
                    # the xpath result variable `img` here.
                    with open(r'img\img' + str(start) + str(n) + '.jpg', 'wb') as img_file:
                        img_file.write(img_get)

                    cvs_writer.writerow(
                        [name, line, daoyan2, zhuyan2, year, guojia, leix, pingfen, pingjia, jianjie])
                    print('正在爬取数据:{}'.format(name))
            else:
                print('请求失败，状态码{}'.format(res.status_code))
            time.sleep(2)  # throttle between pages to avoid an IP ban

    return 0


def shujukeshihua(guojia):
    """Render a bar chart ranking countries by movie count.

    Args:
        guojia: pandas Series mapping country name -> movie count.
            Sorted in place (descending) before plotting.
    """
    guojia.sort_values(ascending=False, inplace=True)  # largest counts first
    chart = Bar()
    chart.add_xaxis(list(guojia.index))            # countries on the x axis
    chart.add_yaxis("数量", guojia.values.tolist())  # counts on the y axis
    chart.set_global_opts(
        title_opts=opts.TitleOpts(title="各国电影数量排名"),  # chart title
        yaxis_opts=opts.AxisOpts(name="数量"),  # y-axis label
        xaxis_opts=opts.AxisOpts(name="国家"),  # x-axis label
    )
    chart.render(r'C:\Users\lyc\Desktop\电影top250柱状图.html')


def meiguitu(shijian):
    """Render a rose (Nightingale) pie chart of movies per release year.

    Args:
        shijian: pandas Series mapping release year -> movie count.
            Sorted in place (descending) before plotting.
    """
    shijian.sort_values(ascending=False, inplace=True)
    pairs = [list(z) for z in zip(shijian.index, shijian.values.tolist())]
    chart = Pie()
    chart.add(
        '',
        pairs,
        radius=["20%", "60%"],  # inner / outer radius of the ring
        center=["50%", "60%"],  # horizontal / vertical position of the chart
        rosetype="radius",      # sector radius encodes the value
        label_opts=opts.LabelOpts(is_show=False),
    )
    chart.set_series_opts(label_opts=opts.LabelOpts(formatter='{b}: {c}'))
    chart.render(r'C:\Users\lyc\Desktop\电影每年上映时间玫瑰图.html')


def dinggeermeiguitu(guojia):
    """Render a Nightingale (area-type) rose chart of movies per country.

    Args:
        guojia: pandas Series mapping country name -> movie count.

    Bug fixed: the label styling options (``position``, ``font_size``,
    ``formatter``, ``font_style``, ``font_weight``, ``font_family``) were
    passed as bare keyword args to ``set_series_opts()`` instead of to
    ``LabelOpts``, so they were never applied as label styling. They now
    live inside ``LabelOpts`` where ECharts expects them.
    """
    provinces = guojia.index
    num = guojia.values.tolist()
    c1 = (
        Pie()
        .add('', [list(z) for z in zip(provinces, num)],
             radius=["40%", "80%"],  # inner / outer radius
             rosetype="area"         # equal angles, radius encodes the value
             )
        .set_global_opts(title_opts=opts.TitleOpts(title="各国家电影图"),
                         legend_opts=opts.LegendOpts(is_show=False),
                         toolbox_opts=opts.ToolboxOpts())
        .set_series_opts(label_opts=opts.LabelOpts(
            is_show=True,
            position="inside",
            font_size=12,
            formatter='{b}: {c}',
            font_style="italic",
            font_weight='bold',
            font_family="Microsoft YaHei"))
    )
    c1.render(r'C:\Users\lyc\Desktop\丁格尔玫瑰图.html')
    print("程序执行结束")


if __name__ == '__main__':  # script entry point
    parse_html()  # scrape the Top-250 data and download posters

    df = pd.read_csv(r'C:\Users\lyc\Desktop\电影top250信息.csv')  # load the scraped data
    # value_counts() yields a Series (index = category, values = counts).
    # The original assigned `.columns` to these Series, which is a no-op on a
    # Series (pandas only emits a UserWarning) -- those lines are removed.
    guojia = df['国家'].value_counts().sort_index()  # movies per country
    shijian = df['时间'].value_counts().sort_index()  # movies per release year

    shujukeshihua(guojia)      # bar chart: movies per country
    meiguitu(shijian)          # rose chart: movies per release year
    dinggeermeiguitu(guojia)   # Nightingale rose chart: movies per country
