#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Big Big
import requests
from urllib import request
import re
import bs4
import time
import pandas as pd
import random

# Iterate over the Top-250 listing pages (25 films per page).
# Collected per film: title, rating, vote count, genre, country, year, runtime.
name = []
mark = []
people = []
filmtype = []
country = []
year = []
filmtime = []
# Load candidate proxy addresses, one "host:port" per line.
# Skip blank lines: 'http://' + '' would be an invalid proxy URL.
with open('ips.txt', encoding='utf-8') as fp:
    ip = [line.strip() for line in fp if line.strip()]
proxies = {
    # NOTE(review): one proxy is picked at random for the entire run;
    # rotate per-request if the proxy gets banned mid-scrape.
    'http': 'http://' + random.choice(ip),
}
# Loop invariants hoisted out of the page loop: compiled regexes and headers.
# Fix: original pattern used [a-zA-z], which accidentally also matches the
# characters [ \ ] ^ _ ` ; and the rating pattern '\d+.\d' left the dot unescaped.
_SUBJECT_RE = re.compile(r'[a-zA-Z]+://.+subject/\d+/')
_CHINESE_RE = re.compile(r'[\u4e00-\u9fa5]+')
head = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.134 Safari/537.36 Edg/103.0.1264.71',
        'Cookie':'ll="118178"; bid=ZfAMqjLx470; push_doumail_num=0; push_noty_num=0; __utmv=30149280.25396; __yadk_uid=5sdwyxu208BS1W0v7n9e3CD1KmeYug9f; __gads=ID=c0fe1e399a7f1378-2219ecdd55d5003e:T=1658720980:RT=1658720980:S=ALNI_Mawnu2qsPkOg-VhglEQ7aTjGwsdXg; Hm_lvt_16a14f3002af32bf3a75dfe352478639=1658721143; _vwo_uuid_v2=DA94A349AF08840B8919DC944B0D769B4|72c301fa7aa9f01a6400041cf5654bc6; _vwo_uuid_v2=DA94A349AF08840B8919DC944B0D769B4|72c301fa7aa9f01a6400041cf5654bc6; douban-fav-remind=1; ct=y; __utmc=30149280; __utmc=223695111; __gpi=UID=0000080942b3f21a:T=1658720980:RT=1658897324:S=ALNI_MaVJcXAUeuvzLGdDkd1eyLIF8vuBg; dbcl2="253965069:pqEdRW4JzZw"; ck=w6QE; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1658909259%2C%22https%3A%2F%2Faccounts.douban.com%2F%22%5D; _pk_ses.100001.4cf6=*; __utma=30149280.722721788.1658221145.1658906130.1658909259.19; __utmb=30149280.0.10.1658909259; __utmz=30149280.1658909259.19.8.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utma=223695111.231957855.1658221145.1658906130.1658909259.18; __utmb=223695111.0.10.1658909259; __utmz=223695111.1658909259.18.7.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; _pk_id.100001.4cf6=d158a93a4caf2e9b.1658221145.17.1658909280.1658906135.',
        'Host':'movie.douban.com',
        'Referer':'https://movie.douban.com/subject/1292052/'
        }
for i in range(100, 250, 25):
    # Listing page with films i .. i+24 (start= is the 0-based film offset).
    url = 'https://movie.douban.com/top250?start=' + str(i) + '&filter='
    print(url)
    resp = requests.get(url=url, proxies=proxies, headers=head, timeout=(3, 3))
    resp.encoding = 'utf-8'
    data_bs = bs4.BeautifulSoup(resp.text, 'html.parser')
    # Collect each film's detail-page URL. Every film link appears twice on
    # the listing page (poster anchor + title anchor); the original code used
    # a fragile every-other-match flag — de-duplicate explicitly instead,
    # preserving first-seen order.
    films = []
    seen = set()
    for anchor in data_bs.find_all('a'):
        href = anchor.get('href')
        if href and _SUBJECT_RE.search(href) and href not in seen:
            seen.add(href)
            films.append(href)
    # Visit each film page and extract title, rating, votes, genres,
    # country, release date and runtime.
    for film in films:
        resp1 = requests.get(film, headers=head)
        resp1.encoding = 'utf-8'
        detail = bs4.BeautifulSoup(resp1.text, 'html.parser')
        # Title: keep only the first run of Chinese characters from og:title.
        film_name = detail.find('meta', property='og:title').get('content')
        name.append(_CHINESE_RE.findall(film_name)[0])
        # Rating: the element text is the score itself — parse it directly
        # instead of slicing the str() repr of a findall() result.
        mark.append(float(detail.find('strong', class_='rating_num').get_text()))
        # Vote count: likewise, the span text is the integer.
        people.append(int(detail.find('span', property='v:votes').get_text()))
        # Genres: one v:genre span per genre; keep them as a list of strings.
        filmtype.append([g.get_text() for g in detail.find_all('span', property='v:genre')])
        # Country: parse the "制片国家/地区" line out of the info block's text
        # (the original sliced str(list) at hard-coded offsets, which breaks
        # whenever the surrounding markup shifts by one character).
        info_text = detail.find('div', id='info').get_text()
        country_match = re.search('制片国家/地区:(.*)', info_text)
        country.append(country_match.group(1).strip() if country_match else '')
        # Release date: YYYY-MM-DD prefix of the v:initialReleaseDate content.
        year_base = detail.find('span', property='v:initialReleaseDate')
        year.append(str(year_base.get('content'))[0:10])
        # Runtime in minutes.
        filmtime.append(int(detail.find('span', property='v:runtime').get('content')))
        time.sleep(2)  # throttle: be polite to the server
        print(str(film), 'succeed')
    print(i, 'succeed')

# Assemble the collected columns and dump them to CSV.
columns = {
    '片名': name,
    '评分': mark,
    '人数': people,
    '类型': filmtype,
    '国籍': country,
    '年份': year,
    '片长': filmtime,
}
frame = pd.DataFrame(columns)
frame.to_csv("ex2.csv", index=False, sep=',')
print('data_get success!')










