from bs4 import BeautifulSoup
import requests,csv
import time

# Browser-like request headers: Douban rejects requests with no/obvious-bot
# User-Agent, and the Cookie mimics a previously captured browser session.
# NOTE(review): this Cookie is hard-coded and stale — presumably it still
# works because Douban's Top-250 pages are public; verify before relying on it.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3427.400 QQBrowser/9.6.12088.400',
    'Cookie':'ll="118281"; bid=CSp-Ve2rzOc; __yadk_uid=ZVc0rqOHmDqqYv2FX2Ik4JmJ3eCYKe3I; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1506842413%2C%22http%3A%2F%2Fwww.id97.com%2Fmovie%2F622180.html%22%5D; _vwo_uuid_v2=96EC092C1B182B4B00DB45BF1EC89883|ec8e1552a495de83474d974ed79e3273; _pk_id.100001.4cf6=cb126c9d0a66afb9.1498400792.15.1506842421.1504422658.; _pk_ses.100001.4cf6=*; __utma=30149280.1741386291.1498400792.1504422294.1506842413.15; __utmb=30149280.0.10.1506842413; __utmc=30149280; __utmz=30149280.1504422294.14.13.utmcsr=id97.com|utmccn=(referral)|utmcmd=referral|utmcct=/movie/622180.html; __utmv=30149280.10282; __utma=223695111.1748773891.1498400792.1504422294.1506842413.15; __utmb=223695111.0.10.1506842413; __utmc=223695111; __utmz=223695111.1504422294.14.13.utmcsr=id97.com|utmccn=(referral)|utmcmd=referral|utmcct=/movie/622180.html'
}

# Top-250 list page URL template; {} receives the 0-based offset of the first
# movie on the page (0, 25, 50, ... 225 — see get_page()).
url = 'https://movie.douban.com/top250?start={}&filter='

def makesoup(url):
    """Fetch *url* with the module-level browser headers and return the
    page parsed as a BeautifulSoup document (lxml parser).

    Raises requests.HTTPError for 4xx/5xx responses and
    requests.Timeout/ConnectionError on network failure; callers
    (listdetail / moviedetail) handle these per page.
    """
    # timeout prevents the crawler from hanging forever on a dead connection
    response = requests.get(url, headers=headers, timeout=10)
    # surface HTTP errors instead of silently parsing an error page
    response.raise_for_status()
    return BeautifulSoup(response.text, 'lxml')

def listdetail(url):
    """Scrape one Top-250 list page and append one CSV row per movie
    (id, title, score, link) to movie250.csv, fetching each movie's
    detail page via moviedetail() along the way.

    Failures are logged and the page is skipped — the crawl continues.
    """
    try:
        rows = []
        soup = makesoup(url)
        movie_list = soup.find(class_='grid_view').find_all('li')
        for movie in movie_list:
            link = movie.a['href']
            # numeric Douban id is the 5th path segment of the detail URL
            movie_id = link.split('/')[4]
            title = movie.find(class_='title').get_text()          # movie title
            score = movie.find(class_='rating_num').get_text()     # rating, e.g. "9.6"
            rows.append([movie_id, title, score, link])
            moviedetail(link)  # also scrape the per-movie detail page
        # raw-string path avoids invalid-escape warnings; utf-8 keeps Chinese
        # titles intact regardless of the Windows default codepage
        with open(r'C:\pro\movie\movie250.csv', 'a', newline='', encoding='utf-8') as csvfile1:
            movieInfo = csv.writer(csvfile1, dialect="excel")
            movieInfo.writerows(rows)
    except Exception as e:
        # best-effort crawl: log and skip this page rather than silently
        # swallowing every error (the original `except: pass` hid all bugs)
        print(f'listdetail failed for {url}: {e}')

def moviedetail(url):
    """Scrape a single movie's detail page and append one row to
    movie250edtail.csv.

    Row layout matches the header written by get_page():
    id, rank, name, year, score, director, screenwriter, actors, link.
    Failures are logged and the movie is skipped.
    """
    try:
        soup = makesoup(url)
        movie_id = url.split('/')[4]  # numeric Douban id from the URL path
        rank = soup.find(class_='top250-no').get_text()           # e.g. "No.1"
        name = soup.find(property='v:itemreviewed').get_text()    # full title
        year = soup.find(class_='year').get_text()                # e.g. "(1994)"
        # the first three 'attrs' spans on the page are director /
        # screenwriter / main actors, in that order
        staff = soup.find_all(class_='attrs')
        director = staff[0].get_text()
        screenwriter = staff[1].get_text()
        actor = staff[2].get_text()
        # genres are collected for future use; not yet part of the CSV schema
        genres = [g.get_text() for g in soup.find_all(property='v:genre')]
        score = soup.find(class_='ll rating_num').get_text()
        row = [movie_id, rank, name, year, score, director, screenwriter, actor, url]
        print([row])
        # raw-string path + utf-8 encoding: see listdetail()
        with open(r'C:\pro\movie\movie250edtail.csv', 'a', newline='', encoding='utf-8') as csvfile2:
            movieInfodetail = csv.writer(csvfile2, dialect="excel")
            movieInfodetail.writerow(row)
    except Exception as e:
        # log-and-skip instead of the original silent `except: pass`
        print(f'moviedetail failed for {url}: {e}')

def get_page():
    """Write the CSV header rows, then crawl all 10 Top-250 list pages
    (25 movies per page), pausing between pages.

    NOTE: files are opened in append mode, so re-running the script adds
    a second header and duplicate rows — delete the CSVs for a clean crawl.
    """
    with open(r'C:\pro\movie\movie250.csv', 'a', newline='', encoding='utf-8') as csvfile1:
        movieInfo = csv.writer(csvfile1, dialect="excel")
        movieInfo.writerow(["ID", "电影名称", "电影评分", "链接"])
    with open(r'C:\pro\movie\movie250edtail.csv', 'a', newline='', encoding='utf-8') as csvfile2:
        movieInfodtail = csv.writer(csvfile2, dialect="excel")
        movieInfodtail.writerow(["ID", "排名", "电影名称", "上映年份", "电影评分", "导演", "编剧", "主演", "链接"])
    # start offsets 0, 25, ..., 225 — one request per 25-movie page
    for start in range(0, 250, 25):
        listdetail(url.format(start))
        time.sleep(2)  # be polite to the server between page fetches



# Run the crawl only when executed as a script, not on import.
if __name__ == '__main__':
    get_page()