# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import urllib.request
import re
import os
# Scrape information about the movies currently showing in Shanghai.
def getmoveids():
    """Scrape Douban's "now playing in Shanghai" page and return movie IDs.

    Returns:
        list[str]: Douban subject IDs (the ``id`` attribute of each
        ``<li class="list-item">``), e.g. ``["27025637", ...]``; empty list
        if the expected page section is missing.
    """
    url = "https://movie.douban.com/cinema/nowplaying/shanghai/"
    # Douban tends to reject the default urllib User-Agent, so send a
    # browser-like one; close the response deterministically via `with`.
    request = urllib.request.Request(url, headers={"User-Agent": "Mozilla/5.0"})
    with urllib.request.urlopen(request) as response:
        html = response.read().decode("utf8")
    # One parse is enough — the original re-parsed sub-trees through
    # BeautifulSoup(str(...)) three times for no benefit.  An explicit
    # parser avoids bs4's "no parser specified" warning.
    soup = BeautifulSoup(html, "html.parser")
    nowplaying = soup.find(attrs={"id": "nowplaying"})
    if nowplaying is None:
        # Page layout changed or the request was blocked — fail soft.
        return []
    # Each currently-playing film is an <li class="list-item"> whose id
    # attribute is the Douban subject ID.
    return [li.get("id")
            for li in nowplaying.find_all("li", attrs={"class": "list-item"})]
def totxt(moveids):
    """Fetch each movie's Douban detail page and append one summary line
    per film ("/"-separated fields) to ``D:\\python\\moves.txt``.

    Args:
        moveids: iterable of Douban subject IDs (strings).

    Side effects:
        HTTP requests to movie.douban.com, appends to the output file,
        and prints each assembled line for progress feedback.
    """
    print("list完成")
    # Raw string: the original "D:\python\moves.txt" relied on \p and \m
    # being invalid (hence literal) escapes — a DeprecationWarning trap.
    # `with` guarantees the handle is closed (the original leaked it).
    with open(r"D:\python\moves.txt", "a+", encoding="utf-8") as f:
        for movie_id in moveids:
            page = urllib.request.urlopen(
                "https://movie.douban.com/subject/{0}".format(movie_id)).read()
            msoup = BeautifulSoup(page, "html.parser")
            mcontent = msoup.find(id="content")
            # The <h1><span> holds "中文名 OriginalTitle"; keep first token.
            title = mcontent.h1.span.get_text().split(" ")[0]
            # Year renders as "(2017)" — strip the parentheses.
            year = mcontent.find(attrs={"class": "year"}).get_text()[1:-1]
            moveinfo = msoup.find(id="info")
            actor = moveinfo.find(attrs={"class": "attrs"}).get_text()
            # BUG FIX: the original `type = type.join(t.get_text())`
            # interleaved the accumulated string between the characters of
            # each subsequent genre (str.join joins an iterable of chars).
            # Join all genre texts cleanly instead; also stop shadowing
            # the builtin `type`.
            genres = "/".join(
                t.get_text()
                for t in moveinfo.find_all(attrs={"property": "v:genre"}))
            country = moveinfo.find(
                "span", string=re.compile('制片国家/地区')).next_sibling[1:]
            date = moveinfo.find(
                attrs={"property": "v:initialReleaseDate"}).get_text()
            # Runtime and official-site link are optional on Douban pages.
            runtime_tag = moveinfo.find(attrs={"property": "v:runtime"})
            runtime = "" if runtime_tag is None else runtime_tag.get_text()
            site_tag = moveinfo.find("a", attrs={"rel": "nofollow"})
            href = "" if site_tag is None else site_tag.get("href")
            # Assemble the record: title/year/actors/genres/country/date/runtime/url
            move = "/".join(
                [title, year, actor, genres, country, date, runtime, href])
            print(move)
            f.write(move + "\n")


# Guard the entry point so importing this module does not kick off a
# full scrape as a side effect; running it as a script behaves as before.
if __name__ == "__main__":
    totxt(getmoveids())
