#https://www.domp4.com/list/6-1.html 爬取电影网站信息保存到文本
import json
import os
import re
from urllib.parse import urlparse, parse_qs

import requests
from bs4 import BeautifulSoup

# Shared HTTP request headers: close the connection after each request and
# present a desktop Chrome User-Agent so the site serves its normal pages.
headers = {
    'Connection':'close',
    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
}

#获取网站的源码
# Fetch a page's HTML source.
def get_url_content(url):
    """Fetch *url* and return its HTML text, or False on any failure.

    Returns False both for non-200 responses and for network errors, so
    callers can simply truth-test the result.
    """
    try:
        # Bug fix: headers must be passed via the ``headers=`` keyword.
        # The original positional call ``requests.get(url, headers)`` sent
        # them as query-string parameters instead of HTTP headers.
        # A timeout keeps one dead page from hanging the whole crawl.
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        return False
    # Force UTF-8 decoding to avoid mojibake. Bug fix: the original set
    # " utf-8 " with surrounding spaces, which is not a valid codec name.
    response.encoding = 'utf-8'
    if response.status_code == 200:
        return response.text
    return False

#解析html内容
# Parse one listing page's HTML into a list of per-film records.
def parse_Web_Content(content):
    """Parse a listing page and return one dict per film.

    Each dict carries the film's name, area, introduction and its
    absolute detail-page URL on www.domp4.com.
    """
    soup = BeautifulSoup(content, 'html.parser')

    names = get_film_name(soup)
    areas = get_film_cast(soup)
    intros = get_film_introduction(soup)
    links = get_film_url(soup)

    # NOTE: the 'fileName' key spelling is relied on by writeTofile.
    return [
        {
            'fileName': names[idx],
            'filmCast': areas[idx],
            'filmIntro': intros[idx],
            'filmurl': 'https://www.domp4.com' + links[idx],
        }
        for idx in range(len(names))
    ]

#获取电影名称
# Extract the film titles from the page.
def get_film_name(Soup):
    """Return the title of every film on the page.

    Each ``.text_info`` element's first <a> tag holds the film's name.
    """
    return [entry.a.string for entry in Soup.select(".text_info")]

#解析电影地区
# Extract the film regions/areas from the page.
def get_film_cast(Soup):
    """Return the area (region) text of every ``span.area`` tag."""
    area_spans = Soup.find_all('span', attrs={'class': 'area'})
    return [span.text for span in area_spans]

#解析电影简介
# Extract the film introductions from the page.
def get_film_introduction(Soup):
    """Return the introduction text of every ``p.info`` tag."""
    paragraphs = Soup.find_all('p', attrs={'class': 'info'})
    return [p.text for p in paragraphs]

#解析电影链接
# Extract each film's detail-page link from the page.
def get_film_url(Soup):
    """Return the relative href of every film in the ``#list_all`` list."""
    list_items = Soup.select("#list_all > ul > li")
    return [item.a['href'] for item in list_items]

#保存到文件
# Append the parsed records to a local file, one JSON object per line.
def writeTofile(parsedWebcontent):
    """Append each film record to film.json, one JSON object per line.

    Fixes: builds each line with json.dumps so quotes/backslashes in
    scraped text are escaped correctly (the original concatenated raw
    strings and produced broken output for such values); drops the
    redundant f.close() inside the ``with`` block.
    """
    with open('film.json', 'a', encoding='utf-8') as f:
        for item in parsedWebcontent:
            record = {
                '名称': item['fileName'],
                '地区': item['filmCast'],
                '简介': item['filmIntro'],
                '链接': item['filmurl'],
            }
            # ensure_ascii=False keeps the Chinese text human-readable.
            f.write(json.dumps(record, ensure_ascii=False) + ', \n')



if __name__ == '__main__':
    # Crawl listing pages 1-4 of category 6 and persist every film found.
    base = "https://www.domp4.com/list/6-"
    for page in range(1, 5):
        url = base + str(page) + ".html"
        html = get_url_content(url)

        # get_url_content returns False on failure; the idiomatic truth
        # test replaces the original non-Pythonic ``webContent != False``.
        if html:
            writeTofile(parse_Web_Content(html))