# 爬虫程序爬取电影和电影院信息
# 猫眼电影 https://maoyan.com/
# 搜狗电影票http://tuan.sogou.com/

# 影院信息: 名字，地址，特征标签，电话,图片
# 影院场次信息：电影id，电影院id,开始时间，结束时间，影厅
# 电影信息：电影名，上映时间，主演，介绍，电影信息（时常，导演），电影类型,评分，封面，电影图片

# https://maoyan.com/films/0-343144
#from movie_app import models
import random
import datetime
import requests
from bs4 import BeautifulSoup
import  time
import sys
import pymysql
from movie_app import models

# 电影信息：电影名，上映时间，主演，介绍，电影信息（时常，导演），电影类型
# Movie info: name, release date, leads, intro, info (duration, director), genre
def movie_spider(count):
    """Fetch the Maoyan detail page for one movie.

    Parameters
    ----------
    count : int
        Maoyan film id; appended to https://maoyan.com/films/.

    Returns
    -------
    requests.Response
        Raw HTTP response with encoding forced to UTF-8 (the page is UTF-8
        but maoyan does not always send a charset header).
    """
    url = "https://maoyan.com/films/" + str(count)
    print(url)
    # BUG FIX: the original also built a second, never-used `header` dict
    # (with a malformed "sec-   ch-ua-mobile" key); dead code removed.
    # NOTE(review): this cookie is a captured session value (it even ends in
    # a truncated "mojo-session-") and will expire — without a valid cookie
    # maoyan is likely to serve an anti-bot verification page. Confirm.
    headers = {
        'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
        'Cookie': '__mta=208959789.1585106920033.1593509077842.1593509107607.47; _lxsdk_cuid=1710fbc224bc8-0048503dcb84eb-f313f6d-1a298c-1710fbc224cc8; mojo-uuid=bc73035186bc203e1e0a1a9d69cf0c8f; uuid_n_v=v1; uuid=010A4750BAB111EA977B252D9527D646FCA82B59C6B54FB3934C361D719643F2; _csrf=ab7e60b187089a5c797755f042abdbd14eed1760f8308dc455570ee9ea4edfa2; mojo-session-'
    }
    # timeout added so a stalled connection cannot hang the crawl loop forever
    h5page = requests.get(url, headers=headers, timeout=10)
    h5page.encoding = 'utf-8'
    return h5page

# 电影信息：电影名，上映时间，主演，介绍，电影信息（时常，导演），电影类型,评分，封面图片，评论，资讯，图集
# 解析电影网页信息
def parsinghtml(htmltext):
    """Parse a Maoyan film detail page into a flat dict of movie fields.

    Parameters
    ----------
    htmltext : str
        Raw HTML of a https://maoyan.com/films/<id> page.

    Returns
    -------
    dict
        Keys: mov_name, show_time ("YYYY-MM-DD" prefix), main_act,
        introduce, mov_info (",<director>,<duration>"), mov_type
        (",genre,genre,..."), main_img (cover URL), comment (list of
        author/content/likecount dicts), news (list of content/href dicts),
        imglist (gallery image URLs).
    """
    soup = BeautifulSoup(htmltext, 'html.parser')
    name = soup.find(class_="name").text
    main_img = soup.find(class_="avatar")['src']  # cover image URL

    # Genres, formatted with a leading comma and no spaces, e.g. ",剧情,动作"
    str_type = "".join("," + item.text for item in soup.findAll(class_="text-link"))
    str_type = str_type.replace(" ", "")

    # Release date and running time from the basic-info list
    show_time = ""
    duration = ""
    for info in soup.findAll(class_="ellipsis"):
        if "上映" in info.text:
            show_time = info.text[:10]  # keep the "YYYY-MM-DD" prefix
        elif "分钟" in info.text:
            duration = info.text
    # BUG FIX: original called .index("分钟") unconditionally and raised
    # ValueError on pages that carry no running-time entry.
    if "分钟" in duration:
        pot = duration.index("分钟")
        duration = duration[pot - 3:pot + 2]  # e.g. "123分钟"

    # Cast: first anchor is the director, the next (up to) four are leads.
    actlist = [a.text.replace(" ", "").replace("\n", "")
               for a in soup.findAll(attrs={'target': '_blank', 'class': 'name'})]
    # BUG FIX: original indexed actlist[0] and actlist[1+k] for k in range(4)
    # blindly and raised IndexError on pages with fewer than five entries.
    director = actlist[0] if actlist else ""
    mainact = "".join("," + act for act in actlist[1:5])  # e.g. ",黎明,张涵予"

    # Plot synopsis — guard the lookup: .find() returns None when the page
    # has no synopsis block, and the original crashed on .text later.
    mov_intro = soup.find(class_="dra")
    introduce = mov_intro.text if mov_intro is not None else ""

    # Movie info field: ",<director>,<duration>"
    mov_info = "," + director + "," + duration

    # Rating cannot be scraped from this page; the caller fabricates one.

    # Comments: author / content / like count.
    # BUG FIX: original iterated a hard-coded range(10) and raised
    # IndexError whenever the page held fewer than ten comments; zip bounds
    # the loop by what is actually present, capped at 10.
    authors = soup.findAll(class_="user")
    contents = soup.findAll(class_="comment-content")
    likes = soup.findAll(class_="num")
    commentlist = []
    for author, content, like in list(zip(authors, contents, likes))[:10]:
        commentlist.append({
            'author': author.text.replace("\n", ""),
            'content': content.text,
            'likecount': like.text,
        })

    # Gallery: keep only images whose alt text marks them as gallery shots
    imglist = [img['data-src'] for img in soup.findAll(class_="default-img")
               if "图集" in img['alt']]

    # Related news: cleaned text plus absolute link
    newslist = []
    for newitem in soup.findAll(class_="news-title"):
        newstext = newitem.text.replace(" ", "").replace("\n", "")
        newslist.append({
            'content': newstext,
            'href': "https://maoyan.com" + newitem.a['href'],
        })

    # Bundle everything; key names are relied on by save_data()
    return {
        'mov_name': name,
        'show_time': show_time,
        'main_act': mainact,
        'introduce': introduce,
        'mov_info': mov_info,
        'mov_type': str_type,
        'main_img': main_img,
        'comment': commentlist,
        'news': newslist,
        'imglist': imglist,
    }

def save_data(dict, count):
    """Persist one parsed movie, its comments, and its news via the models.

    Parameters
    ----------
    dict : dict
        Output of parsinghtml() — keys mov_name, show_time, main_act,
        introduce, mov_info, mov_type, main_img, imglist, comment, news.
        (The name shadows the builtin; kept to preserve the signature.)
    count : int
        Maoyan film id, used as the movie primary key and as the
        `belong` foreign key on comments/news.

    Returns
    -------
    int
        0 on success; 1 movie save failed; 2 comment save failed;
        3 news save failed (the failing exception is printed).
    """
    try:
        movie_info = models.Movie_info(id=count)
        movie_info.name = dict['mov_name']
        movie_info.show_time = datetime.datetime.strptime(dict['show_time'], '%Y-%m-%d')
        movie_info.mainact = dict['main_act']
        movie_info.introduce = dict['introduce']
        movie_info.movie_info = dict['mov_info']
        movie_info.type = dict['mov_type']
        movie_info.score = get_range()  # fabricated rating (not scrapeable)
        movie_info.main_img = dict['main_img']
        # BUG FIX: the original did `imglist = "," + item` inside the loop,
        # reassigning instead of accumulating, so only the LAST gallery
        # image ever reached the database.
        movie_info.mediadir = "".join("," + item for item in dict['imglist'])
        movie_info.save()
        # movie row saved; move on to comments
    except Exception as e:
        print(e)
        return 1
    try:
        for item in dict['comment']:
            comment = models.Movie_comment(belong=count)
            comment.author = item['author']
            comment.content = item['content']
            comment.likecount = int(item['likecount'])
            comment.save()
        # comments saved; move on to news
    except Exception as e:
        print(e)
        return 2
    try:
        for item in dict['news']:
            news = models.Movie_news(belong=count)
            news.introduce = item['content']
            news.linkaddr = item['href']
            news.save()
    except Exception as e:
        # BUG FIX: was a bare `except:` that also swallowed SystemExit /
        # KeyboardInterrupt and discarded the error silently — now prints
        # it, consistent with the two handlers above.
        print(e)
        return 3
    return 0

def get_range():
    """Return a fake rating in [5.0, 9.9] with exactly one decimal digit.

    Draws the integer part (5-9) and the tenths digit (0-9) separately, so
    every one-decimal value in the range is reachable.
    """
    whole = random.randint(5, 9)
    tenth = random.randint(0, 9)
    return float("{}.{}".format(whole, tenth))

def spider_main():
    """Crawl Maoyan film pages sequentially, starting at id 1339160.

    Loops forever: fetch → parse → save, sleeping 5s between pages.
    Stops after 100 accumulated errors (or on an unrecoverable parse
    failure) and reports the elapsed time.
    """
    count = 1339160
    errorcount = 0
    T1 = time.time()
    while True:
        try:
            msg = ""
            # BUG FIX: the original swallowed fetch errors but then used the
            # unbound `h5` anyway (NameError / stale page). A failed fetch
            # now falls through to the outer handler, which counts the
            # error, sleeps, and retries the same id.
            print("开始爬取数据")
            h5 = movie_spider(count)
            print(h5.text)
            try:
                print("开始解析数据")
                data = parsinghtml(h5.text)
            except Exception:
                # BUG FIX: original printed the local `dict` here, which is
                # unbound when parsing fails — UnboundLocalError.
                msg = "解析错误（极有可能是爬虫错误）"
                print(msg)
                return
            print("开始保存数据")
            result = save_data(data, count)
            if result == 1:
                msg = "电影保存错误"
            elif result == 2:
                msg = "评论保存错误"
            elif result == 3:
                msg = "新闻保存错误"
            time.sleep(5)  # throttle so we don't hammer the site
            count = count + 1
            # BUG FIX: original concatenated str + int ("count=" + count),
            # raising TypeError on EVERY successful iteration.
            print("count=" + str(count) + "errorcount=" + str(errorcount))
            print(msg)
        except Exception:
            # BUG FIX: was a bare `except:` — Ctrl-C could not stop the
            # crawler; Exception lets KeyboardInterrupt/SystemExit through.
            errorcount = errorcount + 1
            time.sleep(1)
            print(msg)
            if errorcount > 100:
                # BUG FIX: "count = " + count was str + int -> TypeError,
                # crashing exactly when the summary should print.
                print("count = " + str(count))
                T2 = time.time()
                print('程序运行时间:%s毫秒' % ((T2 - T1) * 1000))
                return