# -*-coding:utf-8-*-
import requests
import json
import random
import re
import time
import urllib.request
from urllib.parse import urlencode
import pymongo
import requests
from bs4 import BeautifulSoup
from pyquery import PyQuery as pq

# Connect to the local MongoDB server; records are stored in the
# 'TV_info' database (NOTE(review): the original comment said the
# database was named 'Douban_TVseries', but the code uses 'TV_info').
client = pymongo.MongoClient('localhost')
db = client['TV_info']



def login_in():
    """Log in to Douban and return an authenticated requests.Session.

    Posts the login form through a randomly chosen proxy, downloads the
    captcha image (if one is shown) for the operator to read, then
    re-submits the form with the captcha solution.

    Returns:
        requests.Session: session carrying the login cookies. Success is
        only reported by printing the logged-in user name; the session is
        returned either way.
    """
    session = requests.Session()
    # Rotate among several proxies so the crawler is less likely to be
    # rate-limited for too many requests from a single address.
    # BUG FIX: the first proxy previously used a full-width colon '：'
    # ("...140.15：80"), which made it an invalid proxy URL.
    proxy_pool = [
        {"http": "http://47.105.140.15:80"},
        {"http": "http://36.89.85.91:41697"},
        {"http": "http://124.42.211.80:23500"},
        {"http": "http://212.85.67.238:58672"},
        {"http": "http://174.32.109.38:87"},
    ]
    proxies = random.choice(proxy_pool)
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0"}
    # Login form parameters (email/password/redirect target).
    formdata = {
        "source": "None", "redir": "https://www.douban.com/", "form_email": "19506597221", "form_password": "19506597221",
        "login": u"登录"}
    # Login endpoint.
    url = 'https://accounts.douban.com/login'
    # First POST: the response may contain a captcha challenge.
    response = session.post(url, formdata, headers=headers, proxies=proxies)
    doc = pq(response.text)

    # Captcha image URL and the hidden captcha id from the login page.
    captcha_url = doc('#lzform > div.item.item-captcha > div').children('img').attr('src')
    print(captcha_url)
    captcha_id = doc('#lzform > div.item.item-captcha > div > div >input[type="hidden"]:nth-child(3)').attr('value')
    print(captcha_id)
    # Save the captcha image locally so the operator can read it.
    if captcha_url:
        urllib.request.urlretrieve(captcha_url, "captcha.jpg")
    captcha = input("please input the captcha:")
    formdata['captcha-solution'] = captcha
    formdata['captcha-id'] = captcha_id
    # Second POST with the captcha solution filled in.
    r = session.post(url, formdata, headers=headers, proxies=proxies).text
    doc_1 = pq(r)
    # If the logged-in user name appears in the navbar, the login worked.
    user_name = doc_1('#db-global-nav > div > div.top-nav-info > ul > li.nav-user-account >a').children('span').text()
    if user_name:
        print(user_name, u'登陆成功')
    return session


# Fetch a page through the logged-in session and return its HTML text.
def requests_url(session, url):
    """GET *url* via *session* with a randomly chosen proxy.

    Sleeps 2 seconds after each request to throttle the crawl.

    Returns:
        str: the response body text.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0"}
    # BUG FIX: the first proxy previously used a full-width colon '：'
    # ("...140.15：80"), which made it an invalid proxy URL.
    proxy_pool = [
        {"http": "http://47.105.140.15:80"},
        {"http": "http://36.89.85.91:41697"},
        {"http": "http://124.42.211.80:23500"},
        {"http": "http://212.85.67.238:58672"},
        {"http": "http://174.32.109.38:87"},
    ]
    proxies = random.choice(proxy_pool)
    response = session.get(url, headers=headers, proxies=proxies)
    html = response.text
    time.sleep(2)  # throttle to avoid hammering the server
    return html


# Fetch one page (20 items) of TV-series search results.
def get_one_page_source(session, url_pre, i):
    """Return the series summaries for the result page at offset *i*.

    Args:
        session: logged-in requests session.
        url_pre: search-API URL prefix; the 'start' query param is appended.
        i: result offset of this page.

    Returns:
        tuple: (request params dict, list of per-series summary dicts,
        next offset -- advanced by 20 only when the page had data).
    """
    source = []
    data = {'start': i, }
    url = url_pre + urlencode(data)
    html = requests_url(session, url)  # response body is JSON
    dics = json.loads(html)['data']
    if dics:  # page has data
        i += 20
        # BUG FIX: the append was previously indented outside the loop
        # body, so only the LAST item of each 20-item page was kept.
        for dic in dics:
            source.append({
                '电视剧': dic['title'],
                'url': dic['url'],
                'rate': dic['rate'],
                '主演': dic['casts'],
                '导演': dic['directors'],
            })
    return data, source, i


# Scrape the detail page of one TV series.
def get_series_information(session, url):
    """Return detail fields scraped from a series page.

    NOTE(review): all selectors/regexes assume the Douban page layout at
    the time of writing -- verify against a live page if extraction breaks.

    Returns:
        tuple: (short_comment_url, short_comment_num, styles, area,
        episodes, date, tags).
    """
    html = requests_url(session, url)
    # First-air year: primary source is the year shown in the <h1> title.
    try:
        soup = BeautifulSoup(html, 'lxml')
        date = int(soup.select('h1 span')[1].text[1:5])
    except (IndexError, ValueError):
        # Fallback: parse the '首播' (first aired) info line.
        # BUG FIX: the old pattern contained stray spaces inside the tags
        # ('< / span >', '(.* ?)'), so it could never match any HTML.
        patterns = re.compile(r'<span class="pl">首播:</span>.*?content="(.*?)"', re.S)
        da = re.findall(patterns, html)
        date = da[0][:4] if da else None
    doc = pq(html)
    # Short-comment link and count (count parsed from the link text).
    div = doc('#comments-section > div.mod-hd > h2 > span').children('a')
    short_comment_url = div.attr('href')
    short_comment_num = int(div.text().split(' ')[1])

    # Genres.
    soup = BeautifulSoup(html, 'lxml')
    styles = [i.string for i in soup.find_all(attrs={'property': 'v:genre'})]
    # Production country/region.
    pattern = re.compile('<span class="pl">制片国家/地区:</span>(.*?)<br/>', re.S)
    area = re.findall(pattern, html)[0].strip()
    # User tags.
    doc_tags = pq(html)
    tags = doc_tags('div.tags-body').children('a').text()
    # Episode count; 0 when the field is absent or not an integer.
    pattern1 = re.compile('<span class="pl">集数:</span>(.*?)<br/>', re.S)
    try:
        episodes = int(re.findall(pattern1, html)[0])
    except (IndexError, ValueError):
        episodes = 0
    return short_comment_url, short_comment_num, styles, area, episodes, date, tags

# Persist one series record into the 'All_TV' collection in MongoDB.
def save_to_mongo(result):
    """Insert *result* into db['All_TV']; print and return True on success.

    NOTE(review): insert_one returns an InsertOneResult object, which is
    always truthy, so the False branch is effectively unreachable.
    """
    outcome = db['All_TV'].insert_one(result)
    if not outcome:
        return False
    print('存储到 MongoDB 成功', result)
    return True

# Pause the crawl, printing a progress line several times while waiting.
def time_stop(second, print_nums, word):
    """Sleep roughly *second* seconds in *print_nums* chunks.

    Prints '<word>程序正在运行......' after each chunk so the operator can
    see the crawler is alive.

    BUG FIX: when second < print_nums, the chunk int(second/print_nums)
    was 0, so the loop counter never advanced and the function looped
    forever; the chunk length is now clamped to at least 1 second.
    """
    step = max(1, int(second / print_nums))  # hoisted loop invariant
    i = 1
    while i <= second:
        time.sleep(step)
        print(word + '程序正在运行......')
        i += step

# Crawl every result page and store each series in MongoDB.
def main():
    """Entry point: log in, then page through the TV-series search API.

    For every series summary found, fetch its detail page, merge the
    extra fields into the summary dict, and persist it to MongoDB.
    """
    session = login_in()
    # BUG FIX: the prefix previously contained HTML-escaped entities
    # ('&;tags=' and a trailing '&amp;'), producing a garbled query
    # string; the separators are now plain '&'.
    url_pre = ('https://movie.douban.com/j/new_search_subjects'
               '?sort=U&range=0,10&tags=%E7%94%B5%E8%A7%86%E5%89%A7&')
    i = 0
    while True:
        print(url_pre + urlencode({'start': i}))
        # BUG FIX: each page is now fetched exactly once; the original
        # fetched page 0 twice and discarded the first result.
        data, source, i = get_one_page_source(session, url_pre, i)
        if not source:
            print('电视剧爬取结束')
            break
        for dic in source:
            time_stop(1, 1, dic['电视剧'])
            (short_comment_url, short_comment_num, styles, area,
             episodes, date, tags) = get_series_information(session, dic['url'])
            dic['短评 url'] = short_comment_url
            dic['短评数'] = short_comment_num
            dic['类型'] = styles
            dic['出品地'] = area
            dic['集数'] = episodes
            dic['出品时间'] = date
            dic['电视剧标签'] = tags
            save_to_mongo(dic)
        time_stop(60, 5, '电视剧换页')


if __name__ == '__main__':
    main()

