from pymongo import MongoClient
from bs4 import BeautifulSoup

import requests
import pprint
import random
import time
import json
import re

# MongoDB connection. connect=False defers the actual connection until the
# first operation (safe if the client is created before any fork).
client = MongoClient('127.0.0.1', 27017, connect = False)

douban_movie_db = client['douban_movie_db']
# douban_movie_db.drop_collection('douban_movie_coll')
# One collection holds one document per scraped film, keyed by Douban 'id'.
douban_movie_coll = douban_movie_db['douban_movie_coll']


def scrapy_film_content(item):
    """Fetch a film's detail page, parse its metadata and store it in MongoDB.

    item: dict from the search API with at least 'url', 'title', 'rate', 'id'.
    The page request is retried forever with a fresh proxy until it succeeds.
    Inserts one document per film id (skipped if the id is already stored).
    """
    print(item)

    # Retry in a loop: the original recursed on every failed request, which
    # can exhaust the stack on a flaky proxy pool. Catch only request errors
    # instead of a bare except.
    while True:
        try:
            print('scrapy_film_content[3]')
            text = requests.get(item['url'], proxies = parse_proxy_ip(), timeout = 3).text
            break
        except requests.RequestException:
            continue

    soup = BeautifulSoup(text, 'lxml')
    # Hoisted: the original re-ran soup.find(id='info') for every field.
    info = soup.find(id = 'info')

    def _sibling_text(label):
        # Text of the element following the label span ("导演", "编剧", ...),
        # or '' when that label is absent from the page.
        node = info.find(text = re.compile(label))
        return node.parent.find_next_sibling().get_text() if node else ''

    def _plain_text(label):
        # Some labels (制片国家 / 语言 / 又名) carry their value as the bare
        # text node right after the label element rather than in a sibling tag.
        node = info.find(text = re.compile(label))
        return node.parent.next_element.next_element if node else ''

    actor = _sibling_text('导演')
    writer = _sibling_text('编剧')

    roles_text = _sibling_text('主演')
    main_roles = [i.strip() for i in roles_text.split('/')] if roles_text else []

    film_type = _sibling_text('类型')
    country = _plain_text('制片国家')
    lang = _plain_text('语言')
    release_date = _sibling_text('上映日期')
    sheet_len = _sibling_text('片长')
    other_names = _plain_text('又名')

    # Guard: pages without a synopsis block would otherwise raise
    # AttributeError on .get_text().
    report = soup.find(id = 'link-report')
    introduce = report.get_text().replace('©豆瓣', '').strip() if report else ''

    # Renamed from 'item' — the original shadowed its own parameter here.
    doc = {
        'actor': actor,
        'writer': writer,
        'main_roles': main_roles,
        'film_name': item['title'],
        'film_type': film_type,
        'country': country,
        'lang': lang,
        'rate': item['rate'],
        'release_date': release_date,
        'sheet_len': sheet_len,
        'other_names': other_names,
        'introduce': introduce,
        'url': item['url'],
        'id': item['id'],
        'comments': scrapy_file_comments(item['id'], 0, [])
    }

    if not douban_movie_coll.find_one({'id': item['id']}):
        douban_movie_coll.insert_one(doc)

        pprint.pprint(doc)


def scrapy_file_comments(film_id, start, results = None):
    """Collect short comments for film_id, paging 20 at a time from `start`.

    Pages until Douban denies access (anonymous sessions only see the first
    few pages). Returns `results` (created if None) with one dict per rated
    comment: {'text', 'score', 'time'}.
    """
    # Backward-compatible default; callers passing [] behave as before.
    if results is None:
        results = []

    # Iterate instead of recursing: the original added one stack frame per
    # 20-comment page and per failed request, risking RecursionError.
    while True:
        url = 'https://movie.douban.com/subject/' + film_id + '/comments?start=' + str(start) + '&limit=20&sort=new_score&status=P'

        try:
            print('scrapy_file_comments[2]')
            text = requests.get(url, proxies = parse_proxy_ip(), timeout = 3).text
        except requests.RequestException:
            continue  # proxy/timeout failure: retry the same page with a new proxy

        if '你没有权限访问这个页面' in text:
            print('影评查看权限结束')
            return results

        soup = BeautifulSoup(text, 'lxml')

        for item in soup.select('.comment-item'):
            rating = item.find(class_ = 'rating')
            if not rating:
                continue  # unrated comments are skipped, as before
            comment = {
                'text': item.find(class_ = 'short').get_text(),
                # rating class looks like 'allstar40': last two digits = score * 10
                'score': int(rating.get('class')[0][-2:]) / 10,
                'time': item.find(class_ = 'comment-time').get('title')
            }
            print(comment['text'])
            print()
            results.append(comment)

        start += 20


# Browser-like headers for the Douban AJAX search endpoint;
# X-Requested-With mimics the XHR the explore page itself sends.
headers = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Connection': 'keep-alive',
    'Host': 'movie.douban.com',
    'Referer': 'https://movie.douban.com/explore',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest'
}


def parse_proxy_ip():
    """Fetch one proxy from the Mogu proxy API, in requests' proxies format.

    The API returns {'msg': [ {'ip', 'port'}, ... ]} on success and a string
    'msg' when the quota is temporarily exhausted; in that case back off and
    retry forever.
    """
    # NOTE(review): the appKey is hard-coded in the URL — consider moving it
    # to configuration rather than source.
    while True:
        r = requests.get('http://mvip.piping.mogumiao.com/proxy/api/get_ip_bs?appKey=8c7aa78c580045cabeef24795fbb324c&count=1&expiryDate=0&format=1&newLine=2')
        # Parse the body once; the original called r.json() four times.
        data = r.json()

        if isinstance(data['msg'], str):
            time.sleep(5 + random.random())  # rate-limited: back off with jitter
        else:
            break

    ip = data['msg'][0]['ip']
    port = data['msg'][0]['port']

    proxies = {
        'http': 'http://' + ip + ':' + port,
        'https': 'http://' + ip + ':' + port
    }
    pprint.pprint(proxies)
    return proxies


def search_subjects(page_start):
    """Fetch one page of 'hot' movie listings and scrape each unseen film.

    On any failure (proxy error, short error/captcha body, unparsable JSON,
    or a scrape error mid-page) the whole page is retried; films already in
    MongoDB are skipped, so retries do not duplicate work.
    """
    # Loop-based retry replaces the original's recursive self-call, which
    # could grow the stack without bound; the bare except is narrowed to
    # Exception so KeyboardInterrupt/SystemExit still propagate.
    while True:
        try:
            print('search_subjects[1]')
            data = requests.get('https://movie.douban.com/j/search_subjects', proxies = parse_proxy_ip(), headers = headers, params = {
                'type': 'movie',
                'tag': '热门',
                'sort': 'recommend',
                'page_limit': 20,
                'page_start': page_start
            }, timeout = 3)

            # A very short body means Douban served an error/captcha page.
            if len(data.text) < 500:
                continue

            data = data.json()

            for i in data['subjects']:
                if douban_movie_coll.find_one({'id': i['id']}):
                    print(i['title'], '已访问')
                    continue

                scrapy_film_content(i)

            return
        except Exception:
            continue


def begin():
    """Crawl the recommendation feed, 20 titles per page, offsets 0..980."""
    page_start = 0
    while page_start < 1000:
        search_subjects(page_start)
        page_start += 20


# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
    begin()