import requests
import random
from bs4 import BeautifulSoup
from pymongo import MongoClient
from multiprocessing import Pool

'''
Parse the home page   => all tab (category) pages
Parse a tab page      => 1. the tab's pagination  2. each page's contents
Parse a content page  => extract the film information
'''

# Base URL of the movie site being scraped.
url_home = "https://www.dy2018.com"
# MongoDB handle for storing scraped film records.
# NOTE(review): the connection is created at import time as a module-level
# side effect — importing this module requires a reachable local mongod.
client = MongoClient("mongodb://localhost:27017/")
db = client["film_db"]
collection = db["film_collection"]
# Pool of User-Agent strings; get_html() picks one at random per request so
# the crawler looks less like a bot.  (Despite the name, these are User-Agent
# header *values*, not full header dicts.)
headers = [
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0",
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'
]


def get_html(url):
    """Fetch *url* with a randomly chosen User-Agent and return the decoded text.

    Fixes:
    - ``timeout`` added: ``requests.get`` has no default timeout, so a stalled
      server would hang the crawler forever.
    - Decode as ``gb18030`` instead of ``GB2312``: gb18030 is a strict superset
      of GB2312 (identical on all GB2312 content) and also handles the GBK
      characters Chinese sites frequently emit under a declared GB2312 charset.
    """
    response = requests.get(
        url,
        headers={'User-Agent': random.choice(headers)},
        timeout=15,
    )
    response.encoding = 'gb18030'
    return response.text


def analysis_home_page():
    """Parse the site home page and print every tab (category) name and URL.

    Walks the <li> entries under the element with id="menu".  Per-tab
    crawling (``analysis_tab``) is meant to be fanned out from here, e.g.
    with a ``multiprocessing.Pool`` over the discovered tab URLs.

    Fixes: removed the leftover debug print of raw HTML and the
    commented-out Pool scaffolding; guard against a missing menu element
    or decorative <li> entries without an <a>, which previously raised
    AttributeError.
    """
    html = get_html(url_home)
    soup = BeautifulSoup(html, 'html.parser')
    contain = soup.find(id='menu')
    if contain is None:
        # Layout changed or the request was blocked — nothing to parse.
        print('menu element not found on home page')
        return
    for li in contain.find_all('li'):
        anchor = li.a
        if anchor is None or anchor.get('href') is None:
            continue  # <li> without a link (separator / decoration)
        tab_url = url_home + anchor.get('href')
        tab_name = anchor.text
        print(tab_name, tab_url)


def analysis_tab(url):
    """Parse one tab (category) page and report its pagination size.

    Pagination on these pages is exposed as a <select> whose <option>
    entries are the individual pages; this prints how many there are.

    Fix: catch ``Exception`` rather than ``BaseException`` — the latter
    also swallows KeyboardInterrupt/SystemExit, which made the crawler
    impossible to interrupt cleanly.
    """
    print('开始解析tab：' + url)
    try:
        html = get_html(url)
        soup = BeautifulSoup(html, 'html.parser')
        # soup.find returns None when no <select> exists; the resulting
        # AttributeError is handled below like any other per-tab failure.
        select = soup.find('select')
        options = select.find_all('option')
        print(len(options))
    except Exception:
        print('Exception', url)


def analysis_detail(url, tab_name):
    """Fetch a film detail page and store the extracted record in MongoDB.

    Bug fix: the original body referenced ``film_name``, ``mainImgUrl``,
    ``detail``, ``imgs`` and ``urls`` without ever defining them, so every
    call raised NameError.  The values are now actually scraped from the
    page.  The selectors below are best-effort for the dy2018 detail-page
    layout — TODO confirm against a live page.

    :param url: absolute URL of the film detail page
    :param tab_name: category name the film was discovered under
    """
    html = get_html(url)
    soup = BeautifulSoup(html, 'html.parser')

    # Film title: assumed to be the first <h1>; fall back to <title>.
    h1 = soup.find('h1')
    if h1 is not None:
        film_name = h1.get_text(strip=True)
    elif soup.title is not None:
        film_name = soup.title.get_text(strip=True)
    else:
        film_name = ''

    # The main body (synopsis, stills, download links) is presumably the
    # element with id="Zoom"; scope extraction there when present.
    zoom = soup.find(id='Zoom')
    detail = zoom.get_text(strip=True) if zoom is not None else ''
    scope = zoom if zoom is not None else soup

    imgs = [img.get('src') for img in scope.find_all('img') if img.get('src')]
    urls = [a.get('href') for a in scope.find_all('a') if a.get('href')]

    data = {
        "tab_name": tab_name,
        "film_name": film_name,
        # First still doubles as the poster, matching the original intent
        # of storing a single main image URL.
        "main_img": imgs[0] if imgs else None,
        "detail": detail,
        "imgs": imgs,
        "urls": urls,
    }
    collection.insert_one(data)


def main():
    """Entry point: start the crawl from the site home page."""
    analysis_home_page()


if __name__ == '__main__':
    main()
