#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
采用BeautifulSoup解析html并获取内容
注解
"""

__author__ = 'hubert'


from bs4 import BeautifulSoup
import requests
import json
import csv
from datetime import datetime
import time
import random
import threading
import py1.py_baidu_ocr
import pymysql

import logging
logging.basicConfig(level=logging.INFO)


class Movie(object):
    def __init__(self):
        # 中文名称
        self.name = ''
        # 外文名称
        self.foreignname = ''
        # 别名
        self.alias = ''
        # 导演
        self.director = ''
        # 演员
        self.actor = ''
        # 编剧
        self.screenwriter = ''
        # 出品时间/上映日期
        self.releaseyear = ''
        # 制片国家/地区
        self.area = ''
        # 类型
        self.type = ''
        # 片长
        self.duration = ''
        # 评分
        self.rate = ''
        # 简介
        self.description = ''
        # 海报地址
        self.poster_url = ''
        # 语言
        self.language = ''
        # 集数
        self.episodes = ''
        # 豆瓣ID
        self.subject_id = ''


    def getValue(self):
        # 利用str的format格式化字符串
        # 利用生成器推导式去获取key和self中key对应的值的集合
        return ",".join("{}={}".format(key,getattr(self,key)) for key in self.__dict__.keys())


    # 重写__str__定义对象的打印内容
    def __str__(self):
        return "{}->({})".format(self.__class__.__name__, self.getValue())



encoding = 'uft-8'


# http的代理池，代理网站https://www.kuaidaili.com/free/inha/
def get_ip():
    proxies = {}
    with open('ip池.txt', 'r') as r:
        http_ip_port = r.readlines()
        proxies["http"] = random.choice(http_ip_port).strip("\n")
        return proxies

# 随机生产浏览器标识user-agent
def get_ua():
    first_num = random.randint(55, 76)
    third_num = random.randint(0, 3800)
    fourth_num = random.randint(0, 140)
    os_type = ['(Windows NT 6.1;WOW64)', '(Windows NT 10.0;WOW64)', '(X11;Linux x86_64)', '(Macintosh;Intel Mac OS X 10_14_5)']
    chrome_version = 'Chrome/{}.0.{}.{}'.format(first_num, third_num, fourth_num)
    ua = ''.join(["Mozilla/5.0 ", random.choice(os_type), ' AppleWebKit/537.36 ', '(KHTML,like Gecko) ', chrome_version, ' safari/537.36 ', 'Edg/100.0.1185.50'])
    return ua

# head
headers = {
    'User-Agent': get_ua()
}

#proxies = {
#    # "http": "http://218.14.108.53:8060"
#    # "http": "http://39.99.54.91:80"
#    # "http": "http://47.92.113.71:80"
#}


# 查看数量，用于判断循环
def douban_subjects(url):
    time.sleep(random.random()*5)
    # time.sleep(10)
    # s = requests.Session()
    proxies = get_ip()
    r = requests.get(url, headers=headers, proxies=proxies)
    # 设置编码为utf8
    r.encoding = encoding
    # 直接获取的是json串
    json_str = r.text
    # json 串，只有1个元素，key:subjects,vlaue：json串
    json_object = json.loads(json_str)
    # 数组，里面为json数据
    subjects = json_object["subjects"]
    num = len(subjects)
    return num


def get_detail_url(url):
    time.sleep(random.random()*5)
    # time.sleep(10)
    json_str_detail = ""
    try:
        proxies = get_ip()
        # s = requests.Session()
        # 请求列表页
        r = requests.get(url, headers=headers, proxies=proxies)
        # 设置编码为utf8
        r.encoding = encoding
        # 直接获取的是json串
        json_str_detail = r.text
    except Exception as ex_res:
        print("链接错误", str(ex_res))

    try:
        # json 串，只有1个元素，key:subjects,vlaue：json串
        json_object = json.loads(json_str_detail)
    except Exception as ex:
        # IP被封的情况
        print('get_detail_url异常', str(ex))
        print("请求列表失败（get_detail_url）：", json_str_detail, proxies)
        return "", -1

    # 数组，里面为json数据
    subjects = json_object["subjects"]
    list_json = subjects

    num = len(subjects)

    url_datail_list = []
    if len(list_json) < 1:
        return url_datail_list, -1
    for item in list_json:
        title = item["title"]
        picurl = item["cover"]
        detail_url = item["url"]
        subject_id = item["id"]
        # print(title, picurl, detail_url)
        url_datail_list.append({"detail_url": detail_url, "subject_id": subject_id})
    return url_datail_list, num


# 下载验证码
def dowload_captcha(img_url, img_name):
    r = requests.get(img_url, headers=headers, stream=True)
    if r.status_code == 200:
        with open("/Users/hubert/Downloads/pic/ca"+img_name, "wb") as f_img:
            f_img.write(r.content)


# 解析详情页
def __parser_douban__2(f, url_datail_list):
    print("写CSV")

# 解析详情页
def __parser_douban__(url_datail_list):
    # req = requests.Session()
    for item_url in url_datail_list:

        movie = Movie()
        csv_data = []
        # 休眠，防止被禁用,大概3秒左右
        time.sleep(random.random()*5)
        # time.sleep(10)
        proxies = get_ip()
        try:
            # https://movie.douban.com/subject/25964071/
            res = requests.get(item_url["detail_url"], headers=headers, proxies=proxies)
            # 设置编码为utf8
            res.encoding = encoding
            soup = BeautifulSoup(res.text, 'lxml')
            div_content = soup.find(id="content")
            if div_content is None:
                # ip 可能被封
                print("ip 可能被封", soup)
                continue
            div_form = div_content.find("form")
            if div_form is not None:
                print("遇到验证码", div_content, res.url)
                captcha_img_url = div_form.find("img").get("src")
                # 下载验证码图片
                #dowload_captcha(captcha_img_url, "")
                # https://www.douban.com/misc/sorry
                print(captcha_img_url)
                # 结束本次循环
                continue
            dl_span = div_content.find("h1")
            titles = dl_span.find_all("span")
            if len(titles) < 1:
                print("无法获取名称", div_content)
            elif len(titles) == 2:
                movie.name = titles[0].text + titles[1].text
            elif len(titles) == 1:
                movie.name = titles[0].text
        except Exception as ex:
            # IP被封的情况
            print('解析异常：', str(ex))
            print(soup)

        print(movie.name, res.url, proxies)

        # name
        csv_data.append(movie.name)

        div_pic = div_content.find(id="mainpic")
        try:
            # 节目海报 poster_url
            movie.poster_url = div_pic.find("img").get("src")
            csv_data.append(movie.poster_url)
        except:
            print('获取图片地址异常：', str(ex))
            print(soup)

        div_info = div_content.find(id="info")
        spans = div_info.find_all("span")
        # print("series：", spans)

        # 类型
        type = ""

        # 制片国家/地区
        area = ''
        for i in range(0, len(spans)):
            if "导演" == spans[i].text:
                # print("导演：", spans[i+1].text)
                # 导演,根据标签结构，第3个就是导演属性
                movie.director = spans[i+1].text
            elif "编剧" == spans[i].text:
                # 编剧,根据标签结构，第6个就是导演属性
                movie.screenwriter = spans[i+1].text
            elif "主演" == spans[i].text:
                try:
                    # 主演，根据标签结构，第9个就是导演属性，但是又有多个span嵌套标签
                    movie.actor = spans[i+1].text
                except Exception as ex_actor:
                     print("actor获取异常", str(ex_actor))
            elif "类型:" == spans[i].text:
                for i in range(i+1, len(spans)):
                    key = list(spans[i].attrs.keys())
                    if "property" != key[0]:
                        break
                    else:
                        if type != "":
                            type = spans[i].text + "/" + type
                        else:
                            type = spans[i].text
                movie.type = type
            elif "制片国家/地区:" == spans[i].text:
                # 获取标签外的文本内容，如<span></</span>123测试
                area = spans[i].next_sibling.strip()
            elif "语言:" == spans[i].text:
                movie.language = spans[i].next_sibling.strip()
            elif "首播:" == spans[i].text:
                movie.releaseyear = spans[i+1].text
            elif "集数:" == spans[i].text:
                try:
                    # 无spans[i+1]
                    movie.episodes = spans[i].next_sibling.strip()
                except Exception as ex:
                    print('获取集数异常：', str(ex))
                    movie.episodes = spans[i].next()

            elif "单集片长:" == spans[i].text:
                try:
                    # 无spans[i+1]
                    movie.duration = spans[i].next_sibling.strip()
                except Exception as ex:
                    print('获取单集片长异常：', str(ex))
                    movie.duration = spans[i].next()
            elif "又名:" == spans[i].text:
                movie.alias = spans[i].next_sibling.strip()

        csv_data.append(movie.director)
        csv_data.append(movie.screenwriter)
        csv_data.append(movie.actor)
        csv_data.append(movie.type)
        movie.area = area
        csv_data.append(area)
        # 语言
        csv_data.append(movie.language)
        csv_data.append(movie.releaseyear)
        csv_data.append(movie.duration)
        csv_data.append(movie.episodes)
        csv_data.append(movie.alias)
        # 评分
        dl_strong = div_content.find(class_="ll rating_num")
        movie.rate = dl_strong.text
        csv_data.append(movie.rate)

        # 简介
        dl_des = div_content.find(id="link-report")
        if dl_des is not None:
            movie.description = dl_des.text.strip()
            try:
                movie.description = movie.description.replace("\"", "'")
            except:
                print("引号替换异常")
        csv_data.append(movie.description)
        movie.subject_id = item_url["subject_id"]
        csv_data.append(movie.subject_id)

        # print("获取的数据：", csv_data)
        # print("获取的数据：", movie.__str__())

        csv_head = []
        try:
            """
                写cvs文件
            """
            # 写入csv文件
            # write_csv(f, csv_data)
        except Exception as ex_csv:
            print("写入csv异常", str(ex_csv))

        try:
            """
                写数据库
            """
            # 插入数据库
            insert_db(csv_data)
        except Exception as ex_db:
            print("写入数据库异常", str(ex_db))

# 写入csv文件
def write_csv(f, array_list):
    # 1. 创建文件对象
    # f = open(path,'w',encoding='utf-8')
    # 2. 基于文件对象构建 csv写入对象
    csv_writer = csv.writer(f)
    # 3. 构建列表头
    # csv_writer.writerow(["名称","code"])
    # 4. 写入csv文件内容
    csv_writer.writerow(array_list)
    # 5. 关闭文件
    # f.close()


lock = threading.Lock()



# 函数（测试用）
def run_thd2(name, id):
    print(name, id)

# 链接数据库
def conn_db():
    host = '192.168.4.73'
    user = 'bigdata'
    password = 'bigdata'
    port = 3306
    db_name = 'movieinfo'

    try:
        # 建立连接
        conn = pymysql.connect(host=host, user=user, password=password, db=db_name, port=port, charset="utf8")
    except Exception as ex:
        print("数据库链接异常", str(ex))
    # 游标
    cursor = conn.cursor()
    # 返回游标
    return cursor, conn


def select_db(subject_id):
    cursor, conn = conn_db()
    sql = "select id from douban_series where subject_id = " + subject_id

    try:
        # 执行sql语句
        print("查询SQL="+sql)
        cursor.execute(sql)
        result = cursor.fetchone()
        return result
    except Exception as ex_select:
        print("查询数据库异常", str(ex_select))
        return None
    close_db(cursor, conn)


# 根据条件查询
def select_result(cursor, subject_id):
    sql = "select id from douban_series where subject_id = " + subject_id

    try:
        # 执行sql语句
        print("查询SQL="+sql)
        cursor.execute(sql)
        result = cursor.fetchone()
        return result
    except Exception as ex_select:
        print("查询数据库异常", str(ex_select))
        return -1

# 写入数据
def insert_db(csv_data):
    cursor, conn = conn_db()
    # 查询是否有记录，有记录不插入
    result = select_result(cursor, csv_data[14])
    if result == None:
        # 未查询到记录
        # print(csv_data)
        """
            SQL 插入语句
            name 0, poster_url 1, director 2, screenwriter 3, actor 4. type 5, area 6, language 7, releaseyear 8, duration 9, episodes 10，alias 11 rate 12, description 13, subject_id 14
        """
        sql = "INSERT INTO douban_series (name, alias, director, actor, screenwriter, " \
              "releaseyear, area, type, duration, rate, description, poster_url, language, episodes, subject_id, " \
              "create_time, update_time) VALUES (" + "\"" + csv_data[0] + "\"" \
              "," + "\"" + csv_data[11] + "\"" + "," + "\"" + csv_data[2] + "\"" + "," + "\"" + csv_data[4] + "\"" + "," + "\"" + csv_data[3] + "\"" + \
              "," + "\"" + csv_data[8] + "\"" + "," + "\"" + csv_data[6] + "\"" + "," + "\"" + csv_data[5] + "\"" + "," + "\"" + csv_data[9] + "\"" + "," + \
              "\"" + csv_data[12] + "\"" + "," + "\"" + csv_data[13] + "\"" + "," + "\"" + csv_data[1] + "\"" + "," + "\"" + csv_data[7] + "\"" + "," + "\"" + csv_data[10] + "\"" + "," + \
              csv_data[14] + "," + "\"" + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\"" + "," + "\"" + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\"" + ")"
        try:
            # 执行sql语句
            # print("SQL="+sql)
            cursor.execute(sql)
            # 提交到数据库执行
            conn.commit()
        except Exception as ex_insert:
            # 如果发生错误则回滚
            conn.rollback()
            print("插入数据库异常", str(ex_insert))
    else:
        print("记录已存在：", "subject_id=" + csv_data[14])

    close_db(cursor, conn)


# 关闭数据库
def close_db(cursor, conn):
    # print("关闭数据库链接")
    cursor.close()
    conn.close()


# 线程
def run_thd(thread_id, start_num):
    limit = 20
    start_page = start_num
    # 初始值,每页的内容数量
    subjects_num = 20
    # 分类:  热门 美剧 英剧 韩剧 日剧 国产剧 港剧 日本动画 综艺 纪录片
    tag = '国产剧'
    # 每个线程4页数据，count_num 计数器，初始值为0，每次开始初始化
    count_num = 0

    # https://movie.douban.com/j/new_search_subjects?sort=U&range=0,10&tags=%E7%94%B5%E5%BD%B1&start=0

    csv_path = "/Users/hubert/Documents/home/csv/douban_moive" + tag + str(thread_id+1) + "：" + datetime.now().strftime('%Y-%m-%d') + ".csv"
    # f = open(csv_path, 'w', encoding='utf-8-sig')
    # with open(csv_path, 'w', encoding='utf-8-sig') as f:

    while subjects_num == 20 and count_num < 4:    # 多线程分页获取时采用
        # while subjects_num == 20:  # and count_num < 4
        # url = "https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%83%AD%E9%97%A8&sort=recommend&page_limit=" + str(limit) + "&page_start=" + str(start_page)
        url = "https://movie.douban.com/j/search_subjects?type=tv&tag=" + tag + "&sort=recommend&page_limit=" + str(limit) + "&page_start=" + str(start_page)
        # 获取每列的数量，小于20个，下次循环结束
        # subjects_num = douban_subjects(url)
        print("thread:", thread_id+1, url)
        # logging.info("thread:", thread_id+1, url)
        # 从列表页获取详情页URL
        """
          result[0]: list {detail_urllist,subject_id}
          result[1]: 每页节目数量
        """
        try:
            result = get_detail_url(url)
            # print(result[0], result[1])
            subjects_num = result[1]
            detail_url = result[0]
        except Exception as ex_result:
            print("查询的detail_url异常：", str(ex_result))
        if subjects_num == -1:
            if len(detail_url) == 0:
                break
            else:
                subjects_num = 20
                # 进入下一页
                start_page += 20
                time.sleep(3)
                continue
        # 解析详情页
        __parser_douban__(detail_url)
        # 下一页
        start_page += 20
        count_num += 1


"""
      v 1.0 线程分配
      for i in range(5):
      if i == 1:
          start_page = 80
      elif i == 2:
          start_page = 160
      elif i == 3:
          start_page = 240
      elif i == 4:
          start_page = 320
      t = threading.Thread(target=run_thd, args=(i, start_page))
      t.start()
  """
# 多线程运行
if __name__ == '__main__':
    # url = 'https://movie.douban.com/subject/24773958/'
    # url = "https://movie.douban.com/explore#!type=movie&tag=%E7%83%AD%E9%97%A8&sort=rank&page_limit=20&page_start=0"
    # page_start 设置为分页limit值，即为下一页分页
    # {"subjects":[{"episodes_info":"","rate":"5.9","cover_x":1688,"title":"永恒族","url":"https:\/\/movie.douban.com\/subject\/30223888\/","playable":false,"cover":"https://img1.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2677303737.jpg","id":"30223888","cover_y":2500,"is_new":false},{"episodes_info":"","rate":"5.7","cover_x":4000,"title":"黑客帝国：矩阵重启","url":"https:\/\/movie.douban.com\/subject\/34801038\/","playable":false,"cover":"https://img3.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2844387600.jpg","id":"34801038","cover_y":5929,"is_new":false},{"episodes_info":"","rate":"6.2","cover_x":1400,"title":"精灵旅社4：变身大冒险","url":"https:\/\/movie.douban.com\/subject\/30472643\/","playable":false,"cover":"https://img3.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2659301260.jpg","id":"30472643","cover_y":2100,"is_new":true},{"episodes_info":"","rate":"6.3","cover_x":1170,"title":"奥斯维辛报告","url":"https:\/\/movie.douban.com\/subject\/34983370\/","playable":false,"cover":"https://img3.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2724272050.jpg","id":"34983370","cover_y":1728,"is_new":true},{"episodes_info":"","rate":"7.0","cover_x":800,"title":"神秘魔法部","url":"https:\/\/movie.douban.com\/subject\/35366862\/","playable":false,"cover":"https://img2.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2636895831.jpg","id":"35366862","cover_y":1200,"is_new":true},{"episodes_info":"","rate":"7.6","cover_x":1080,"title":"不要抬头","url":"https:\/\/movie.douban.com\/subject\/34884712\/","playable":false,"cover":"https://img2.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2730833093.jpg","id":"34884712","cover_y":1920,"is_new":false},{"episodes_info":"","rate":"8.7","cover_x":1000,"title":"杰伊·比姆","url":"https:\/\/movie.douban.com\/subject\/35652715\/","playable":false,"cover":"https://img2.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2734251152.jpg","id":"35652715","cover_y":1500,"is_new":false},{"episodes_info":"","rate":"6.5","cover_x":3000,"title":"魔法满屋","url":"https:\/\/movie.douban.com\/subject\/35134724\/","playable":false,"cover":"https://img9.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2807936075.jpg","id":"35134724","cover_y":4188,"is_new":false},{"episodes_info":"","rate":"7.8","cover_x":2764,"title":"沙丘","url":"https:\/\/movie.douban.com\/subject\/3001114\/","playable":true,"cover":"https://img9.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2687443734.jpg","id":"3001114","cover_y":4096,"is_new":false},{"episodes_info":"","rate":"7.1","cover_x":1012,"title":"欢乐好声音2","url":"https:\/\/movie.douban.com\/subject\/26962981\/","playable":false,"cover":"https://img3.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2732782860.jpg","id":"26962981","cover_y":1500,"is_new":true},{"episodes_info":"","rate":"7.8","cover_x":4048,"title":"法兰西特派","url":"https:\/\/movie.douban.com\/subject\/30300279\/","playable":false,"cover":"https://img9.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2634539726.jpg","id":"30300279","cover_y":6285,"is_new":false},{"episodes_info":"","rate":"6.1","cover_x":1785,"title":"古董局中局","url":"https:\/\/movie.douban.com\/subject\/26996619\/","playable":true,"cover":"https://img1.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2734316987.jpg","id":"26996619","cover_y":2500,"is_new":false},{"episodes_info":"","rate":"7.9","cover_x":595,"title":"世界上最糟糕的人","url":"https:\/\/movie.douban.com\/subject\/34447553\/","playable":false,"cover":"https://img9.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2668815075.jpg","id":"34447553","cover_y":842,"is_new":false},{"episodes_info":"","rate":"8.0","cover_x":2000,"title":"驾驶我的车","url":"https:\/\/movie.douban.com\/subject\/35235502\/","playable":false,"cover":"https://img2.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2639821491.jpg","id":"35235502","cover_y":2829,"is_new":false},{"episodes_info":"","rate":"5.9","cover_x":4000,"title":"铁道英雄","url":"https:\/\/movie.douban.com\/subject\/35205446\/","playable":true,"cover":"https://img9.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2684720964.jpg","id":"35205446","cover_y":5600,"is_new":false},{"episodes_info":"","rate":"7.9","cover_x":2000,"title":"天鹅挽歌","url":"https:\/\/movie.douban.com\/subject\/35258381\/","playable":false,"cover":"https://img9.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2717809625.jpg","id":"35258381","cover_y":3000,"is_new":false},{"episodes_info":"","rate":"7.7","cover_x":1500,"title":"犬之力","url":"https:\/\/movie.douban.com\/subject\/33437152\/","playable":false,"cover":"https://img1.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2678298618.jpg","id":"33437152","cover_y":2222,"is_new":false},{"episodes_info":"","rate":"8.3","cover_x":1500,"title":"倒数时刻","url":"https:\/\/movie.douban.com\/subject\/30279170\/","playable":false,"cover":"https://img9.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2690712224.jpg","id":"30279170","cover_y":2222,"is_new":false},{"episodes_info":"","rate":"6.1","cover_x":1349,"title":"超能敢死队","url":"https:\/\/movie.douban.com\/subject\/26838236\/","playable":false,"cover":"https://img9.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2685536675.jpg","id":"26838236","cover_y":2000,"is_new":false},{"episodes_info":"","rate":"8.7","cover_x":1448,"title":"花束般的恋爱","url":"https:\/\/movie.douban.com\/subject\/34874432\/","playable":false,"cover":"https://img9.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2623936924.jpg","id":"34874432","cover_y":2048,"is_new":false}]}
    # rate：评分
    """
        每个线程4页数据，每页最大数量20条,页数从0开始
        线程数 = 总页数/ 20 * 4
        例如：线程数 = 400 / 20 * 4 =5
    """
    start_time = time.time()
    # 多线程,5个线程，每个线程取4页数据（）
    start_page = 0
    # 总页面（有数据，可以不满2个）
    totle_page = 480

    # v2.0 线程分配
    for i in range(int(totle_page/(20 * 4) + 1)):
        # 页数，初始值为0
        start_page = i * 20 * 4
        t = threading.Thread(target=run_thd, args=(i, start_page))
        t.start()

    while threading.active_count() != 1:
        pass
    print("主线程结束")
    end_time = time.time()
    print("运行时长：", end_time - start_time)


