# -*- coding: utf-8 -*-
'''''''''
多线程和多进程最大的不同在于，多进程中，同一个变量，各自有一份拷贝存在于每个进程中，互不影响，
而多线程中，所有变量都由所有线程共享，所以，任何一个变量都可以被任何一个线程修改，
因此，线程之间共享数据最大的危险在于多个线程同时改一个变量，把内容给改乱了。

'''''''''

import requests
from bs4 import BeautifulSoup
import re
from datetime import datetime
import time
import threading
from pacong.pachong_config import db,cursor,headers
from pycacho.cachobase.logger import Logger
logger = Logger("doupanTop").get_log()


lock = threading.Lock()

def createTable():
    sql1 = 'drop table if exists doupan_top250;'
    sql2 = 'create table doupan_top250(No INT(11) NOT NULL AUTO_INCREMENT,article_title VARCHAR(200),article_url VARCHAR(200),comment_star float,comment_num int,publish_info VARCHAR(200),ctime timestamp(2),PRIMARY KEY (No));'
    cursor.execute(sql1)
    cursor.execute(sql2)
    db.commit()


def data_to_mysql(article_title,article_url,comment_star,comment_num,publish_info,ctime):
    sql = "insert into doupan_top250(article_title,article_url,comment_star,comment_num,publish_info,ctime) values('%s','%s','%s','%s','%s','%s')" % (article_title,article_url,comment_star,comment_num,publish_info,ctime)
    try:
        cursor.execute(sql)
    except Exception as e:
        # 发生错误时回滚
        db.rollback()
        print(str(e))
    else:
        db.commit()  # 事务提交
        logger.info('事务处理成功')


def get_one_page(p):
    url = 'https://book.douban.com/top250?start=' + str(p)
    # 获取网页内容，返回html数据
    response = requests.get(url, headers=headers)
    #response.encoding = 'utf-8'
    logger.info("当前爬取的网址为：%s" , url)
    lock.acquire()                           # 线程锁lock请求
    save_one_page(response.text)
    lock.release()                           # 线程锁lock释放


def save_one_page(html):
    article_data = []
    soup = BeautifulSoup(html, 'lxml')
    for title in soup.find_all(name='div', attrs={"class": "pl2"}):
        for a in title:
            if str(type(a)) == "<class 'bs4.element.Tag'>":
                if a.get('href'):
                    article = []
                    article_title = a.get('title')
                    article_url = a.get('href')
                    article.append(article_title)
                    article.append(article_url)
                    article_data.append(article)

    publish_info = re.findall(r'<p class="pl">(.*)</p>', html)
    comment_star = re.findall(r'<span class="rating_nums">(.*)</span>', html)
    comment_num = re.findall(r'(\d.*)人评价', html)  # 数字开始的非换行信息，“人评价”结尾的信息
    # comment_best = re.findall(r'<span class="inq">(.*)</span>', html)   # 有缺失p=50
    # print(comment_best)

    for i in range(len(article_data)):
        # print(i,article_data[i][0],article_data[i][1],comment_star[i],comment_num[i],publish_info[i],datetime.now())
        data_to_mysql(article_data[i][0], article_data[i][1], comment_star[i], comment_num[i], publish_info[i],
                      datetime.now())

if __name__ == '__main__':
    ctime = time.time()
    #is_done = threading.Event()  设置进程等待时间
    createTable()
    p = 0
    threads = []
    while p < 226:
        threads.append(threading.Thread(target=get_one_page, args=(p,)))
        p = p +25

    for t in threads:
        t.start()
    for t in threads:
        t.join()
        # is_done.wait(timeout=0.1)  设置进程等待时间

    logger.info("总共耗时：%s ", time.time() - ctime)