import requests
from bs4 import BeautifulSoup
import time
import pymysql
import os
import chardet

# Date string (e.g. "2024/01/31") used as the per-day image subdirectory name.
local = time.strftime("%Y/%m/%d")
# Desktop-browser User-Agent sent with every request so the site serves the
# normal HTML pages instead of blocking the scraper.
headers = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"
}


def download_page(url):
    """Fetch *url* and return the body as text using requests' default
    charset detection.

    Args:
        url: Absolute URL to fetch.

    Returns:
        The decoded response body (str).
    """
    # requests has no default timeout; without one a dead connection would
    # hang the whole scraper forever.
    r = requests.get(url, headers=headers, timeout=30)
    return r.text


def download_page_1(url):
    """Fetch *url* and return the body decoded as UTF-8.

    Like download_page(), but forces the encoding — used for detail pages
    whose declared charset is unreliable.

    Args:
        url: Absolute URL to fetch.

    Returns:
        The UTF-8-decoded response body (str).
    """
    r = requests.get(url, headers=headers, timeout=30)
    # The original assigned 'GBK' and immediately overwrote it with 'utf-8';
    # the GBK assignment was dead code and has been removed.
    r.encoding = 'utf-8'
    return r.text


# Download a single image file.
def download_img(picUrl, img_name):
    """Download the image at *picUrl* and save it to *img_name*.

    Args:
        picUrl: URL of the image to fetch.
        img_name: Destination file path (parent directory must exist).
    """
    read = requests.get(picUrl, headers=headers, timeout=30)
    # 'with' guarantees the handle is closed even if write() raises.
    with open(img_name, 'wb') as f:
        f.write(read.content)


# Per entry: page number, module, title, synopsis, date, detail content.
def get_content(html, page):
    """Parse one search-result listing page and process every entry on it.

    For each list item: downloads its thumbnail image, appends a formatted
    summary to the text log via save_txt(), and inserts the entry into
    MySQL via save_db().

    Args:
        html: HTML of one search-result listing page.
        page: 1-based page number (only used in the text output).
    """
    output = """
第{}页
    模块：{} 
    标题：{} 
    简介：{}
    图片：{}
    日期：{} 
    详情：{}
   
------------------------------------

"""

    soup = BeautifulSoup(html, 'html.parser')
    con_list = soup.find_all('li', class_="indent")
    for i in con_list:
        module = i.find('div',
                        class_='meta pw').find('span').find('a').get_text()
        a_info = i.find('div', class_='pic').find('a')
        if a_info is not None:
            title = a_info['title']
        else:
            title = ''

        intraduction = i.find('div', class_='cont').get_text()
        date = i.find('div', class_='time').get_text()

        # Thumbnail URL. The title lookup above already shows a_info can be
        # None for some entries; the original repeated the unguarded
        # .find('a').find('img') chain here and crashed with AttributeError
        # on those entries. Skip image-less entries instead.
        img_tag = a_info.find('img', class_='imgwh') if a_info else None
        if img_tag is None or not img_tag.has_attr('data-original'):
            continue
        picUrl = img_tag['data-original']

        # Images are grouped under /usr/local/images/<YYYY/MM/DD>/.
        file_path = '/usr/local/images/'
        file_path += local + '/'

        # Create the directory tree on first use.
        if not os.path.exists(file_path):
            os.makedirs(file_path)

        pic_name = file_path + os.path.basename(picUrl)
        download_img(picUrl, pic_name)

        # Fetch and extract the article body from the detail page.
        link_url = i.find('div', class_='title').find('a')['href']
        detail_html = download_page_1(link_url)
        detail_soup = BeautifulSoup(detail_html, 'html.parser')
        detail_txt = detail_soup.find('div',
                                      class_='cont',
                                      id="viewcontentbox")

        save_txt(
            output.format(page, module, title, intraduction, picUrl, date,
                          detail_txt))

        # Persist to the database.
        save_db(title, picUrl, intraduction, detail_txt)


def save_db(title, imageAddress, synopsis, content):
    """Insert one scraped article into the tbl_news table.

    Args:
        title: Article title.
        imageAddress: URL of the article's thumbnail image.
        synopsis: Short introduction text.
        content: Detail-page HTML (BeautifulSoup Tag or str); stored as text.
    """
    # Open the database connection.
    db = pymysql.connect(host="localhost",
                         user="root",
                         password="123456",
                         database="gaoyi_news")
    try:
        cursor = db.cursor()

        # Parameterized INSERT. The previous str.format() version embedded
        # scraped text directly in the SQL string, which broke on any quote
        # character and was open to SQL injection.
        sql = ("INSERT INTO tbl_news"
               "(title_id, sort_id, title, image_address, synopsis,"
               " create_time, content)"
               " VALUES(null, null, %s, %s, %s, NOW(), %s)")

        try:
            # str(content) because content may be a BeautifulSoup Tag.
            cursor.execute(sql,
                           (title, imageAddress, synopsis, str(content)))
            db.commit()
            print('插入成功')
        except Exception as e:
            # Roll back the transaction on any failure; best-effort insert.
            db.rollback()
            print(e)
    finally:
        # Close the connection even if cursor creation or execute raised.
        db.close()


def save_txt(*args):
    """Append each text fragment in *args* to the log file '_chinaagv.txt'.

    Args:
        *args: Strings to append, written in order.
    """
    # Open the file once instead of re-opening it for every argument.
    with open('_chinaagv.txt', 'a', encoding='utf-8') as f:
        for text in args:
            f.write(text)


def get_pages():
    """Return the total number of search-result pages reported by the site.

    Reads the 'totalrow' span on the first search page; its text is split
    at '/' and the final character of the right-hand part is dropped
    (presumably a trailing unit character — confirm against the live page).

    Returns:
        The page count as a string.
    """
    listing_url = 'https://www.chinaagv.com/news/search.php?keyword=AGV'
    listing_html = download_page(listing_url)
    page_soup = BeautifulSoup(listing_html, 'html.parser')
    total_span = page_soup.find('div', class_='w current').find(
        'span', class_='totalrow')
    _, _, pages_text = total_span.get_text().partition('/')
    # Drop the trailing character after the page count.
    pages_text = pages_text[:-1]
    print(pages_text)
    return pages_text


def main():
    """Crawl every search-result page and process its entries.

    Reads the total page count from the site, then fetches each listing
    page in turn and hands its HTML to get_content().
    """
    # The original computed get_pages() and then discarded the result by
    # overwriting it with a debug value of 2; use the real count. The range
    # is made inclusive (+1) so the last page is no longer skipped.
    total_pages = int(get_pages())
    for page_no in range(1, total_pages + 1):
        url = ('https://www.chinaagv.com/news/search.php'
               '?page={}&total=2738&keyword=AGV').format(page_no)
        html = download_page(url)
        get_content(html, page_no)


# Script entry point: run the full crawl when executed directly.
if __name__ == '__main__':
    main()
