import os
import time
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
from lxml import etree

# Date stamp (e.g. "2024_01_31") used as a prefix for downloaded image files.
local = time.strftime("%Y_%m_%d")
# Desktop-browser User-Agent sent with every request so the sites treat the
# scraper as a normal visitor.
headers = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"
}

# Root URL of the product site being scraped.
base_url = 'http://www.gzdjzn.com/'


def download_page(url):
    """GET *url* with the browser User-Agent and return the body as text.

    Network errors propagate to the caller as requests exceptions.
    """
    # BUG fix: requests has no default timeout, so the scraper could hang
    # forever on an unresponsive server.
    r = requests.get(url, headers=headers, timeout=30)
    # The sites serve Chinese text; when no charset header is present,
    # requests falls back to ISO-8859-1 and produces mojibake — sniff the
    # real encoding in that case only.
    if r.encoding is None or r.encoding.lower() == 'iso-8859-1':
        r.encoding = r.apparent_encoding
    return r.text


# 获取图片
def download_img(picUrl, title):
    """Download the image at *picUrl* and write the raw bytes to the file
    path *title*."""
    read = requests.get(picUrl, headers=headers, timeout=30)
    # BUG fix: the original open()/write()/close() leaked the file handle
    # if write() raised; the context manager always closes it.
    with open(title, 'wb') as f:
        f.write(read.content)


# 页码 模块 标题 简介 日期
def get_content(base_url, page):
    """Scrape the product listing at *base_url* and append one formatted
    record per product (name, image URLs, intro, detail text) to the
    output text file via save_txt().

    Parameters
    ----------
    base_url : str
        Site root; also used to resolve relative product and image links.
    page : int
        Page number — only used to label the output record (the listing
        URL itself is not paginated here).
    """
    html = download_page(base_url)

    output = """
第{}页
    型号：{}
    图片：{}
    简介：{}
    详情：{}

------------------------------------

"""

    soup = BeautifulSoup(html, 'html.parser')
    products = soup.find_all('ul', class_='product_video_ul left clear')
    for project in products:
        for item in project.find_all('li'):
            link = item.find('a')
            # urljoin resolves both "path" and "/path" hrefs against the
            # site root without producing double slashes.
            url = urljoin(base_url, link['href'])
            content = link.find('div', class_='product_video_content')
            name = content.find('h3').get_text()
            introduction = content.find('p').get_text()

            detail_soup = BeautifulSoup(download_page(url), 'html.parser')
            text = detail_soup.find('div', class_='main_details_js article')
            # Flatten the detail markup into plain text.
            details = etree.HTML(text.text).xpath('string(.)')

            file_path = _ensure_image_dir(name)
            image = _download_images(detail_soup, base_url, file_path)

            save_txt(output.format(page, name, image, introduction, details))


def _ensure_image_dir(name):
    """Create ./image/<name>/ if it does not exist and return the path
    (with a trailing separator)."""
    # BUG fix: os.path.join + os.sep instead of hard-coded "\\" so the
    # scraper also runs on non-Windows systems.
    file_path = os.path.join(os.getcwd(), 'image', name) + os.sep
    print(file_path)
    try:
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        else:
            print("目录已存在！！！")
    except OSError as msg:
        # BUG fix: the original did `"..." + msg`, which raises TypeError
        # when concatenating str + exception; convert explicitly.
        print("新建目录失败：" + str(msg))
    return file_path


def _download_images(detail_soup, base_url, file_path):
    """Download every image in the #onlickImg gallery into *file_path* and
    return the image URLs joined by ';' ('' when there is no gallery)."""
    gallery = detail_soup.find('li', id='onlickImg')
    if gallery is None:
        return ''
    image = ''
    for img in gallery.find_all('img'):
        src = img.get('data-original')
        if not src:
            continue
        # BUG fix: the original used base_url.lstrip('/'), which strips
        # *leading* slashes (a no-op on "http://...") and left a double
        # slash in the URL; urljoin resolves the relative path correctly.
        picUrl = urljoin(base_url, src)
        print(picUrl)
        pic_name = local + '_' + os.path.basename(picUrl)
        try:
            download_img(picUrl, file_path + pic_name)
        except Exception:
            # Best-effort: a single failed image no longer discards the
            # URLs already collected (the original reset image to '').
            continue
        image += picUrl + ';'
    return image


def save_txt(*args):
    """Append each string in *args* to the scrape output text file."""
    # Open the file once instead of re-opening it for every argument.
    with open('_gzdjzn.com.txt', 'a', encoding='utf-8') as f:
        f.writelines(args)


def get_pages():
    """Fetch the AGV news search page and return the total page count as a
    string (parsed from the "totalrow" span, which reads like ".../NN页")."""
    listing_url = 'https://www.chinaagv.com/news/search.php?keyword=AGV'
    page_soup = BeautifulSoup(download_page(listing_url), 'html.parser')
    current = page_soup.find('div', class_='w current')
    total_row = current.find('span', class_='totalrow').get_text()
    # Keep what follows the '/' and drop the trailing character (the unit).
    total_pages = total_row.partition('/')[2][:-1]
    print(total_pages)
    return total_pages


def main():
    """Entry point: scrape each listing page in turn and append the
    records to the output text file."""
    # Hard-coded page count; switch to the discovered value once the
    # listing is actually paginated:
    #   total_pages = int(get_pages())
    total_pages = 2

    # BUG fix: range(1, total_pages) stopped one page early — pages are
    # numbered 1..total_pages inclusive.
    for page in range(1, total_pages + 1):
        get_content(base_url, page)


if __name__ == '__main__':
    main()
