"""
@Author : 合肥一元数智教育科技有限公司
@Date :  2025/7/9 9:27
@Description : 
 获取大图网数据
    1. 一级页面
         二级页面的链接   预览图地址   图片标题
    2. 二级页面
        素材信息    大小  宽高  作者  等信息
"""
import csv
import random
import time

import requests
import re


class DaTuSpider:
    """Scraper for daimg.com "tech" photo listings.

    Walks the paginated listing (level-1) pages, follows each item's detail
    (level-2) page, and writes the collected metadata to ``datu2.csv``.
    """

    def __init__(self):
        # Level-1 (listing) URL template; format() fills in the page number.
        self.url = 'http://www.daimg.com/photo/tech/list_68_{}.html'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36',
            'Cookie': '__51cke__=; __tins__21295873=%7B%22sid%22%3A%201752021583859%2C%20%22vd%22%3A%208%2C%20%22expires%22%3A%201752023579442%7D; __51laig__=8'
        }

    def get_html(self, url):
        """Fetch *url* and return its body decoded as GBK (the site's encoding).

        A timeout keeps a dead server from hanging the spider forever, and
        undecodable bytes are replaced instead of raising UnicodeDecodeError.
        """
        response = requests.get(url, headers=self.headers, timeout=15)
        return response.content.decode('gbk', errors='replace')

    def parse_one_html(self, html):
        """Extract (detail_url, preview_img_src, title) tuples from a listing page."""
        one_reg = '<li><a target="_blank" href="(.*?)" .*?><img .*? src="(.*?)" .*?>(.*?)</a></li>'
        return re.findall(one_reg, html, re.S)

    @staticmethod
    def _after_colon(text):
        """Return the part of *text* after the first fullwidth colon.

        E.g. '素材大小：3.5 MB' -> '3.5 MB'.  Returns '' when no colon is
        present, instead of raising IndexError like ``split('：')[1]`` would.
        """
        return text.partition('：')[2]

    def parse_two_page(self, r_list):
        """Visit each detail page referenced by *r_list* and collect metadata.

        Each element of *r_list* is a (detail_url, img_src, title) tuple as
        produced by :meth:`parse_one_html`.  Returns a list of dicts, one per
        metadata row found on the detail pages.
        """
        two_reg = '<ul class="n_list">.*?<span class="n_listr2">(.*?)</span><span class="n_listr2">(.*?)</span><span class="n_listr2">(.*?)</span><span class="n_listr2">(.*?)</span><span class="n_listr2">(.*?)</span><span class="n_listr2">(.*?)</span>.*?<span class="n_listr2">(.*?)</span>'
        # Compile once instead of re-parsing the pattern for every page.
        pattern = re.compile(two_reg, re.S)
        all_info = []
        for href, img_src, title in r_list:
            two_html = self.get_html(href)
            for fields in pattern.findall(two_html):
                # Build a fresh dict per row: the original reused one dict per
                # listing entry, so every appended row aliased the same object
                # and later rows silently overwrote earlier ones.
                all_info.append({
                    'two_page_href': href,
                    'img_src': img_src,
                    'title': title,
                    'sc_id': self._after_colon(fields[0]),
                    'sc_update_time': self._after_colon(fields[1]),
                    'sc_size': self._after_colon(fields[2]),
                    'sc_w_h': self._after_colon(fields[3]),
                    'sc_type': self._after_colon(fields[5]),
                })
        return all_info

    def save_html(self, data):
        """Persist *data* (a list of uniform dicts) to datu2.csv."""
        if not data:
            # Nothing scraped: avoid IndexError on data[0] and an empty file.
            return
        with open('datu2.csv', 'w', encoding='utf-8', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=data[0].keys())
            # Write the CSV header row, then all data rows.
            writer.writeheader()
            writer.writerows(data)

    def run(self):
        """Scrape listing pages 1-5 and write all rows to the CSV once.

        The original saved inside the loop with mode 'w', so each page
        overwrote the previous pages' rows; rows are now accumulated and
        written in a single pass at the end.
        """
        all_rows = []
        for page in range(1, 6):
            url = self.url.format(page)
            html = self.get_html(url)
            one_result = self.parse_one_html(html)
            all_rows.extend(self.parse_two_page(one_result))
            # Randomized delay between pages to be polite to the server.
            time.sleep(random.randint(1, 5))
        self.save_html(all_rows)


if __name__ == '__main__':
    # Script entry point: kick off a full scrape run.
    DaTuSpider().run()
