# -*- coding: utf-8 -*-
# @Time : 2023/5/5 11:11
# @Author :Henry L
# @File : YN.py
# @Software : PyCharm
import csv
import time
import requests
from bs4 import BeautifulSoup


def downloadfile(_url, _type, output_path, chunk_size=512, timeout=30):
    """Stream a remote file to ``output_path``.

    Parameters:
        _url: absolute URL of the file to download.
        _type: ``'get'`` issues a GET request; any other value issues a POST.
        output_path: local path the response body is written to.
        chunk_size: bytes read per iteration (default 512).
        timeout: connect/read timeout in seconds (new parameter with a
            default, so existing callers are unaffected).

    Raises:
        requests.HTTPError: on a non-2xx response (instead of silently
            saving the server's error page to disk).

    Uses the module-level ``headers`` dict for every request.  The streamed
    response is context-managed so the underlying connection is released
    even if writing to disk fails.
    """
    method = requests.get if _type == 'get' else requests.post
    with method(url=_url, headers=headers, stream=True, timeout=timeout) as response:
        response.raise_for_status()
        with open(output_path, mode='wb') as f:
            for chunk in response.iter_content(chunk_size):
                f.write(chunk)

def fileName(_name):
    """Map a link label or URL to a file-extension tag.

    Substrings are tested in priority order, so "xlsx" wins over "xls" and
    "docx" over "doc"; a link containing "http" with no known extension is
    tagged "html".  Returns None when nothing matches.
    """
    ext_by_token = (
        ("xlsx", "xlsx"),
        ("docx", "docx"),
        ("doc", "doc"),
        ("xls", "xls"),
        ("pdf", "pdf"),
        ("rar", "rar"),
        ("http", "html"),
    )
    for token, ext in ext_by_token:
        if token in _name:
            return ext
    return None

# Base host used to resolve relative detail-page and attachment links.
host = "http://www.ynyyzb.com.cn"
# Paginated listing endpoint; the page number is appended at request time.
url = "http://www.ynyyzb.com.cn/showListZCFG.html?catalogId=4&type=&pageNow="
# Request headers shared by every request in this script (also read by
# downloadfile through the module-level name `headers`).
# NOTE(review): the hard-coded session Cookie will eventually expire —
# confirm whether the server actually requires it.
headers = {
    "Accept": "*/*",
    "Accept-Encoding": "gzip, deflate",
    "Connection": "keep-alive",
    "Host": "www.ynyyzb.com.cn",
    "Upgrade-Insecure-Requests": '1',
    "Referer": "http://www.ynyyzb.com.cn/showListZCFG.html?catalogId=3",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.58",
    "x-requested-with": "XMLHttpRequest",
    "Cookie": "JSESSIONIDSCHEDULE=Ixvp5CziQTx02NyLj2ShdOjXCHqkG0dH6rLZp1AjQ1qYP7mVSPDc!740810076",
    "Origin": "http://www.ynyyzb.com.cn"
}
# Crawl listing pages 1..72 and append one CSV row per article:
# [detail href, title, content tag, source, release date, view count,
# attachment file names].
for num in range(1, 73):
    response = requests.get(url + str(num), headers=headers)
    soup = BeautifulSoup(response.content, 'lxml')
    # Re-opened in append mode each page so partial progress survives a crash.
    with open('YN.csv', 'a+', encoding='utf-8', newline='') as file:
        writer = csv.writer(file)
        print(num)  # progress indicator: current page number
        for title in soup.find_all(class_='newscontentleft'):
            # Default source name, overridden below when the detail page
            # carries its own metadata line.
            source = '云南省政府采购和出让中心'
            release = ''
            view_time = ''
            href = title.find('a')['href']
            attachments = []
            # Fetch the article detail page (href is relative to `host`).
            detail_response = requests.get(url=host + href, headers=headers)
            detail = BeautifulSoup(detail_response.content, 'lxml')
            content = detail.find(class_="content")
            # print(content)
            # NOTE(review): the text accumulation below is commented out, so
            # content_ stays empty and is never used.
            content_ = ''
            for p in content.find_all("p"):
                # content_ = content_ + p.text.replace('\r', '').replace('\n', '').replace(u'\xa0', '') + '\n'
                attachment = p.find_all("a")
                if len(attachment) > 0:
                    for attachment_ in attachment:
                        # Only links labelled as attachments ("附件") count.
                        if "附件" not in attachment_.text:
                            continue
                        fileType = fileName(attachment_.text.lower())
                        if fileType is None:
                            continue
                        # Skip mobile-site, bare-host, and third-party links.
                        if "http://m.ynyyzb.com.cn/" in attachment_['href'] or host == attachment_['href'] or "www.gdmede.com.cn" in attachment_['href']:
                            continue
                        # if host not in attachment_['href']:
                            # downloadfile(host + attachment_['href'], "get", attachment_['href'][-36:-1] + '.' + fileType)
                        # else:
                            # downloadfile(attachment_['href'], "get", attachment_['href'][-36:-1] + '.' + fileType)
                        # Synthetic file name from the URL's trailing 35 chars
                        # (matches the names the commented downloads would use).
                        attachments.append(attachment_['href'][-36:-1] + '.' + fileType)
            # Metadata strip: expects exactly three fields in order —
            # source / release time / view count.
            newsmess = content.find_all(class_='newsmess')
            if len(newsmess) == 3:
                source = newsmess[0].text.replace('信息来源：', '').replace('\r', '').replace('\n', '').replace(' ', '')
                release = newsmess[1].text.replace('发布时间：', '').replace('\r', '').replace('\n', '').replace(' ', '')
                view_time = newsmess[2].text.replace('阅读次数：', '').replace('\r', '').replace('\n', '').replace(' ', '')
            name = title.find('a').text.replace('\r', '').replace('\n', '').replace(' ', '')
            # NOTE(review): `content` written here is the BeautifulSoup Tag
            # (serialized as raw HTML), not the cleaned text in `content_` —
            # confirm this is intended.
            writer.writerow([href, name, content, source, release, view_time, attachments])
    time.sleep(1)  # throttle: one page per second to be polite to the server


