# -*- coding:UTF-8 -*-
# 开发人员: limenghui
# 开发时间：2024-04-08 14:44
# 文件名称：
# 开发工具：pycharm


import math
import time
from datetime import date, timedelta

import pandas as pd
import requests
from bs4 import BeautifulSoup

# HTTP request headers sent with every call to the procurement site;
# the XHR + JSON content-type headers mimic the site's own AJAX requests.
h = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
    'Content-Type': 'application/json;charset=utf-8'
}
# Accumulator for every scraped row; appended to by get_content() and
# turned into a DataFrame at the end of the __main__ section.
all_lists = []


# 获取数据
def get_content(json_data, chanel, type):
    """Parse one page of notice JSON and append one row per notice to the
    module-level ``all_lists`` accumulator.

    Parameters
    ----------
    json_data : dict
        Decoded JSON response; ``json_data['data']`` is expected to be a
        list of notice dicts with keys 'title', 'noticeTime', 'htmlpath',
        'regionName', 'purchaseNature' and 'content'.
    chanel, type : str
        Channel id and notice type code. Currently unused inside the
        function; kept for interface compatibility with the caller.

    Returns
    -------
    list
        The shared ``all_lists`` (also mutated in place).

    Notes
    -----
    Relies on the module-level ``purchaseNature_dic`` mapping being defined
    (it is created in the ``__main__`` section) — TODO confirm this is only
    ever called when running as a script.
    """
    data = json_data['data']
    print("data:", len(data))
    for tmp in data:
        # Project title
        title = tmp['title']
        print(title)
        # Publication time: keep only the YYYY-MM-DD date prefix
        notice_date = tmp['noticeTime']
        if len(notice_date) >= 10:
            notice_date = notice_date[:10]
        # Detail-page URL for the notice
        htmlpath = tmp['htmlpath']
        pro_url = f'http://www.plap.mil.cn/freecms{htmlpath}?noticeType=00102'
        # Award location
        regionName = tmp['regionName']
        # Project category; .get() yields None for unmapped codes
        purchaseNature = purchaseNature_dic.get(tmp['purchaseNature'])
        # Body text: strip HTML tags, then collapse all whitespace runs into
        # single newlines. (split() removes every space, so no further
        # space-stripping is needed.)
        soup = BeautifulSoup(tmp['content'], 'html.parser')
        text = '\n'.join(soup.get_text().split())
        # Named 'row' rather than 'list' to avoid shadowing the builtin.
        row = [notice_date, regionName, title, pro_url, purchaseNature, text]
        all_lists.append(row)
    time.sleep(1)  # be polite to the server between pages
    return all_lists


if __name__ == '__main__':
    start = time.time()
    # Query date window (YYYY-MM-DD). The commented lines preserve the
    # original rolling-window computation for reference.
    # start_time = (date.today() + timedelta(days=-7)).strftime("%Y-%m-%d")
    start_time = '2024-09-09'
    end_time = '2024-09-16'
    # end_time = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
    # Project category code -> label (read globally by get_content)
    purchaseNature_dic = {"1": "物资", "2": "工程", "3": "服务"}
    # Notice types: 00102 = procurement result notice, 001051 = single-source notice
    type_dic = {'00102': '采购结果公示', '001051': '单一来源公示'}
    # Channel UUID -> channel display name
    chanel_dic = {'abbb350b-9b39-4a22-a4e3-94f54dffd929': '集中采购(线下采购)',
                  'e0172398-23dd-47be-a092-14c67a92bd4f': '自行采购(线下采购)',
                  'a2399540-ac44-4c68-a612-9bdbdb3634e5': '集中采购(电子采购)',
                  'a9d2cf13-a83a-402e-b432-b5aa1e904409': '自行采购(自采平台)'
                  }
    start_page = 1
    base_url = 'http://www.plap.mil.cn/freecms/rest/v1/notice/selectInfoMoreChannel.do?&siteId=404bb030-5be9-4070-85bd-c94b1473e8de&channel={}' \
               '&currPage={}&pageSize=10&noticeType={}&operationStartTime={}%2000:00:00&operationEndTime={}%2000:00:00'
    # 'notice_type' instead of 'type' to avoid shadowing the builtin.
    for chanel in chanel_dic:
        for notice_type in type_dic:
            try:
                # First request only determines the total record count.
                res = requests.get(url=base_url.format(chanel, start_page, notice_type, start_time, end_time),
                                   headers=h)
                time.sleep(1)
                json_data = res.json()
                total = json_data['total']
                print(f'{chanel_dic[chanel]}_{type_dic[notice_type]}_{total}')
                pages = math.ceil(total / 10) + 1
                if total == 0:
                    print(f'{chanel_dic[chanel]}_{type_dic[notice_type]}没数据')
                else:
                    for i in range(1, pages):
                        # Up to 3 attempts per page with a fixed back-off.
                        max_attempts = 3
                        wait_time = 5
                        for attempt in range(max_attempts):
                            try:
                                print(f"第{i}页")
                                res_other = requests.get(
                                    url=base_url.format(chanel, i, notice_type, start_time, end_time),
                                    headers=h)
                                if res_other.status_code == 200:
                                    json_other_data = res_other.json()
                                    time.sleep(1)
                                    get_content(json_other_data, chanel, notice_type)
                                    break
                                else:
                                    print(f'Received non-200 response:{res_other.status_code}')
                                    res_other.raise_for_status()
                            except requests.exceptions.HTTPError as e:
                                print(f'HTTPError occurred: {e}, retrying in {wait_time} seconds...')
                                # BUG FIX: this branch previously retried
                                # immediately without waiting.
                                time.sleep(wait_time)
                            except requests.exceptions.RequestException as e:
                                print(f'RequestException occurred: {e}, retrying in {wait_time} seconds...')
                                time.sleep(wait_time)
                            if attempt >= max_attempts - 1:
                                print('Max attempts reached. Exiting.')
                                break
            except Exception as e:
                # BUG FIX: was a silent `pass`. Keep the best-effort behaviour
                # (one bad channel must not abort the run) but surface the error.
                print(f'{chanel_dic[chanel]}_{type_dic[notice_type]} failed: {e}')
    try:
        data = pd.DataFrame(all_lists)
        # BUG FIX: column was '项目所在地a' (stray 'a'), inconsistent with the
        # empty-frame fallback below.
        data.columns = ['发布时间', '项目所在地', '中标项目', '项目网址', '项目类别', '正文']
        # Keep 物资 + 服务 only: drop engineering (工程) records.
        data = data[data['项目类别'] != '工程']
        data.drop_duplicates(subset=['中标项目'], keep='first', inplace=True)  # dedupe by title
        data.reset_index(drop=True, inplace=True)
    except Exception as e:
        # No site updates on weekends -> empty scrape; fall back to an
        # empty frame so the export below still works.
        print(e)
        data = pd.DataFrame(columns=('发布时间', '项目所在地', '中标项目', '项目网址', '项目类别', '正文'))

    path = 'D:/python_demo/CITIC/军队政法/军队采购网数据/'
    data.columns = ['RELEASE_TIME', 'PROJECT_AREA', 'PROJECT_NAME', 'PROJECT_WEB', 'PROJECT_TYPE', 'CONTENT']
    # Partition column: end date as YYYYMMDD
    data['dt'] = end_time.replace('-', '')
    data.to_csv(path + '中国军队采购网_{0}.csv'.format(end_time.replace('-', '')),
                index=False, encoding='utf-8')
    count_time = time.time() - start
    print("=" * 50)
    print(f"获得数据条数:{data.shape[0]}")
    print(f"共运行{count_time}秒,大概{count_time / 60}分钟")
