import requests
import time
import csv

from query_hospital_address import get_address



class HospitalCrawler:
    """Crawl the paginated hospital list of the Hunan medical-insurance site
    and append (name, level, address) rows to ``Hospital_data2.csv``.

    Fixes over the previous revision:
    - ``get_data`` no longer calls ``parse_data`` itself; ``run`` drives both,
      so each page is parsed exactly once instead of twice.
    - ``save_data`` is called once per page (the old code called it inside the
      per-hospital loop with a growing list, duplicating every earlier row).
    - Per-entry errors are reported and skipped instead of silently aborting
      the whole page via a bare ``except: pass``.
    """

    def __init__(self, start_page=3215):
        """Prepare request state.

        :param start_page: page number to resume crawling from. Defaults to
            3215 to match the previous hard-coded resume point
            (page 2251 reportedly errored on an earlier run).
        """
        # Most recently fetched JSON payload (dict); set by get_data().
        self.data = None
        # Open CSV file handle; set by run() while crawling.
        self.file = None
        # Endpoint returning the paginated hospital list.
        self.url = 'https://healthcare.hnybj.com.cn/pw-fixd/hosp/queryHospitalList'
        # Headers mimicking the site's own H5 client.
        self.headers = {
            # 'Cookie': 'SESSION=MjRhNjZiNmMtNzgzZS00N2Q5LTk1NDktYzlmMWUwZWZmODc4',
            'Origin': 'https://healthcare.hnybj.com.cn',
            'Referer': 'https://healthcare.hnybj.com.cn/pss-hunan-h5/mechanism/LocationHospitalIndex?stack-key=1d9225b8',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47',
            'channel': 'app',
        }
        # POST payload; 'pageNum' is advanced by parse_data() as pages are consumed.
        self.json_data = {
            'fixBlngAdmdvs': '',
            'fixmedinsName': '',
            'fixmedinsCode': '',
            'pageNum': start_page,
            'pageSize': 10,
            'hospLv': '',
            'sortRule': '1',
        }
        # Maps the API's two-digit hospital-level codes to readable labels.
        self.hospLv_dict = {
            '01': '三级特等',
            '02': '三级甲等',
            '03': '三级乙等',
            '04': '三级丙等',
            '05': '二级甲等',
            '06': '二级乙等',
            '07': '二级丙等',
            '08': '一级甲等',
            '09': '一级乙等',
            '10': '一级丙等',
            '11': '无等级',
            '12': '三级无等',
            '13': '二级无等',
            '14': '一级无等',
        }
        # Page numbers already crawled in this session (duplicate guard).
        self.page_set = set()

    def get_data(self):
        """Fetch the current page and store the decoded JSON in ``self.data``."""
        resp = requests.post(url=self.url, headers=self.headers, json=self.json_data)
        resp.raise_for_status()  # fail fast on HTTP errors instead of parsing junk
        self.data = resp.json()

    def parse_data(self):
        """Parse ``self.data``, persist its rows, and advance ``pageNum``.

        :return: 1 when the last page has been reached, 0 otherwise
            (``run`` treats a truthy return as "stop").
        """
        current_page = self.json_data['pageNum']
        next_page = self.data['data']['nextPage']

        if current_page in self.page_set:
            print(f'{current_page}已爬取')
        else:
            rows = []
            for hospital in self.data['data']['list']:
                try:
                    fixmedinsName = hospital['fixmedinsName']
                    hospLv_label = self.hospLv_dict[hospital['hospLv']]
                    address = get_address(fixmedinsName)
                except (KeyError, TypeError) as exc:
                    # Skip a malformed entry rather than abort the whole page.
                    print(f'skip malformed entry: {exc!r}')
                    continue
                print(fixmedinsName, hospLv_label, address)
                rows.append([fixmedinsName, hospLv_label, address])
            if rows:
                # Write the whole page once, after the loop (the old per-row
                # call re-wrote all earlier rows on every iteration).
                self.save_data(rows)
            self.page_set.add(current_page)

        if next_page:  # keep crawling
            self.json_data['pageNum'] = next_page
            return 0
        return 1

    def save_data(self, lst):
        """Append the given rows to the currently open CSV file.

        :param lst: list of ``[name, level, address]`` rows.
        """
        csv.writer(self.file).writerows(lst)

    def run(self):
        """Crawl pages in a loop until the server reports no next page."""
        # utf-8-sig so spreadsheet apps detect the encoding; append mode so an
        # interrupted crawl can be resumed. ``with`` guarantees the handle is
        # closed even if a request or parse step raises.
        with open('Hospital_data2.csv', 'a', encoding='utf-8-sig', newline='') as f:
            self.file = f
            while True:
                self.get_data()
                if self.parse_data():
                    break

if __name__ == '__main__':
    # Script entry point: build a crawler and start fetching pages.
    HospitalCrawler().run()
