# -*- coding:utf-8 -*-
import re

from concurrent.futures import ThreadPoolExecutor as Pool

import requests, pymysql
from requests.exceptions import ConnectionError


class Spider():
    """Scraper for healio.com news headlines; stores results in MySQL."""

    def __init__(self):
        self.base_url = 'https://www.healio.com'
        self.headers = {
            'user-agent': 'Mozilla / 5.0(X11;Linuxx86_64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 68.0.3440.75Safari / 537.36'
        }
        # NOTE(review): hard-coded credentials — move to config/env before production use.
        self.conn = pymysql.connect(host="localhost", user="root", password="123456", db="little_pig", port=3306, charset='utf8')
        self.cursor = self.conn.cursor()

    def get_pages(self, num, retries=3):
        """Fetch one headline-listing page (items offset by *num*) and parse it.

        Returns 404 on a non-200 HTTP status (kept for backward
        compatibility).  Retries up to *retries* times on connection
        errors — the original recursed unboundedly, risking a stack
        overflow when the network stays down.
        """
        url = self.base_url + '/news/headlines?startItem={}'.format(num)
        try:
            response = requests.get(url, headers=self.headers, verify=True)
        except ConnectionError:
            # Bounded retry instead of infinite recursion.
            if retries > 0:
                return self.get_pages(num, retries - 1)
            return
        # The original had an unreachable `else` branch here: the two
        # conditions (!= 200, == 200) are exhaustive, so it is dropped.
        if response.status_code != 200:
            return 404
        print('正在爬取url:' + url)
        self.parse_pages(response.text)

    def parse_pages(self, data):
        """Extract (kicker, href, title, date, excerpt) tuples from page HTML."""
        items = re.findall(r'<div class=story.*?<span class=kicker>(.*?)</span>.*?<h2><a href=\'(.*?)\'>(.*?)</a></h2>.*?<h6 class=date>(.*?)</h6>.*?<div class=excerpt>(.*?)</div>', data, re.S)
        self.save_pages(items)

    def save_pages(self, items):
        """Insert scraped rows into foreign_news; roll back any row that fails.

        Uses a parameterized query — the original interpolated scraped
        text straight into the SQL string, which breaks on any value
        containing a double quote and is vulnerable to SQL injection.
        """
        sql = 'insert into foreign_news values (%s, %s, %s, %s, %s)'
        for kicker, href, title, date, excerpt in items:
            try:
                self.cursor.execute(sql, (kicker, self.base_url + href, title, date, excerpt))
                self.conn.commit()
            except Exception:
                # Best-effort: skip the bad row but keep the crawl going.
                self.conn.rollback()

    def run(self):
        """Crawl the first 10 listing pages (20 items per page), sequentially."""
        for offset in range(0, 200, 20):
            self.get_pages(offset)


# Script entry point: build the spider and start the crawl.
if __name__ == '__main__':
    Spider().run()
