#!/usr/bin/python3
# -*- coding: utf-8 -*-


'''
分析:
真实 url地址: https://www.neihanx.com/e/action/ListInfo/?classid=11&page={页码}
爬虫流程

列表页 -> 提取详情页url -> 详情页 -> 提取数据

'''

from pprint import pprint
import requests
import re

import html

class Neihan8Spider(object):
    """Scrape posts from neihanx.com.

    Flow: list page -> extract detail-page URLs -> detail page -> extract text.
    Real list URL: https://www.neihanx.com/e/action/ListInfo/?classid=11&page={page}
    """

    # Number of list pages observed on the site; run() accepts an override.
    DEFAULT_PAGE_COUNT = 1094

    def __init__(self):
        self.base_url = "https://www.neihanx.com"
        # List-page URL template; {} is the 0-based page number.
        self.list_page_base_url = "https://www.neihanx.com/e/action/ListInfo/?classid=11&page={}"
        self.headers = {
            "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"
        }

    def save_content(self, content):
        """Persist one extracted post.

        Currently just prints; replace the body with a database insert.
        """
        print(content)

    def run(self, page_count=None, timeout=10):
        """Crawl the list pages, follow every detail link and save each post.

        page_count: number of list pages to visit (default DEFAULT_PAGE_COUNT,
                    matching the original hard-coded 1094).
        timeout: per-request timeout in seconds — the original passed none,
                 so a single stalled server could hang the crawler forever.
        """
        if page_count is None:
            page_count = self.DEFAULT_PAGE_COUNT

        # Regex that pulls detail-page URLs out of a list page.
        detail_urls_pattern = re.compile(r'<a href="(.*?)" class="title" title')

        # Coarse extraction of the detail body; DOTALL so '.' also matches
        # newlines inside the block.
        detail_content_pattern = re.compile(
            r'<div class="detail">(.*?)<div class="tag-share line">', re.DOTALL)

        # Fine-grained extraction: each <p> paragraph of the post.
        # (Fixes the original's misspelled local name "prttern".)
        detail_parts_pattern = re.compile(r'<p>(.*?)</p>')

        # 1. Visit every list page.
        for page in range(page_count):
            list_page_url = self.list_page_base_url.format(page)
            list_page_response = requests.get(
                list_page_url, headers=self.headers, timeout=timeout)
            list_page_html = list_page_response.content.decode('utf-8')

            # 2. Extract the detail-page links from the list-page HTML.
            detail_urls = detail_urls_pattern.findall(list_page_html)

            # 3. Visit each detail page and extract its text.
            for detail_url in detail_urls:
                url = self.base_url + detail_url
                detail_response = requests.get(
                    url, headers=self.headers, timeout=timeout)
                detail_html = detail_response.content.decode('utf-8')

                # Two-stage extraction: coarse block first, then paragraphs.
                # findall always returns a list, so a plain truthiness test
                # suffices (the original's `is not None` check was dead code).
                blocks = detail_content_pattern.findall(detail_html)
                if not blocks:
                    continue
                parts = detail_parts_pattern.findall(blocks[0])

                # Unescape HTML entities (&amp;, &lt;, ...) so the saved text
                # is plain; join instead of quadratic += concatenation.
                content_string = "".join(
                    html.unescape(part).strip() + "\n" for part in parts)
                self.save_content(content_string)

if __name__ == '__main__':
    # Script entry point: build the spider and start the crawl.
    Neihan8Spider().run()