#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/12/25 11:14
# @Author  : Andrewq
# @Site    : nope
# @File    : w3school_spider.py
# @Software: PyCharm
import json
import random
import requests
from bs4 import BeautifulSoup
import bs4
from spider_app import config
from spider_app.models.model import InformationCode, InformationInfo


def code_spider(url):
    """Scrape the course index page and register each chapter code.

    For every <li> entry inside the div#course block, the chapter name is
    persisted via InformationCode.add_code_info. On the first storage
    failure the error dict is returned unchanged; otherwise returns a list
    of (stored_record, absolute_chapter_url) tuples.

    NOTE(review): relies on module-level ``headers`` and ``base_url`` that
    are only bound inside the ``__main__`` block — this function is not
    importable on its own.
    """
    soup = BeautifulSoup(requests.get(url, headers=headers).text, "html5lib")
    chapter_items = soup.find('div', id="course").find_all('li')
    collected = []
    for i, item in enumerate(chapter_items):
        print('正在处理第{0}条code数据'.format(i))
        outcome = InformationCode.add_code_info(codeName=item.text.strip(),
                                                parentCodeID="20201225132122")
        if outcome.get("code") != "200":
            # Abort on the first DB failure and surface the error payload.
            return outcome
        # The href begins with a language prefix (8 chars, presumably
        # "/python/") that is dropped before re-joining onto base_url.
        relative = item.find('a').attrs.get('href')[8:]
        collected.append((outcome.get('data'), base_url + relative))
    return collected


def text_spider(code_url):
    """Fetch one chapter page and persist its text content.

    ``code_url`` is a (stored_record, page_url) pair as produced by
    code_spider: stored_record is the dict returned by
    InformationCode.add_code_info, page_url the absolute chapter URL.

    Returns the JSON string that was stored via InformationInfo.

    NOTE(review): relies on the module-level ``headers`` dict that is only
    bound inside the ``__main__`` block.
    """
    record, page_url = code_url
    page = requests.get(page_url, headers=headers)
    soup = BeautifulSoup(page.text, "html5lib")
    main_content = soup.find('div', id='maincontent')
    fragments = []
    # Only the <div> children carrying an empty id attribute hold the body.
    for section in main_content.find_all('div', id=''):
        for node in section.contents:
            # Skip bare text nodes between tags; keep (tag-name, text) pairs.
            if not isinstance(node, bs4.element.NavigableString):
                fragments.append((node.name, node.text.strip()))
    text_json = json.dumps(fragments, ensure_ascii=False)
    InformationInfo.add_detail_info(title=record.get("codeName"),
                                    text=text_json,
                                    parentCodeID=record.get("codeID"))
    return text_json


if __name__ == '__main__':
    # Browser-like request headers shared by both spider functions below.
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding': 'gzip, deflate, compress',
        'Accept-Language': 'en-us;q=0.5,en;q=0.3',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        # random.choice adapts to the actual pool size; the previous
        # config.HEAD_LIST[random.randint(0, 20)] raised IndexError
        # whenever HEAD_LIST held fewer than 21 entries.
        'User-Agent': random.choice(config.HEAD_LIST),
    }
    base_url = "https://www.w3school.com.cn/python/"
    first_url = base_url + "index.asp"
    code_url_list = code_spider(first_url)
    # code_spider returns an error *dict* on storage failure; iterating it
    # as a list would feed bare string keys into text_spider.
    if isinstance(code_url_list, dict):
        print('code数据入库失败: {0}'.format(code_url_list))
    else:
        for i, code_url in enumerate(code_url_list):
            print('正在处理第{0}条content数据'.format(i))
            text_spider(code_url)
    print("爬取完成")
