import requests
import os
import json
import sys
import time
import re

# Project root = parent of this script's directory; used to locate the
# sibling "sql" package so `import mongo` works regardless of the CWD.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# os.path.join is portable; the previous BASE_DIR + r"\sql" concatenation
# only worked on Windows. (Debug print of BASE_DIR removed.)
sys.path.append(os.path.join(BASE_DIR, "sql"))
import mongo


def get_html(url, param, timeout=30):
    """POST *param* to *url* and return the response body as text.

    Args:
        url: endpoint to POST to.
        param: payload (dict) sent as the form-encoded request body.
        timeout: seconds before the request is aborted. New parameter with a
            default, so existing callers are unaffected.

    Returns:
        The response body decoded with the apparent (detected) encoding.

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    # requests has NO default timeout -- without one a dead server would
    # hang this scraper forever.
    response = requests.post(url, param, timeout=timeout)
    # The server may not declare a charset; fall back to detection so
    # non-ASCII (Chinese) content decodes correctly.
    response.encoding = response.apparent_encoding
    return response.text


def parser_data(data_str):
    """Parse the API's JSON response into a list of article records.

    Args:
        data_str: raw JSON text, expected shape ``{"data": {"list": [...]}}``
            where each item carries classifyName/summary/tags/title/details.

    Returns:
        A list of dicts with keys classify/summary/tags/title/details.
        ``details`` has HTML tags and embedded newlines stripped.
        Empty list when the page holds no items (end of pagination).

    Raises:
        json.JSONDecodeError: if *data_str* is not valid JSON.
        KeyError: if the expected keys are missing.
    """
    data_list = json.loads(data_str)["data"]["list"]
    if not data_list:
        print("数据不存在！")
        return []

    # Compile once, outside the loop (was recompiled for every item).
    tag_re = re.compile(r'<[^>]+>', re.S)
    return [
        {
            "classify": item["classifyName"],
            "summary": item["summary"],
            "tags": item["tags"],
            "title": item["title"],
            # Strip HTML markup first, then drop embedded newlines.
            "details": tag_re.sub('', item["details"]).replace("\n", ""),
        }
        for item in data_list
    ]


if __name__ == "__main__":
    url = "http://open.zhongyi9999.com/lts/viewPoint/getLocationLevelPageList"
    classifyId = ["5c774e9fd654f4550807c929", "5c763055d654f4550807c8d8", "5c774ea9d654f4550807c92a",
                  "5c774eaed654f4550807c92b", "5c99d558d654f47d230302b5",
                  "5c7e1a3bd654f4550807c994", "5c774eb2d654f4550807c92c"]
    param = {"locationLevel": "",
             "classifyId": "5c774ea9d654f4550807c92a",
             "pageSize": "100",
             "pageNum": "1",
             "type": "",
             "status": "0",
             "siteId": "123504"}
    for a in classifyId:
        param["classifyId"] = str(a)
        for i in range(1, 10000):
            param["pageNum"] = str(i)
            data_str = get_html(url, param)
            n = parser_data(data_str)
            if len(n) < 1:
                break
            mongo.insertMany(n)
            time.sleep(3)
