import datetime
import re
import ast
from urllib import parse
import threading
import time
from queue import Queue

import requests
from scrapy import Selector

from csdn_spider.answer.spider import answer_spider, save_answer_mysql
from csdn_spider.author.spider import author_spider, save_author_mysql
from csdn_spider.models import Topic
from csdn_spider.topic.spider import topic_spider, save_topic_mysql

domain = 'https://bbs.csdn.net'


def get_nodes_json(url):
    """Fetch the forum left-menu JS and parse its ``forumNodes`` literal.

    The response is JavaScript, not strict JSON, so ``null`` is mapped to
    ``None`` and the result is parsed with :func:`ast.literal_eval`
    (safe, unlike ``eval``).

    Returns the node list (dicts, possibly with nested ``children``),
    or an empty list when the marker is not found.
    """
    # timeout: without it requests can block forever on a stalled server,
    # hanging the producer thread.
    left_menu_text = requests.get(url, timeout=10).text
    nodes_str_match = re.search("forumNodes:(.*])", left_menu_text)
    if nodes_str_match:
        nodes_str = nodes_str_match.group(1).replace('null', "None").strip()
        return ast.literal_eval(nodes_str)
    return []


def process_nodes_list(nodes_list):
    """Recursively yield forum URLs from the parsed node tree.

    A node with a ``url`` and no ``children`` is a leaf and its URL is
    yielded; a node with ``children`` is descended into (its own ``url``,
    if any, is NOT yielded — matching the original behavior).
    """
    for node in nodes_list:
        # `and`, not bitwise `&`: short-circuits and reads as intended.
        if "url" in node and "children" not in node:
            yield node["url"]
        if "children" in node:
            yield from process_nodes_list(node["children"])


def get_url_list(url):
    """Return the absolute URL of every forum page reachable from *url*.

    Fetches the left-menu node tree, flattens it and joins each relative
    path against the module-level ``domain``.
    """
    nodes = get_nodes_json(url)
    return [parse.urljoin(domain, path) for path in process_nodes_list(nodes)]


# Work queues shared between the producer thread (topic spider) and the
# two consumer threads below.
topic_url_queue = Queue()   # absolute topic-page URLs awaiting answer scraping
author_url_queue = Queue()  # absolute author-profile URLs awaiting author scraping


# Producer thread body: crawls every forum page.
def base_thread_topic_spider():
    """Crawl all forum pages, persist each topic and enqueue follow-ups.

    For every topic: save it to MySQL, push its absolute topic URL onto
    ``topic_url_queue`` and the author's profile URL onto
    ``author_url_queue`` for the consumer threads.
    """
    menu_url = "https://bbs.csdn.net/dynamic_js/left_menu.js?csdn"
    author_base_url = 'https://me.csdn.net'
    count = 0
    for forum_url in get_url_list(menu_url):
        for topic in topic_spider(forum_url):
            count += 1
            save_topic_mysql(topic)
            topic_url_queue.put(parse.urljoin(forum_url, topic['topic_url']))
            author_url_queue.put(parse.urljoin(author_base_url, topic['author_id']))


def base_thread_answer_spider():
    """Consumer thread body: scrape and persist answers for queued topics.

    Pulls topic URLs from ``topic_url_queue`` and saves every answer found
    on each page.  Runs until the process exits.
    """
    from queue import Empty  # local import keeps this fix self-contained

    n = 0
    while True:
        try:
            # A plain Queue.get() blocks forever and never raises, which
            # made the original broad `except Exception` dead code.  A
            # timeout makes the empty-queue case explicit and narrow.
            topic_url = topic_url_queue.get(timeout=1)
        except Empty:
            continue
        for answer in answer_spider(topic_url):
            n += 1
            save_answer_mysql(answer)


def base_thread_author_spdier():
    """Consumer thread body: scrape and persist queued author profiles.

    NOTE(review): the name typo ("spdier") is kept on purpose — the
    module-level thread construction references it.
    """
    from queue import Empty  # local import keeps this fix self-contained

    n = 0
    while True:
        try:
            # The original checked `empty()` then broke: if this thread
            # started before the producer enqueued anything it exited
            # immediately.  Waiting with a timeout closes that race; we
            # stop only after the queue has stayed empty for 5 seconds.
            author_url = author_url_queue.get(timeout=5)
        except Empty:
            break
        author = author_spider(author_url)
        save_author_mysql(author)
        n += 1
        print('author_nums:' + str(n))


# Worker threads.  NOTE(review): `thread_anthor_spider` has a typo
# ("anthor") — kept as-is, since renaming a module-level name could
# break code that imports this script.
thread_topic_spider = threading.Thread(target=base_thread_topic_spider)
thread_answer_spider = threading.Thread(target=base_thread_answer_spider)
thread_anthor_spider = threading.Thread(target=base_thread_author_spdier)

if __name__ == "__main__":
    # NOTE(review): base_thread_answer_spider loops forever and the
    # threads are non-daemon, so the process never exits on its own —
    # confirm this is intended.
    thread_topic_spider.start()
    thread_answer_spider.start()
    thread_anthor_spider.start()
