from threading import Thread

from flask import Flask, jsonify
from scrapy.crawler import CrawlerRunner
from scrapy import signals
from scrapy.crawler import CrawlerProcess
from scrapy.signalmanager import dispatcher
from twisted.internet import reactor, defer
from twisted.internet.task import react
from scrapy.utils.project import get_project_settings
# Import the BlogSpider class
# from qaScrap.qaScrap.spiders.test_spider import BlogSpider
# Flask application instance; routes below attach to it.
app = Flask(__name__)
# Scrapy crawler process created eagerly at import time. It is not used by
# any active route in this file — only the commented-out /scraper code below
# relates to crawling. NOTE(review): presumably kept for future use; verify
# before removing.
process = CrawlerProcess(get_project_settings())
@app.route('/', endpoint='helo_world')
def hello_world():
    """Root route: return a static demo payload.

    Flask serializes a returned dict to an application/json response.
    The function name fixes the original "helo_world" typo; the endpoint
    is pinned to the old name so any url_for('helo_world') callers and
    the registered endpoint stay unchanged.
    """
    return {
        "name": 'demo',
        "content": 'Hello World!!!@@@@'
    }

@app.route('/next', endpoint='next_word')
def next_word():
    """Route /next: return the same static demo payload as the root route.

    Renamed from `next`, which shadowed the builtin `next()` at module
    level. The route's endpoint was already explicitly 'next_word', so the
    registered endpoint and URL are unchanged for all callers.
    """
    return {
        "name": 'demo',
        "content": 'Hello World!!!@@@@'
    }

@app.route('/json')
def get_json():
    """Route /json: serialize the static demo payload with jsonify."""
    payload = {
        "name": 'demo',
        "content": 'Hello World!!!@@@@',
    }
    return jsonify(payload)

# @app.route('/scraper')
# def scraper():
#     scrape_data = []
#
#     def _crawler_result(item, response, spider):
#         scrape_data.append(dict(item))
#
#     dispatcher.connect(_crawler_result, signal=signals.item_scraped)
#
#     @defer.inlineCallbacks
#     def crawl(reactor):
#         runner = CrawlerRunner()
#         yield runner.crawl(BlogSpider, start_urls=["https://product.pconline.com.cn/mobile/apple/2359779_detail.html"])
#         reactor.stop()
#
#     def run_crawler():
#         react(crawl)
#
#     thread = Thread(target=run_crawler)
#     thread.start()
#     thread.join()
#
#     return jsonify(scrape_data)

if __name__ == '__main__':
    # Start Flask's built-in development server on port 9527.
    # debug=True enables the auto-reloader and interactive debugger —
    # development only; never enable in production.
    app.run(port=9527,debug=True)