from flask import Flask, request, jsonify
from flask_cors import CORS
from langdetect import detect
import pymongo
from celery import group
import ast
import os

# from scrapy.crawler import CrawlerProcess
# from scrapy.utils.project import get_project_settings
# from spiders.tweet_by_user_id import TweetSpiderByUserID
# from spiders.tweet_by_keyword import TweetSpiderByKeyword
# # from spiders import CommentSpider
# from spiders.follower import FollowerSpider
# from spiders.user import UserSpider
# from spiders.fan import FanSpider
# from spiders.repost import RepostSpider

app = Flask(__name__)
# Allow cross-origin requests and let the browser send credentials (cookies)
# with them — required by the separate frontend.
CORS(app, supports_credentials=True)

# MongoDB connection. The URI can be overridden via the MONGO_URI environment
# variable for non-local deployments; defaults to the original localhost URI.
client = pymongo.MongoClient(os.environ.get("MONGO_URI", "mongodb://localhost:27017/"))
db = client["DetectionSystemNew"]
print(db)  # NOTE(review): debug leftover — consider replacing with logging


# os.environ['SCRAPY_SETTINGS_MODULE'] = 'settings'


@app.route('/spider_data_with_keywords', methods=['GET'])
def spider_data_with_keywords():
    """Enqueue a keyword-based tweet crawl as an async Celery task.

    Query parameters:
        topic_name: label for this crawl topic.
        keywords: Python-literal list of keyword strings, e.g. "['a', 'b']".
        start_time / end_time: crawl time window (passed through verbatim).
        max_count: maximum number of items to crawl (passed through as-is).
        is_split_by_hours: parsed but currently NOT forwarded — see NOTE below.

    Returns the task payload as JSON with 200, or an error message with 400
    when the 'keywords' parameter is missing or not a valid Python literal.
    """
    # Imported lazily so the web app can start even if the Celery worker
    # module has import-time side effects.
    from task import spider_data

    topic_name = request.args.get("topic_name")
    raw_keywords = request.args.get("keywords")
    # literal_eval is safe on untrusted input (no code execution), but it
    # raises on None or malformed text — turn that into a client error
    # instead of a 500.
    try:
        keywords_list = ast.literal_eval(raw_keywords)
    except (TypeError, ValueError, SyntaxError):
        return jsonify({"error": "missing or invalid 'keywords' parameter"}), 400

    start_time = request.args.get("start_time")
    end_time = request.args.get('end_time')
    max_count = request.args.get('max_count')
    is_split_by_hours = request.args.get("is_split_by_hours")

    request_dict = {
        "mode": 'tweet_by_keyword',
        'keywords_list': keywords_list,
        "start_time": start_time,
        "end_time": end_time,
        'topic_name': topic_name,
        'max_count': max_count,
        # NOTE(review): the request's is_split_by_hours value is parsed above
        # but hardcoded to 0 here — confirm whether that is intentional.
        "is_split_by_hours": 0
    }
    # Fire-and-forget: enqueue the crawl and return the payload immediately.
    spider_data.delay(request_dict)
    return jsonify(request_dict), 200


if __name__ == "__main__":
    # Listen on all interfaces; the reloader is disabled so the process is
    # not forked twice (which would duplicate module-level side effects).
    app.run(debug=True, use_reloader=False, host="0.0.0.0", port=5000)
