from __future__ import absolute_import

import networkx as nx
import os
from app import db
from celery_app import celery
from bson import objectid
from xgboost import XGBClassifier
from transformers import RobertaTokenizer, RobertaModel, AutoModel, AutoTokenizer
import torch

import time
from tqdm import tqdm
import numpy as np
import warnings
from datetime import datetime
import sys

# Suppress noisy library warnings (e.g. deprecation output from
# transformers / xgboost imports above) in worker logs.
warnings.filterwarnings("ignore")

# Point Scrapy at the project's settings module so get_project_settings()
# resolves correctly when spiders are launched from a Celery worker rather
# than via the `scrapy` CLI.
os.environ['SCRAPY_SETTINGS_MODULE'] = 'settings'


@celery.task
def spider_data(request):
    """Celery task: run one Scrapy spider crawl described by *request*.

    Parameters
    ----------
    request : dict
        Expected keys (assumed from usage below — confirm against caller):
        ``'topic_name'`` (MongoDB database name for this crawl),
        ``'mode'`` (one of the keys of ``mode_to_spider``),
        ``'keywords_list'``, ``'start_time'``, ``'end_time'``,
        ``'is_split_by_hours'``, and ``'max_count'`` (coercible to int).

    Returns
    -------
    int
        Always 1; ``process.start()`` blocks until the crawl completes.

    Raises
    ------
    KeyError
        If ``request['mode']`` is not a supported spider mode (or a
        required key is missing from *request*).
    """
    # Raw string: '\s' in a non-raw literal is an invalid escape sequence
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    sys.path.append(r'F:\smds_backhend')

    # Imported lazily so Scrapy/Twisted initialisation happens inside the
    # worker process, after sys.path has been extended above.
    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings
    from spiders.tweet_by_user_id import TweetSpiderByUserID
    from spiders.tweet_by_keyword import TweetSpiderByKeyword
    from spiders.follower import FollowerSpider
    from spiders.user import UserSpider
    from spiders.fan import FanSpider
    from spiders.repost import RepostSpider

    print(request)

    # Copy the project settings and redirect MongoDB output to a
    # per-topic database before building the crawler process.
    settings = get_project_settings()
    settings_dict = dict(settings)
    settings_dict['MONGO_DATABASE'] = request['topic_name']
    process = CrawlerProcess(settings_dict)

    # Dispatch table: request mode -> spider class.
    mode_to_spider = {
        'fan': FanSpider,
        'follow': FollowerSpider,
        'user': UserSpider,
        'repost': RepostSpider,
        'tweet_by_user_id': TweetSpiderByUserID,
        'tweet_by_keyword': TweetSpiderByKeyword,
    }
    mode = request['mode']
    if mode not in mode_to_spider:
        # Same exception type as the original bare dict lookup, but with a
        # message that names the valid modes.
        raise KeyError(
            f"Unknown spider mode {mode!r}; expected one of {sorted(mode_to_spider)}")
    process.crawl(
        mode_to_spider[mode],
        keyword=request['keywords_list'],
        start_time=request['start_time'],
        end_time=request['end_time'],
        is_split_by_hour=request['is_split_by_hours'],
        max_count=int(request['max_count']),
    )
    print("start！！！！")
    process.start()  # blocks until the crawl finishes
    return 1

