#-*- coding:utf-8 -*-
import argparse
import os
import datetime
import subprocess
import traceback

from scrapy import log

# Hour-granularity timestamp, captured once at import time; passed to every
# spider run as start_time so all crawls from one invocation share the same tag.
TIME_STR = datetime.datetime.now().strftime("%Y%m%d%H")
# Absolute path of the directory containing this script (the scrapy project dir).
START_DIR = os.path.split(os.path.realpath(__file__))[0]


def start_crawl(sfile, rdir):
    """Run the youku/sohu/hunantv spiders for every query listed in *sfile*.

    Each line of *sfile* has the form ``content_id:query``; results for a
    line are written under ``rdir/<content_id>/``.

    :param sfile: path of the query file, one ``content_id:query`` per line.
    :param rdir: existing directory where per-content-id result dirs live.
    :raises Exception: if *rdir* is not an existing directory.
    """
    if not os.path.isdir(rdir):
        exception_info = "result dir:%s is not existed" % rdir
        raise Exception(exception_info)

    # Loop-invariant: same spider set for every input line.
    spider_names = ("youku", "sohu", "hunantv")

    with open(sfile) as f:
        for index, line in enumerate(f):
            # Split only on the FIRST colon so a query containing ':' keeps it.
            # (The old "".join(lst[1:]) silently stripped colons from queries.)
            lst = line.strip().split(":", 1)
            if len(lst) < 2:
                # Fixed format string: the original had one %s placeholder for
                # a two-element tuple and raised TypeError on any bad line.
                log.msg("line:%s:%s is illegal!" % (index, line.strip()),
                        level=log.ERROR)
                continue
            content_id = lst[0]
            query = lst[1]
            directory = rdir + "/" + content_id + "/"
            try:
                for spider_name in spider_names:
                    cmd_crawl = ["/usr/bin/scrapy", "crawl", spider_name, "-a",
                                 "query=%s" % query, "-a", "rdir=%s" % directory,
                                 "-a", "start_time=%s" % TIME_STR,
                                 "-a", "content_id=%s" % content_id]
                    # Run scrapy from the script's directory. The old code spawned
                    # a separate "cd" shell, which cannot change the cwd of the
                    # later scrapy processes; cwd= actually does.
                    subprocess.check_call(cmd_crawl, cwd=START_DIR)
            except Exception:
                # Best-effort: log the failing spider run and move to next line.
                log.msg(traceback.format_exc(), level=log.ERROR)


def main():
    """Parse the command line (source file, result dir) and start crawling."""
    arg_parser = argparse.ArgumentParser(description='start scrapy to crawl querys')
    arg_parser.add_argument("source", help="query file path")
    arg_parser.add_argument("rdir", help="result directory path")
    opts = arg_parser.parse_args()
    start_crawl(opts.source, opts.rdir)

# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
