import argparse
import os
import time
from datetime import datetime
from urllib.parse import urlparse

import requests

# HTTP headers reused for every API call and video download.
# The desktop-browser User-Agent presumably mimics a regular client so the
# server/CDN does not reject the requests — confirm whether it is required.
headers = {
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'
}


class Spider(object):
    """Download anti-theft trade-session videos from the deepeleph API.

    Query parameters (time window, paging, token) are assigned by the caller
    after construction. Finished transaction ids are recorded one-per-line in
    ``download.txt`` so repeated runs skip videos that are already downloaded.
    """

    # File that records finished transaction ids, one per line.
    PROGRESS_FILE = 'download.txt'
    # Directory video files are written to.
    VIDEO_DIR = 'videos'
    # Seconds before an HTTP request is aborted (original code had no timeout
    # and could hang forever on a stalled connection).
    REQUEST_TIMEOUT = 30

    def __init__(self):
        # Query window in epoch milliseconds; set by the caller before use.
        self.start = None
        self.end = None
        self.page = 1
        self.size = 100
        # Seconds to sleep between downloads.
        self.sleep = 5
        # API token; set by the caller before use.
        self.token = None
        self.poi_id = 'test01'
        self.tenant_code = 'SED'
        # Transaction ids already downloaded (loaded from PROGRESS_FILE).
        self.exists = []
        self.load()

    def load(self):
        """Load already-downloaded transaction ids from the progress file.

        A missing file (first run) is not an error: start with an empty list
        instead of crashing with FileNotFoundError.
        """
        try:
            with open(self.PROGRESS_FILE, 'r') as file:
                self.exists = [line.rstrip('\n') for line in file]
        except FileNotFoundError:
            self.exists = []
        print(self.exists)

    def list(self):
        """Query one page of trade sessions for the configured window.

        Returns:
            tuple: ``(pages, items)`` — the total page count reported by the
            API and the sessions on the current page; ``(1, [])`` when the
            request is unsuccessful or returns no data.
        """
        list_query_url = 'https://api.deepeleph.com/api/v1/anti-theft/trade_session/query'
        response = requests.post(list_query_url, headers=headers,
                                 timeout=self.REQUEST_TIMEOUT,
                                 json={
                                     "poiId": self.poi_id,
                                     "tenantCode": self.tenant_code,
                                     "startTimeFromEq": self.start,
                                     "startTimeToEq": self.end,
                                     "pageNo": self.page,
                                     "pageSize": self.size,
                                     "token": self.token
                                 })
        results = response.json()
        data = results.get('data')
        if results.get('success', False) and data:
            return data.get('pages'), data.get('items')
        return 1, []

    def get_video(self, item):
        """Look up the video URL for one trade session.

        Args:
            item: session dict containing at least ``transactionId``.

        Returns:
            str | None: the video URL, or None when the API reports no video.
        """
        video_query_url = 'https://api.deepeleph.com/api/v1/anti-theft/trade_session/trade_logs'
        response = requests.post(video_query_url, headers=headers,
                                 timeout=self.REQUEST_TIMEOUT,
                                 json={
                                     "poiId": self.poi_id,
                                     "tenantCode": self.tenant_code,
                                     "transactionId": item['transactionId'],
                                     "token": self.token
                                 })
        results = response.json()
        data = results.get('data')
        if results.get('success', False) and data:
            return data.get('videoUrl')
        return None

    def download(self, items):
        """Download the video for each trade session in *items*.

        Sessions without a transaction id, or whose id is already recorded,
        are skipped. Sleeps ``self.sleep`` seconds between processed items.
        """
        # Ensure the target directory exists (the original code assumed it).
        os.makedirs(self.VIDEO_DIR, exist_ok=True)
        for item in items:
            transaction_id = item.get('transactionId')
            if not transaction_id:
                print('No transaction, do next…………')
                continue
            if transaction_id in self.exists:
                continue
            # Resolve the video URL for this transaction.
            _video_url = self.get_video(item)
            if _video_url:
                file_path = os.path.join(self.VIDEO_DIR, self.get_filename(_video_url))
                if not os.path.isfile(file_path):
                    # Stream in 1 MiB chunks so large videos are never held
                    # fully in memory; the context manager closes the
                    # connection (the original leaked the response object).
                    with requests.get(_video_url, headers=headers, stream=True,
                                      timeout=self.REQUEST_TIMEOUT) as response:
                        with open(file_path, "wb") as mp4:
                            for chunk in response.iter_content(chunk_size=1024 * 1024):
                                if chunk:
                                    mp4.write(chunk)
                else:
                    print('video already download by this transaction:', transaction_id)
                self._record(transaction_id)
            else:
                print('No video found by this transaction:', transaction_id)
            print(f"finished downloading {transaction_id}")
            # Throttle between downloads.
            time.sleep(self.sleep)

    def _record(self, transaction_id):
        """Record a finished transaction both in memory and in the progress file.

        Updating ``self.exists`` too prevents re-downloading the same id when
        it shows up again on a later page within the same run.
        """
        self.exists.append(transaction_id)
        with open(self.PROGRESS_FILE, 'a') as file:
            file.write(f'{transaction_id}\n')

    @staticmethod
    def get_filename(_video_url):
        """Return the final path component of *_video_url* (the file name)."""
        parsed_url = urlparse(_video_url)
        path = parsed_url.path
        return path.split('/')[-1]


def parse_datetime(date_time):
    """Parse a ``YYYY-MM-DD HH:MM:SS`` string into a naive datetime.

    Raises:
        argparse.ArgumentTypeError: when the string does not match the
            expected format, so argparse prints a friendly usage error.
    """
    try:
        parsed = datetime.strptime(date_time, '%Y-%m-%d %H:%M:%S')
    except ValueError:
        msg = "Not a valid date: '{0}'. Format should be YYYY-MM-DD HH:MM:SS."
        raise argparse.ArgumentTypeError(msg.format(date_time))
    else:
        return parsed


def default_start_time():
    """Return today's date at midnight (00:00:00) as a naive datetime."""
    return datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)


def parse_args():
    """Define and parse the command-line options.

    Returns:
        argparse.Namespace: start/end datetimes, paging values, sleep
        interval and the required API token.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--start', type=parse_datetime, default=default_start_time(), help='设置查询开始时间')
    parser.add_argument('--end', type=parse_datetime, default=datetime.now(), help='set end time')
    # Plain integer options share the same registration shape.
    int_options = (
        ('--page', 1, 'current page number'),
        ('--size', 100, 'current page size'),
        ('--sleep', 10, 'download interval time'),
    )
    for flag, fallback, text in int_options:
        parser.add_argument(flag, type=int, default=fallback, help=text)
    parser.add_argument('--token', type=str, required=True, help='api token')
    return parser.parse_args()


if __name__ == '__main__':
    # Command-line options drive the query window, paging and throttling.
    options = parse_args()
    crawler = Spider()
    # The API expects the time window in epoch milliseconds.
    crawler.start = int(options.start.timestamp()) * 1000
    crawler.end = int(options.end.timestamp()) * 1000
    crawler.page = options.page
    crawler.size = options.size
    crawler.sleep = options.sleep
    crawler.token = options.token
    pages, items = crawler.list()
    # Walk forward page by page until the API returns an empty page.
    while items:
        print("total page:", pages, "start download page: ", crawler.page)
        crawler.download(items)
        print("total page:", pages, "finished download page: ", crawler.page)
        crawler.page += 1
        _, items = crawler.list()
