import ast
import json
import sys
import time
from urllib import parse

sys.path.append('/root/qvenv')
import redis
import requests
import schedule
from get_headers import get_headers
from pd2db import Pd2DB


class SpiderIndia:
    """Spider for company-registration statistics on mca.gov.in.

    Downloads the Excel reports published by the Indian Ministry of
    Corporate Affairs and keeps a Redis set of already-seen document IDs
    so that newly published reports can be detected and fetched.
    """

    # Raw browser request headers, one "name: value" per line; converted
    # into a requests-compatible dict by get_headers() in __init__.
    header = '''
    :authority: www.mca.gov.in
    :method: GET
    :path: /content/mca/global/en/data-and-reports/company-statistics/indian-foreign-companies-llps/total-companies-registered.html
    :scheme: https
    pragma: no-cache
    referer: https://www.mca.gov.in/content/mca/global/en/data-and-reports/company-statistics/indian-foreign-companies-llps/total-companies-registered.html
    sec-ch-ua: " Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"
    sec-ch-ua-mobile: ?0
    sec-ch-ua-platform: "Windows"
    sec-fetch-dest: document
    sec-fetch-mode: navigate
    sec-fetch-site: same-origin
    sec-fetch-user: ?1
    upgrade-insecure-requests: 1
    user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36
    '''
    # URL-encoded document IDs collected on earlier runs (kept for reference).
    tmp_docIDs = ['8ndEdgOE0Wwks4FauFrWQw%3D%3D', 'pfJYCCXPkadaVjoye%2Fba2Q%3D%3D', '3CLpjZ3f5Vla%2BBIpMeHzpw%3D%3D',
                  'Rf0SiQJkt1ZqpJzzppR%2Ftw%3D%3D', '4xuisEanVIlunqFhKqkHDA%3D%3D', 'rg0Y2bbgmyP1r2ZlSkQFQQ%3D%3D',
                  'kqLGPURbZgSkHoKx%2Bkxe5A%3D%3D', 'jWUSAUDYxnHfedNCw4eXKg%3D%3D', 'xjzqlfQucNmCkvsxf2UJiw%3D%3D',
                  'ZM7G34HE3La9kyXOmAN3DQ%3D%3D', 'D9iGgtrCss%2Fzp%2FvLrS04hQ%3D%3D', '1wqbhjpURvk6og%2FMydi6IA%3D%3D',
                  'zrcsFEvW0rHM0WXqFVf8TQ%3D%3D', 'F3HdgGxidAOxKgZAuudyJQ%3D%3D', 'fOb9kg5Uz%2BGtVSlaU9V5%2BQ%3D%3D',
                  '2goIgwaopbHxnW1ZpXZkjw%3D%3D', 'bcBHUbmJhqkej%2BBj3zU5sg%3D%3D', 'eUXdq1SmIEoIzfy6l3b%2FCg%3D%3D',
                  '7DK1%2FjS6EnxXZHaZQbUAyg%3D%3D', 'dsgT4hm62EZzKDmMBvEung%3D%3D', 'ieZ3Y1LLGyUpAkeKjBGj8A%3D%3D',
                  'i5y%2F9ulox714EDic9niJfQ%3D%3D', 'W1%2B%2FPZDyrKmS7COQ3j30nQ%3D%3D', 'm1ycidLbYacNs5x8alBEDg%3D%3D',
                  't3EFYYahzSqbBD6nHUWC7A%3D%3D', 'K0ODYiGcmNSSKDJ6BBhrhA%3D%3D', 'psy2tu0b%2BJfc38aC%2BmgO5A%3D%3D',
                  'cyegdTNXAi7nbaEDh2481w%3D%3D', 'o7PYiO6Uy63CFG%2FumjLi%2Fg%3D%3D', '3OGpwLhau6chfIPS%2FYHFUQ%3D%3D',
                  'hb5k%2BsolsaNOhxjupdh%2Ffg%3D%3D', '72PGcEW2ZHkX%2BauzBzTyrA%3D%3D', 'Qab0httqh4zw6n0AY9Di7w%3D%3D',
                  'oZTsfv5wkcHD%2FQtVtmpfQQ%3D%3D', 'cm1wB2oUubhZBHTPAjT1pg%3D%3D']
    # Sample title/docID records (kept for reference / manual re-downloads).
    tmp_docs = [{'tName': 'Indian Companies_LLP_Foreign Companies register in December 2021',
                 'docID': '4nouBNAPaQtdxLMFkTO%2BNw%3D%3D'},
                {'tName': 'Master Details of Companies/LLPs Registered - October, 2021',
                 'docID': 'W8fxmtx6kJaBYjHrSxJ9dw%3D%3D'}]
    # Redis connection; the set named `setName` is the de-duplication record
    # of every document ID that has already been processed.
    # NOTE(review): credentials are hard-coded in source control — move them
    # to environment variables or a config file.
    db = redis.Redis(host='gz-crs-3v22wfs1.sql.tencentcdb.com', port=23148, password='j23QdKGB3js2',
                     decode_responses=True, db=11)
    setName = 'indiaDB'

    def __init__(self):
        # Paged listing endpoints (page number injected via str.format):
        # `now_url` lists the current-data folder (403), `history_url` the
        # historical folder (407).
        self.now_url = 'https://www.mca.gov.in/bin/dms/searchDocList?page={}&perPage=10&sortField=Date&sortOrder=D&searchField=Title&searchKeyword=&startDate=&endDate=&filter=&dialog=%7B%22folder%22%3A%22403%22%2C%22language%22%3A%22English%22%2C%22totalColumns%22%3A3%2C%22columns%22%3A%5B%22Title%22%2C%22Month%22%2C%22Year%20of%20report%22%5D%7D'
        self.history_url = 'https://www.mca.gov.in/bin/dms/searchDocList?page={}&perPage=5&sortField=Date&sortOrder=D&searchField=Title&searchKeyword=&startDate=&endDate=&filter=&dialog=%7B%22folder%22%3A%22407%22%2C%22language%22%3A%22English%22%2C%22totalColumns%22%3A2%2C%22columns%22%3A%5B%22Title%22%2C%22Date%22%5D%7D'
        self.headers = get_headers(self.header)

    @staticmethod
    def _parse_doc_list(raw):
        """Parse the 'documentDetails' payload, a Python-literal list string.

        Uses ast.literal_eval instead of eval so that untrusted server
        output cannot execute arbitrary code.
        """
        return ast.literal_eval(raw)

    def _register(self, entry):
        """Record one listing entry in the Redis de-dup set and return its
        {'tName', 'docID'} record.

        A trailing '.xlsx' is stripped from the title so that down_file()
        does not produce '<name>.xlsx.xlsx' filenames (the original code
        only did this for the first page).
        """
        name = entry['column1']
        if name.endswith('.xlsx'):
            name = name[:-len('.xlsx')]
        self.db.sadd(self.setName, entry['docID'])
        return {'tName': name, 'docID': entry['docID']}

    def down_file(self, docs):
        """Download every document in `docs` (list of {'tName', 'docID'})
        as an .xlsx file under /root/qvenv/download/india_data/.
        """
        for dic in docs:
            docID = parse.quote(dic['docID'])
            download_url = "https://www.mca.gov.in/bin/dms/getdocument?mds=" + docID + "&type=download"
            c = requests.get(download_url, headers=self.headers).content
            # '/' is illegal in filenames, so replace it in the title.
            with open(f'/root/qvenv/download/india_data/{dic["tName"].replace("/", "-")}.xlsx', 'wb') as f:
                f.write(c)

            print(f'{dic["tName"].replace("/", "-")}.xlsx已下载完毕.')

    def get_docID(self, url):
        """Walk every page of a paged listing endpoint.

        Registers each document ID in the Redis set and returns the full
        list of {'tName', 'docID'} records.
        """
        content = requests.get(url.format(1), headers=self.headers).content.decode()
        dic = json.loads(content)
        content_li = self._parse_doc_list(dic['documentDetails'])
        print(f'一页{len(content_li)}个')
        docs = [self._register(entry) for entry in content_li]

        # Derive the page count by ceiling division; guard against an
        # empty first page (original code would raise ZeroDivisionError).
        total = dic['totalResults']
        per_page = len(content_li)
        total_page = -(-total // per_page) if per_page else 1
        print(f"一共{total_page}页")

        # Distinct loop names: the original reused `i` for both the page
        # index and the per-entry loop, shadowing the outer variable.
        for page in range(2, total_page + 1):
            resp = requests.get(url.format(page), headers=self.headers).content.decode()
            page_entries = self._parse_doc_list(json.loads(resp)['documentDetails'])
            for entry in page_entries:
                docs.append(self._register(entry))
                print(f'已将{entry["docID"]}加入到队列中')

        return docs

    def check_update(self):
        """Check page 1 of the current listing for a new publication.

        If the newest docID is not yet in the Redis set, record it,
        download the file and import it into the database via Pd2DB.
        """
        print('checking...')
        content = requests.get(self.now_url.format(1), headers=self.headers).content.decode()
        dic = json.loads(content)
        content_li = self._parse_doc_list(dic['documentDetails'])
        newData = content_li[0]
        newDocID = newData['docID']
        # Act only when the newest document has not been seen before.
        if not self.db.sismember(self.setName, newDocID):
            print('Data updated')
            docIDs = {'tName': newData['column1'], 'docID': newData['docID']}
            self.db.sadd(self.setName, newDocID)
            # Download the new report, then load it into the database.
            self.down_file([docIDs])
            down = Pd2DB()
            files = down.getfiles('/root/qvenv/download/india_data/')
            down.handle_file(files)
        else:
            print('Data not updated')

    def job(self):
        """Run check_update() every Monday, polling the scheduler forever."""
        # schedule.every(1).minutes.do(self.check_update)
        schedule.every().monday.do(self.check_update)
        while True:
            schedule.run_pending()
            time.sleep(3)

    def main(self):
        """Entry point: one-shot update check (other modes left commented)."""
        # Fetch the latest listing:    docs = self.get_docID(self.now_url)
        # Fetch the historical listing: self.get_docID(self.history_url)
        # Download the files:           self.down_file(docs)
        # Check whether the site published new data:
        self.check_update()
        # Or run on a weekly schedule: self.job()

if __name__ == '__main__':
    # Script entry point: run a single update check against mca.gov.in.
    SpiderIndia().main()
