import base64
import json
import gzip
import requests
import datetime
from lxml import html

# Module-level toggles:
#   DEBUG          - when True, print intermediate data (target URL, raw
#                    decompressed payload) while scraping.
#   SAVE_JSON_DATA - when True, the __main__ flow writes the decoded series
#                    to data.json.
DEBUG = False
SAVE_JSON_DATA = True

class TC_spider:
    """Scraper for tradingeconomics.com market data.

    Workflow:
      1. ``get_all_avaliable_data_list()`` scrapes the category index pages
         and caches ``{"name", "target"}`` pairs in ``valiable_data``.
      2. ``get_url(name)`` builds the chart-API URL for a cached name.
      3. ``get_gzip_data(url)`` downloads the obfuscated payload.
      4. ``data_magic(payload)`` de-obfuscates it
         (base64 -> XOR with key -> gunzip -> JSON).
    """

    # XOR key the site's chart API uses to obfuscate its responses.
    decode_str = "tradingeconomics-charts-core-api-key"
    #TODO Make it update automatically
    basic_data_url = "https://d3ii0wo49og5mi.cloudfront.net/"
    # Browser-like headers; the CDN appears to require the Origin/Referer pair.
    headers = {
        'authority': 'd3ii0wo49og5mi.cloudfront.net',
        'accept': 'application/json, text/javascript, */*; q=0.01',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'cache-control': 'no-cache',
        'dnt': '1',
        'origin': 'https://tradingeconomics.com',
        'referer': 'https://tradingeconomics.com/',
        'sec-ch-ua-platform': '"Android"',
        'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N)'
    }

    # Class-level default kept for backward compatibility with existing
    # references; __init__ gives each instance its own list so scraped
    # state is not shared between instances (mutable-class-attribute bug).
    valiable_data = []

    def __init__(self):
        # Per-instance cache of {"name": ..., "target": ...} dicts.
        self.valiable_data = []

    @staticmethod
    def _debug_enabled():
        # Honour the module-level DEBUG flag when it exists; default False.
        return bool(globals().get("DEBUG", False))

    def get_avaliable_data_list(self, url, name_xpath, target_url_xpath, retry_times=5):
        """Scrape one category page and return [{"name", "target"}, ...].

        Retries up to ``retry_times`` times on connection errors/timeouts.
        Returns an empty list when every attempt fails (the original fell
        through and returned None, which broke the caller's ``+=``).
        """
        # Bug fix: range(1, retry_times) performed one attempt fewer than
        # the parameter promised.
        for _attempt in range(retry_times):
            try:
                # Timeout added so a stalled connection cannot hang forever
                # (original had none) — counts as a retryable failure.
                response = requests.get(url, headers=self.headers, timeout=15)
            except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
                print("Try to fetch data from url=" + url + "but get requests.exceptions.ConnectionError,retry....")
                continue
            if response.status_code == 200:
                tree = html.fromstring(response.text)
                # Names come from the anchor text; symbols from the row's
                # data-symbol attribute (rows and name cells zip 1:1).
                return [
                    {"name": name.text.strip(), "target": row.get('data-symbol')}
                    for name, row in zip(tree.xpath(name_xpath), tree.xpath(target_url_xpath))
                ]
            print("Failed to fetch data.url=" + url + "return code=" + str(response.status_code))
        return []

    def get_all_avaliable_data_list(self):
        """Scrape every supported category page; cache and return the names.

        Populates ``self.valiable_data`` and returns the list of names in
        scrape order.
        """
        categories = [
            {"url": "https://tradingeconomics.com/Commodities",
             "name_xpath": '//div[4]/div/div/div[*]/div/table/tbody/tr[*]/td[1]/a/b',
             "target_url_xpath": '//div[4]/div/div/div[*]/div/table/tbody/tr[*]'},
            {"url": "https://tradingeconomics.com/Currencies",
             "name_xpath": "//div[4]/div/div/div[*]/div/table/tbody/tr[*]/td[2]/a/b",
             "target_url_xpath": '//div[4]/div/div/div[*]/div/table/tbody/tr[*]'},
            {"url": "https://tradingeconomics.com/Stocks",
             "name_xpath": "//div[4]/div/div/div[*]/div/table/tbody/tr[*]/td[2]/a/b",
             "target_url_xpath": "//div[4]/div/div/div[*]/div/table/tbody/tr[*]"},
            {"url": "https://tradingeconomics.com/Bonds",
             "name_xpath": "//div[4]/div/div/div[*]/div/table/tbody/tr[*]/td[2]/a/b",
             "target_url_xpath": "//div[4]/div/div/div[*]/div/table/tbody/tr[*]"},
            {"url": "https://tradingeconomics.com/Crypto",
             "name_xpath": "//div[4]/div/div/div[*]/div/table/tbody/tr[*]/td[1]/a/b",
             "target_url_xpath": "//div[4]/div/div/div[*]/div/table/tbody/tr[*]"},
        ]
        collected = []
        for cate in categories:
            collected += self.get_avaliable_data_list(
                cate['url'], cate['name_xpath'], cate['target_url_xpath'])
        self.valiable_data = collected
        return [d['name'] for d in collected]

    def get_url(self, data_name):
        """Return the chart-API URL for ``data_name``.

        Scrapes the symbol list on demand when the cache is empty.
        Raises ValueError when the name is unknown.
        """
        # Bug fix: original compared the cache LIST against the name string
        # (always False) and then called get_all_avaliable_data_list()
        # without ``self.`` (NameError).  Populate the cache when empty.
        if not self.valiable_data:
            self.get_all_avaliable_data_list()
        matches = [d['target'] for d in self.valiable_data if d.get('name') == data_name]
        if not matches:
            # Clearer than the bare IndexError the original would raise.
            raise ValueError(f"unknown data name: {data_name!r}")
        url = self.basic_data_url + "markets/" + matches[0]
        if self._debug_enabled():
            print("target url = ", url)
        return url

    def get_gzip_data(self, url):
        """Download the obfuscated chart payload; return the raw body bytes.

        Raises RuntimeError on a non-200 response (the original called
        exit() inside library code).
        """
        #TODO Make parameters optional/dynamic
        params = {
            'span': 'max',
            'ohlc': '0',
            'key': '20240229:nazare'
        }
        response = requests.get(url, params=params, headers=self.headers, timeout=15)
        if response.status_code == 200:
            print("get response success:", len(response.content))
            return response.content
        print("request failed,url:", url, "paras=", params, "status code:", response.status_code)
        raise RuntimeError(f"request failed with status {response.status_code}: {url}")

    def data_magic(self, data, decode_str=decode_str):
        """De-obfuscate an API payload and return the parsed JSON.

        Pipeline: base64-decode -> XOR every byte with the repeating key
        ``decode_str`` -> gunzip -> ``json.loads``.
        """
        buf = bytearray(base64.b64decode(data))
        key = decode_str.encode()
        for i in range(len(buf)):
            buf[i] ^= key[i % len(key)]
        decompressed_data = gzip.decompress(buf)
        if self._debug_enabled():
            print(decompressed_data)
        return json.loads(decompressed_data)

def _print_name_menu(names):
    """Print the scraped series names five per row, numbered from 1."""
    for i, name in enumerate(names, start=1):
        print(f"{i} : {name:<15} ", end="")
        if i % 5 == 0:
            print("")


def main():
    """Interactive entry point: list series, let the user pick one,
    download and decode it, and optionally save it to data.json."""
    spider = TC_spider()
    names = spider.get_all_avaliable_data_list()
    _print_name_menu(names)
    num = int(input("\n\nplease input the number to choose date name:"))
    url = spider.get_url(names[num - 1])
    print(url)
    data = spider.get_gzip_data(url)
    json_data = spider.data_magic(data)
    if DEBUG:
        # Bug fix: TC_spider has no `print_goldprice_json_data` method —
        # the original raised AttributeError whenever DEBUG was True.
        # Dump the parsed JSON directly instead.
        print(json.dumps(json_data, indent=4))
    if SAVE_JSON_DATA:
        with open('data.json', 'w') as outfile:
            json.dump(json_data, outfile, indent=4)


if __name__ == "__main__":
    main()
