from bs4 import BeautifulSoup
import re, base64, requests
from scrapy import Request, Spider
import os, sys
# Project root: two directory levels above this spider file. Appended to
# sys.path so project-local modules resolve when scrapy runs this spider
# from a different working directory.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)


# HTTP headers forwarded through the proxy for requests to
# s3plus.meituan.net; impersonates a desktop Chrome browser.
main_head = dict([
    ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'),
    ('Cache-Control', 'no-cache'),
    ('Connection', 'keep-alive'),
    ('Host', 's3plus.meituan.net'),
    ('Pragma', 'no-cache'),
    ('Upgrade-Insecure-Requests', '1'),
    ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'),
])

class CharcodeSpider(Spider):
    """Download the anti-scraping .woff font files used by Dianping shop pages.

    Flow: start_requests fetches a shop page (via the xiaodiankeji proxy),
    parse locates the s3plus CSS <link> that declares the custom fonts, and
    download_woff extracts each font URL + its tag name from the CSS and
    saves the font to BASE_DIR/woffs/<tag>.woff.
    """

    name = "charcode"

    @staticmethod
    def _proxy_url(target_url):
        """Wrap *target_url* for the proxy endpoint.

        The proxy expects ``requestMap`` to be the ``str()`` of a dict whose
        'url' value is the base64-encoded target URL and whose 'header' value
        is the header dict to forward.
        """
        request_map = {
            'url': base64.b64encode(target_url.encode()).decode('utf-8'),
            'header': main_head,
        }
        return 'http://spider.xiaodiankeji.net/http/v1/get?requestMap=' + str(request_map)

    def start_requests(self):
        """Seed the crawl with the shop index page(s), wrapped for the proxy."""
        index_urls = ['http://www.dianping.com/shop/100000003']
        for base_url in index_urls:
            yield Request(url=self._proxy_url(base_url), callback=self.parse)

    def parse(self, response):
        """Find the <link type="text/css"> tags whose href points at s3plus
        (the font-defining CSS) and schedule them for download_woff."""
        resp_obj = BeautifulSoup(response.body, 'html.parser')
        for link in resp_obj.find_all("link", type="text/css"):
            link_href = link['href']
            if 's3plus' in link_href:
                # hrefs are protocol-relative ("//s3plus...") in the page HTML.
                yield Request(url=self._proxy_url('https:' + link_href),
                              callback=self.download_woff)

    def download_woff(self, response):
        """Extract every .woff URL and its font-family tag from the CSS body
        and save each font file under BASE_DIR/woffs/."""
        index_resp = response.body.decode('utf-8')
        self.logger.info(index_resp)
        # Dots escaped so '.' cannot match arbitrary characters in the host
        # or extension (the original pattern left them unescaped).
        woffs_m = re.findall(
            r'//s3plus\.meituan\.net/v1/mss_[a-z0-9]+/font/[a-z0-9]+\.woff',
            index_resp)
        tags_m = re.findall(r'PingFangSC-Regular-([a-zA-Z]+)"', index_resp)
        out_dir = os.path.join(BASE_DIR, 'woffs')
        os.makedirs(out_dir, exist_ok=True)  # first run: directory may not exist
        # zip() pairs URL with tag and stops at the shorter list, avoiding the
        # IndexError the original risked when the two findall counts differ.
        for woff_rel, tag in zip(woffs_m, tags_m):
            # timeout so a stalled font download cannot hang the crawl
            woff = requests.get('http:' + woff_rel, timeout=30).content
            filename = os.path.join(out_dir, tag + '.woff')  # portable path join
            with open(filename, 'wb') as f:
                f.write(woff)
            self.log(f'Saved file {filename}')
