import json
import scrapy
import html
import os
from zhihu.items import ZhihuItem

class ZhihuCourseSpider(scrapy.Spider):
    """Scrape a Zhihu education course catalog.

    For each lesson in the catalog API response this spider:
      * picks the best available video stream (FHD preferred, then HD)
        and downloads it into ``videos/<title>.mp4``;
      * resolves each attached file via the education file API and
        downloads it into ``files/<file_name>``.
    """

    name = "zhihu_course"
    start_urls = [
        "https://api.zhihu.com/education/training/1760689372985630721/video_page/catalog?limit=128&offset=0&education_channel_code=ZHZN-d62bb90dfad9e02"
    ]

    @staticmethod
    def _safe_filename(name):
        """Return *name* stripped of path separators and other characters
        that are illegal in file names on common platforms.

        Falls back to ``"untitled"`` when the result would be empty (e.g.
        *name* is None or consists only of unsafe characters), so callers
        never build a path like ``files/None``.
        """
        cleaned = "".join(c for c in str(name) if c not in '\\/:*?"<>|').strip()
        return cleaned or "untitled"

    def start_requests(self):
        # NOTE(review): the cookie below embeds a live session token
        # (z_c0 / captcha_session_v2). It will expire and should be moved
        # to settings or an environment variable rather than committed.
        headers = {
            "Host": "api.zhihu.com",
            "cookie": "edu_user_uuid=edu-v1|69934b07-b719-40da-bfd4-852ca4e2e1da; _xsrf=jQTiuwAx6XdQiTMI3lPoIxCTVVp7ATDX; BEC=e9bdbc10d489caddf435785a710b7029; EDU_TRACE_ID=74a33b95-baf2-4204-97fd-2e97d8614937; _zap=8c537446-e5e3-4d6d-8b3d-6b2afc9d8506; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1733400375; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1733400375; HMACCOUNT=4655DB3D14BEE177; d_c0=ACBSRVfBpRmPToDhEOM0LsdH2Q7YzU57cdM=|1733400375; captcha_session_v2=2|1:0|10:1733400375|18:captcha_session_v2|88:eXVPSGJaTE1mUnNGbUMvM3U3dmJ6aHpCUC82VTdWbmYzWGIwZUtBRUNjZ2djaXBPMUFERzg5ZEc4SVJLQnU2Uw==|15bfbb92b8a710dc533a74f9b48fd563a6e6b825e7e9b248235f9a4600c0dd34; z_c0=2|1:0|10:1733400401|4:z_c0|92:Mi4xR3ZsQUNBQUFBQUFBSUZKRlY4R2xHU1lBQUFCZ0FsVk5VZVUtYUFCOFVBT1BpSVlTcUFlTmNidEFqVEpSanJ0TjBR|fd3e8cdbe94fd01d8fc4a67d6ce52f885aa68099e79d22f06f34d6ce90dab7b9; EDU_MEMBER_HASH_ID=d9e0d66a1112050d4c70a00890e5bb83",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
            "Referer": "https://www.zhihu.com/",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"macOS"',
            "sec-ch-ua": '"Google Chrome";v="92", "Chromium";v="92", "Not_A Brand";v="24"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-site",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7"
        }
        for url in self.start_urls:
            yield scrapy.Request(url, headers=headers, callback=self.parse)

    def parse(self, response):
        """Walk the catalog payload and schedule one request per video
        stream and one per attached file.

        The catalog JSON nests the lesson list under data.data; each
        lesson carries an optional playlist (resource.data.playlist)
        and an optional file_list.
        """
        data = response.json().get('data', {}).get('data', [])

        for course in data:
            item = ZhihuItem()
            item['title'] = course.get('title')

            # Pick the first usable playback URL: FHD preferred, HD as
            # fallback. URLs arrive HTML-escaped (&amp; etc.), hence unescape.
            playlist = course.get('resource', {}).get('data', {}).get('playlist', {})
            if playlist:
                fhd_url = playlist.get('FHD', {}).get('url')
                hd_url = playlist.get('HD', {}).get('url')
                if fhd_url:
                    item['video_url'] = html.unescape(fhd_url)
                elif hd_url:
                    item['video_url'] = html.unescape(hd_url)

                # Only schedule a download when a stream URL was found.
                if item.get('video_url'):
                    yield scrapy.Request(item['video_url'], callback=self.download_video, meta={'item': item})

            # Resolve the first attached file (if any) through the file API,
            # which returns the actual signed download URL.
            file_list = course.get('file_list', [])
            if file_list:
                file_id = file_list[0].get('file_id')
                file_name = file_list[0].get('file_name')
                if file_id:
                    item['file_id'] = file_id
                    item['file_name'] = file_name
                    file_url = f"https://api.zhihu.com/education/file/{file_id}"
                    yield scrapy.Request(file_url, callback=self.download_file, meta={'item': item})

    def download_video(self, response):
        """Persist a downloaded video body to videos/<title>.mp4.

        The title is sanitized so slashes or other illegal characters in a
        course name cannot break the path or escape the videos/ directory.
        """
        item = response.meta['item']
        os.makedirs("videos", exist_ok=True)
        video_path = os.path.join("videos", f"{self._safe_filename(item['title'])}.mp4")

        with open(video_path, 'wb') as f:
            f.write(response.body)

        self.log(f"Downloaded video for {item['title']} to {video_path}")

    def download_file(self, response):
        """Extract the signed download URL from the file-API response and
        schedule the actual file download; silently skips when absent."""
        item = response.meta['item']
        data = response.json().get('data', {})
        file_download_url = data.get('file_url')

        if file_download_url:
            yield scrapy.Request(file_download_url, callback=self.save_file, meta={'item': item})

    def save_file(self, response):
        """Persist a downloaded attachment to files/<file_name>.

        Falls back to the file_id when file_name was missing from the
        catalog payload (previously this produced the literal path
        'files/None'); the name is sanitized like video titles.
        """
        item = response.meta['item']
        os.makedirs("files", exist_ok=True)
        raw_name = item.get('file_name') or item.get('file_id')
        file_path = os.path.join("files", self._safe_filename(raw_name))

        with open(file_path, 'wb') as f:
            f.write(response.body)

        self.log(f"Downloaded file for {item['title']} to {file_path}")
