import logging

import scrapy
from scrapy import crawler
from scrapy_splash import SplashRequest
from scrapy.http import FormRequest
import json
# from ..items import *
import csv
import urllib
from datetime import datetime

class FullSpider(scrapy.Spider):
    """Spider for the scys.vip Parse-server API.

    Crawled sections (translated from the author's notes below):
    login; home page; latest / past practical exercises; Xiaohongshu
    operations; voyage handbook; veteran shares; 100-questions Q&A /
    full text / featured logs / crew stories; project repository.
    """
    name = 'full_spider'
    allowed_domains = ['api01.scys.vip']
    # Login
    # Home page
    # Latest practical exercises | past practical exercises
    # Xiaohongshu (RED) operations
    # Voyage handbook
    # Veteran shares
    # 100 Q&A | view full text | featured logs | crew stories
    # Project repository
    start_urls = ['https://api01.scys.vip/parse/functions/activate',
                  'https://api01.scys.vip/parse/functions/queryArticle',
                  'https://api01.scys.vip/parse/classes/ActivitySummary',
                  'https://api01.scys.vip/parse/classes/ActivityInfo',
                  'https://api01.scys.vip/parse/functions/queryTokenDetail',
                  'https://api01.scys.vip/parse/functions/getShareForActivity',
                  'https://api01.scys.vip/parse/classes/QA',
                  'https://api01.scys.vip/parse/functions/getHermit',
                  'https://api01.scys.vip/parse/functions/getProject',
                  'https://api01.scys.vip/parse/functions/queryArticle']

    # NOTE(review): credentials are hard-coded; consider moving them to
    # spider arguments or project settings.
    activationCode = '64277190ef923ca74445df917448139c'
    # Class variable holding the session token.  Leave it as None to force
    # a login through the activate endpoint first (see start_requests).
    # token = None
    token = "428df80c4e3fcf14b7d07f9ca69a141c"
    # Shared Parse client fields merged into every request body.
    common_formdata = {
        "_ApplicationId": "app",
        "_ClientVersion": "js2.1.0",
        "_InstallationId": "ebfca397-d261-0ad9-0c5e-a5f7148a1dad",  # fixed id reused on every request
    }


    def start_requests(self):
        """Entry point: log in if there is no token, otherwise fan out API requests.

        With a valid ``self.token`` this builds one JSON POST per enabled
        endpoint in ``urls_formdata_map`` (currently only ``getProject``);
        without a token it first hits the activation endpoint so that
        ``parse_login`` can capture one.

        BUGFIX: the original called ``logging.INFO("...")`` — ``INFO`` is the
        integer level constant, not a function, so the spider crashed with
        ``TypeError`` on startup.  The stray debug call has been removed.
        """
        # Endpoint-specific formdata.  Disabled endpoints are kept commented
        # out for reference and can be re-enabled by uncommenting them.
        urls_formdata_map = {
            # 'https://api01.scys.vip/parse/functions/queryArticle': {
            #     "groupId": 1824528822,
            #     "token": self.token,
            #     "activationCode": self.activationCode,
            #     "excludeSolution": True,
            # },
        #     'https://api01.scys.vip/parse/classes/ActivitySummary': {"where": {"isCurrent": True}, "limit": 1,"_method": "GET"},
        #     'https://api01.scys.vip/parse/classes/ActivitySummary': {"where": {"isCurrent": False}, "limit": 1,"_method": "GET"},
        #     'https://api01.scys.vip/parse/classes/ActivityInfo': {"where": {"activity_id": 1573}, "limit": 1,"_method": "GET"},
        #     'https://api01.scys.vip/parse/functions/queryTokenDetail': {"token": self.token},
        #     'https://api01.scys.vip/parse/functions/getShareForActivity': {"activity_id": 1573},
        #     'https://api01.scys.vip/parse/classes/QA': {"where": {"category": "百问百答", "activity_id": 1573},
        #                                                 "limit": 20, "order": "-gmt_create","_method":"GET"},
        #     'https://api01.scys.vip/parse/classes/QA': {"where": {"category": "百问百答", "qid": 19920}, "limit": 20,
        #                                                 "order": "-gmt_create","_method":"GET"},
        #     'https://api01.scys.vip/parse/classes/QA': {"where": {"category": "精选日志", "activity_id": 1573},
        #                                                 "limit": 20, "order": "-gmt_create","_method":"GET"},
        #     'https://api01.scys.vip/parse/classes/QA': {"where": {"category": "船员好事", "activity_id": 1573},
        #                                                 "limit": 20, "order": "-gmt_create","_method":"GET"},
        #     'https://api01.scys.vip/parse/functions/getHermit': {},
            'https://api01.scys.vip/parse/functions/getProject': {"menu_id": "404045"},
        # The entry below should not be needed:
        #     'https://api01.scys.vip/parse/functions/queryArticle': {"topic_id": "581184482185524",
        #                                                             "token": self.token},
        }

        if self.token:
            # Token available: request the content endpoints directly.
            for url, formdata in urls_formdata_map.items():
                # Merge endpoint-specific fields with the shared Parse fields.
                formdata = {**formdata, **self.common_formdata}

                # Home-page feed.
                if "queryArticle" in url:
                    if formdata.get('groupId') == 1824528822:
                        yield scrapy.FormRequest(
                            url=url, method="POST",
                            headers={"Content-Type": "application/json"},
                            body=json.dumps(formdata),
                            callback=self.parse_initial_page)
                    # else:
                    #     topic_ids = ['588118188424524','188558284428112','188558284424822','188558284425112','811881512814222','411881415812428','411881415821858','588118285148224','211881512848241']
                    #     for topic_id in topic_ids:
                    #         formdata["topic_id"] = topic_id
                    #         data = json.dumps(formdata)
                    #         yield scrapy.FormRequest(url=url, method="POST", headers={"Content-Type": "application/json"},
                    #                              body=data, callback=self.article_details_page)

                # Latest / past practical exercises.  The returned id is the
                # activity_id consumed by the detail endpoints below.
                if "ActivitySummary" in url:
                    # Both arms of the original if/else were identical except
                    # for the isCurrent flag, so they are folded together.
                    is_current = bool(formdata.get('where', {}).get('isCurrent'))
                    query = {"where": {"isCurrent": is_current}, "limit": 1, "_method": "GET",
                             "_ApplicationId": "app",
                             "_ClientVersion": "js2.1.0",
                             "_InstallationId": "ebfca397-d261-0ad9-0c5e-a5f7148a1dad"}
                    yield scrapy.FormRequest(url=url, method="POST",
                                             headers={"Content-Type": "application/json"},
                                             body=json.dumps(query),
                                             callback=self.practical_exercises)

                # Veteran shares: one request per activity_id.
                if "ActivityInfo" in url:
                    # Full catalogue of known activity ids, kept for reference …
                    activity_ids = [1573,1574,1575,1576,1577,1578,1579,1580,1581,1582,1583,1584,1585,1586,1587
                        ,1588,1589,1590,1591,1592,1593,1594,1595,1813,1596,1382,1057,1039,1040,1041,1042,1043
                        ,1044,1045,1046,1047,1048,1049,1050,1051,1052,1053,1054,1055,1056,1058,1059,1060,1061
                        ,1062,1063,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,174,170,171
                        ,168,177,176,175,179,173,172,167,169,178,180,181,183,182,156,155,160,159,158,150,162
                        ,153,154,151,157,166,165,55,133,126,134,132,128,127,130,131,129,125,47,1,10,9,7,8,6,5
                        ,4,3,2,1925,1926,1927,1928,1929,1930,1931,1932,1933]
                    # … deliberately narrowed to the two ids actually crawled.
                    activity_ids = [1932, 1933]
                    for activity_id in activity_ids:
                        query = {"where": {"activity_id": activity_id}, "limit": 1, "_method": "GET",
                                 "_ApplicationId": "app",
                                 "_ClientVersion": "js2.1.0",
                                 "_InstallationId": "ebfca397-d261-0ad9-0c5e-a5f7148a1dad"}
                        yield scrapy.FormRequest(url=url, method="POST",
                                                 headers={"Content-Type": "application/json"},
                                                 body=json.dumps(query),
                                                 callback=self.practical_details)

                if "getShareForActivity" in url:
                    activity_ids = [1932, 1933, 1926, 1927]
                    for activity_id in activity_ids:
                        formdata["activity_id"] = activity_id
                        yield scrapy.FormRequest(url=url, method="POST",
                                                 headers={"Content-Type": "application/json"},
                                                 body=json.dumps(formdata),
                                                 callback=self.share_for_activity)

                # 100 Q&A / featured logs / crew stories.
                if "QA" in url:
                    category = formdata.get('where', {}).get('category')
                    if category == "百问百答":
                        activity_ids = [1932, 1933]
                        for activity_id in activity_ids:
                            formdata["where"]["activity_id"] = activity_id
                            yield scrapy.FormRequest(url=url, method="POST",
                                                     headers={"Content-Type": "application/json"},
                                                     body=json.dumps(formdata),
                                                     callback=self.practical_details_discuss)

                # Project-repository landing page.
                if "getHermit" in url:
                    yield scrapy.FormRequest(url=url, method="POST",
                                             headers={"Content-Type": "application/json"},
                                             body=json.dumps(formdata),
                                             callback=self.project_repository)

                # Project-repository detail pages: one request per menu id.
                if "getProject" in url:
                    menu_ids = ['1538603', '655022', '845566', '404011', '404009', '655030', '404006', '1301967', '655025',
                     '1301964', '404007', '404008', '655027', '404004', '404001', '404002', '404000', '655031',
                     '655026', '655029', '403988', '655015', '403984', '655023', '655019', '1415933', '403993',
                     '902213', '655016', '403997', '403996', '403991', '1496441', '403987', '403979', '403986',
                     '403980', '1415924', '1538608', '1496432', '403990', '403994', '1496434', '403985', '403981',
                     '1415936', '403982', '403983', '833098', '942045', '655017', '403995', '403975', '845565',
                     '655024', '403977', '655020', '833108', '655028', '845564', '655021', '403974', '845568', '403976',
                     '655018', '1496430', '1538599', '404016', '404015', '404017', '404030', '404029', '404033',
                     '404039', '942054', '404035', '404034', '404037', '404032', '1415919', '404038', '404023',
                     '404022', '655065', '404027', '404026', '404025', '1415934', '404024', '404019', '1538611',
                     '404020', '845627', '845629', '404048', '404047', '404045', '845624', '845626', '845628', '845625',
                     '404043', '404042', '1301973', '902203', '1496435', '404063', '655113', '404090', '404088',
                     '404089', '404087', '404085', '404078', '404081', '1538607', '404079', '404077', '1301969',
                     '655104', '1415937', '902205', '655109', '655103', '404053', '942049', '1301962', '655099',
                     '942043', '655105', '1301970', '655101', '655102', '404054', '1496431', '1415921', '655106',
                     '655108', '1496437', '833103', '833099', '655100', '404051', '1301959', '404083', '655110',
                     '404068', '655111', '404069', '1496439', '833102', '902206', '902211', '404073', '404071',
                     '404075', '404074', '942051', '404066', '404065', '1415925', '902212', '655120', '902208',
                     '1415923', '1496428', '404059', '404060', '404058', '404056', '404057', '404061', '1301965',
                     '404104', '1301974', '1538604', '1415932', '404102', '404100', '404097', '655076', '404093',
                     '404098', '404095', '404094', '655070', '404096', '404128', '404130', '404129', '833105', '902210',
                     '655092', '404138', '404137', '404139', '655094', '655091', '404136', '1301966', '404140',
                     '845567', '404135', '404134', '942057', '655093', '655074', '404133', '655078', '902214', '404118',
                     '404116', '404114', '404115', '404112', '953321', '404110', '404111', '404125', '404124', '942048',
                     '404122', '404120', '404225', '655056', '1415931', '404201', '1538600', '404198', '404218',
                     '942044', '404219', '404199', '404197', '404221', '1538597', '404229', '833107', '404232',
                     '404200', '404209', '404196', '655069', '404220', '404211', '404207', '404194', '404224', '404210',
                     '404215', '404204', '655063', '942056', '404216', '404231', '404230', '1496440', '655077',
                     '404226', '404227', '404202', '404217', '404195', '404208', '404203', '655057', '655058', '404206',
                     '655064', '404205', '404214', '1301972', '404223', '404234', '404235', '1538601', '404152',
                     '655121', '1415926', '1538598', '404165', '404151', '404161', '1415922', '1496429', '1496433',
                     '1538606', '1301963', '655115', '404170', '1301961', '942058', '404147', '1538609', '404155',
                     '404169', '942053', '404164', '1496436', '404162', '404160', '1496442', '1301960', '404167',
                     '1415927', '942052', '655118', '953322', '1301968', '1496438', '833100', '404146', '404157',
                     '404171', '942059', '404158', '404153', '655117', '404150', '1415935', '1301958', '655116',
                     '404156', '404143', '404148', '1415928', '655114', '1415930', '404159', '404168', '404145',
                     '1415929', '404154', '1538605', '1415920', '404144', '404163', '404149', '1301971', '404183',
                     '404190', '404174', '404180', '404187', '404181', '404179', '404175', '404173', '942055', '902204',
                     '404182', '404185', '404189', '404188', '404186', '404191', '1538610', '404184']

                    # menu_ids = ['1538603', '655022', '845566']
                    for menu_id in menu_ids:
                        formdata["menu_id"] = menu_id
                        yield scrapy.FormRequest(url=url, method="POST",
                                                 headers={"Content-Type": "application/json"},
                                                 body=json.dumps(formdata),
                                                 callback=self.project_repository_detail)

        else:
            # No token yet: hit the activation endpoint first; parse_login
            # stores the returned token on the class.
            yield scrapy.FormRequest(self.start_urls[0],
                                     formdata={"activationCode": self.activationCode,
                                                                "groupId": 1824528822, "clientIP": "123.117.179.206",
                                                                "_ApplicationId": "app", "_ClientVersion": "js2.1.0",
                                                                "_InstallationId": "ebfca397-d261-0ad9-0c5e-a5f7148a1dad"},
                                     callback=self.parse_login)

    def parse_login(self, response):
        """Capture the session token returned by the activation endpoint.

        The token is stored on the class so subsequent requests can use it.
        """
        payload = response.json()
        self.token = payload.get('token')




    def parse_initial_page(self, response):
        """Parse one page of the home-feed article list and paginate backwards.

        Anchors the next request at the earliest ``create_time`` seen on this
        page (stored as ``endTime`` in ``common_formdata``), then yields one
        ``InitialPageItem`` per result.  NOTE: when no result carries a
        parsable ``create_time``, neither the follow-up request nor any item
        is yielded — behaviour preserved from the original implementation.
        """
        base_formdata = {
            "groupId": 1824528822,
            "token": self.token,
            "activationCode": self.activationCode,
            "excludeSolution": True,
        }
        payload = json.loads(response.text.encode("utf-8", "replace").decode("utf-8"))
        results = payload.get("result", [])

        # Collect every parsable create_time; the earliest drives pagination.
        timestamps = [
            datetime.strptime(entry.get("create_time", {}).get("iso", ""),
                              "%Y-%m-%dT%H:%M:%S.%fZ")
            for entry in results
            if entry.get("create_time", {}).get("iso", "")
        ]
        if not timestamps:
            return

        earliest = min(timestamps)
        # Anchor the next page strictly before everything seen so far.
        self.common_formdata["endTime"] = {
            "__type": "Date",
            "iso": earliest.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
        }

        yield scrapy.FormRequest(
            url='https://api01.scys.vip/parse/functions/queryArticle',
            method="POST",
            headers={"Content-Type": "application/json"},
            body=json.dumps({**base_formdata, **self.common_formdata}),
            callback=self.parse_initial_page,
        )

        for entry in results:
            content = entry.get("content", {})
            talk = content.get("talk", {})
            # One item per feed entry; Scrapy hands it to the pipelines.
            yield InitialPageItem(
                create_time=entry.get("create_time", {}).get("iso", ""),
                topic_id=content.get("topic_id", ""),
                group_info=content.get("group", {}),
                content_type=content.get("type", ""),
                content_create_time=content.get("create_time", ""),
                content_modify_time=content.get("modify_time", ""),
                talk_owner=talk.get("owner", {}),
                talk_text=talk.get("text", ""),
                talk_images=talk.get("images", []),
                latest_likes=content.get("latest_likes", []),
                show_comments=content.get("show_comments", []),
                annotation=content.get("annotation", ""),
                likes_count=content.get("likes_count", ""),
                rewards_count=content.get("rewards_count", ""),
                comments_count=content.get("comments_count", ""),
                digested=content.get("digested", ""),
                sticky=content.get("sticky", ""),
                user_specific=content.get("user_specific", {}),
                hashtags=entry.get("hashtags", []),
                result_topic_id=entry.get("topic_id", ""),
                result_group_id=entry.get("group_id", ""),
                result_digested=entry.get("digested", ""),
                uid=entry.get("uid", ""),
                objectId=entry.get("objectId", ""),
                className=entry.get("className", ""),
            )




    def practical_exercises(self, response):
        """Parse the latest / past practical-exercise listing.

        Yields one ``ActivityItem`` per activity found under
        ``results[0].data.data.activity`` in the response payload.
        """
        if response.status == 400:
            # The API reports parameter/auth problems with HTTP 400
            # (original comment incorrectly said 403).
            self.logger.warning(f"Received 400 response from {response.url}")

        json_data = json.loads(response.text)
        results = json_data.get("results") or []
        if not results:
            # Error or empty payload: the original crashed here on None[0].
            return
        data = results[0].get('data', {}).get('data', {})

        for activity in data.get('activity', []):
            item = ActivityItem()

            item['id'] = activity.get('id', '')
            item['gmt_create'] = activity.get('gmt_create', 0)
            item['name'] = activity.get('name', '')
            item['label'] = activity.get('label', '')
            item['cover_img'] = activity.get('cover_img', '')
            item['raw_name'] = activity.get('raw_name', '')

            # Flatten the nested template record alongside the raw copy.
            template = activity.get('template', {})
            item['template'] = template
            item['template_raw_name'] = template.get('raw_name', '')
            item['template_avatar'] = template.get('avatar', '')
            item['template_platform'] = template.get('platform', '')
            item['template_target'] = template.get('target', '')
            item['template_refund_num'] = template.get('refund_num', 0)

            item['join_cnt'] = activity.get('join_cnt', 0)
            item['user_avatar'] = activity.get('user_avatar', '')
            item['article_tag'] = activity.get('article_tag', '')
            item['gmt_start'] = activity.get('gmt_start', 0)
            item['gmt_0'] = activity.get('gmt_0', 0)
            item['gmt_1'] = activity.get('gmt_1', 0)
            item['gmt_end'] = activity.get('gmt_end', 0)
            item['price'] = activity.get('price', 0)
            item['menu'] = activity.get('menu', {})
            item['is_refund'] = activity.get('is_refund', False)

            yield item

    def practical_details(self, response):
        """Parse one activity's detail payload: veteran shares + discussions.

        Yields ``PracticalDetailsVeteranItem`` per veteran share and
        ``PracticalDetailsDiscussItem`` per discussion entry.
        """
        json_data = json.loads(response.text)
        results = json_data.get("results", [])
        if not results:
            # Error or empty payload: the original crashed here on results[0].
            return
        detail = results[0]
        # NOTE: ajaxResult/landingResult were extracted but never used in the
        # original; they have been dropped.

        for veteran in detail.get('veteranShares', []):
            item = PracticalDetailsVeteranItem()
            item['id'] = veteran.get("id", 0)
            item['gmt_create'] = veteran.get("gmt_create", 0)
            item['activity_id'] = veteran.get("activity_id", 0)
            item['activity_name'] = veteran.get("activity_name", "")
            item['activity_label'] = veteran.get("activity_label", "")
            item['category'] = veteran.get("category", "")
            item['title'] = veteran.get("title", "")
            item['tag'] = veteran.get("tag", [])
            item['href'] = veteran.get("href", "")
            item['content'] = veteran.get("content", "")
            # extra is stored as an opaque blob (author, avatar, share_time, …).
            item['extra'] = veteran.get("extra", {})
            # Source field is capitalised "IsDelete" in this payload.
            item['is_delete'] = veteran.get("IsDelete", False)
            yield item

        # BUGFIX: the default here must be a list — the original used {}, and
        # iterating a dict would yield string keys and break .get() below.
        discussions = detail.get('discussResult', {}).get('data', {}).get('items', [])
        for discuss in discussions:
            item = PracticalDetailsDiscussItem()
            item['id'] = discuss.get('id', 0)
            item['gmt_create'] = discuss.get('gmt_create', 0)
            item['activity_id'] = detail.get('activity_id', 0)
            item['category'] = discuss.get('category', '')
            item['title'] = discuss.get('title', '')
            item['tag'] = discuss.get('tag', [])
            item['href'] = discuss.get('href', '')
            item['content'] = discuss.get('content', '')
            item['extra'] = discuss.get('extra', {})
            item['is_delete'] = discuss.get('is_delete', False)
            yield item

    def share_for_activity(self, response):
        """Parse the veteran shares returned by getShareForActivity.

        Yields one ``PracticalDetailsVeteranItem`` per entry in ``result``.
        """
        payload = json.loads(response.text)

        for share in payload.get("result", []):
            item = PracticalDetailsVeteranItem()
            item['id'] = share.get("id", 0)
            item['gmt_create'] = share.get("gmt_create", 0)
            item['activity_id'] = share.get("activity_id", 0)
            # Plain string fields copied straight from the payload.
            for field in ("activity_name", "activity_label", "category",
                          "title", "href", "content"):
                item[field] = share.get(field, "")
            item['tag'] = share.get("tag", [])
            # extra kept as an opaque blob (author, avatar, share_time, …).
            item['extra'] = share.get("extra", {})
            # Source field is capitalised "IsDelete" in this payload.
            item['is_delete'] = share.get("IsDelete", False)
            yield item

    def practical_details_discuss(self, response):
        """Parse Q&A discussion records from the QA class endpoint.

        Yields one ``PracticalDetailsDiscussItem`` per record in ``results``.
        """
        payload = json.loads(response.text)

        for record in payload.get("results", []):
            yield PracticalDetailsDiscussItem(
                objectId=record.get('objectId', ''),
                tag=record.get('tag', []),
                extra=record.get('extra', {}),
                qid=record.get('qid', 0),
                gmt_create=record.get('gmt_create', 0),
                category=record.get('category', ''),
                title=record.get('title', ''),
                href=record.get('href', ''),
                content=record.get('content', ''),
                activity_id=record.get('activity_id', 0),
                createdAt=record.get('createdAt', ''),
                updatedAt=record.get('updatedAt', ''),
                like_list=record.get('like_list', []),
                elastic=record.get('elastic', False),
                ACL=record.get('ACL', {}),
            )

    def project_repository(self, response):
        """Parse the project-repository landing payload (getHermit).

        Yields one summary ``ProjectRepositoryItem``, then per-child
        ``ProjectRepositoryTopicItem``s and per-article
        ``ProjectRepositoryProjectItem``s.
        """
        json_data = json.loads(response.text)
        # BUGFIX: debug print() replaced with the spider logger.
        self.logger.debug("getHermit payload: %s", json_data)
        datas = json_data.get("result", {}).get("data", {}).get("data", {})

        # Kept as opaque blobs for now; purpose not yet understood.
        content_count = datas.get("content_count", {})
        hot = datas.get("hot", [])
        hot_words = datas.get("hot_words", [])
        toolbar = datas.get("toolbar", [])
        # These two are additionally broken out item-by-item below.
        projects = datas.get("project", [])
        topics = datas.get("topic", [])

        summary = ProjectRepositoryItem()
        summary['content_count'] = content_count
        summary['hot'] = hot
        summary['hot_words'] = hot_words
        summary['toolbar'] = toolbar
        summary['project'] = projects
        summary['topic'] = topics
        yield summary

        for topic in topics:
            for child in topic.get("children", []):
                # BUGFIX: fresh item per child — the original mutated and
                # re-yielded one shared item, which can corrupt earlier items
                # in pipelines/exporters that hold references.
                topic_item = ProjectRepositoryTopicItem()
                topic_item['topic_label'] = topic.get('label', '')
                # NOTE(review): the original also copied 'label' into
                # topic_value — looks like it should be topic.get('value');
                # kept as-is pending confirmation against real payloads.
                topic_item['topic_value'] = topic.get('label', '')
                topic_item['label'] = child.get('label', '')
                topic_item['name'] = child.get('name', '')
                topic_item['value'] = child.get('value', '')
                yield topic_item

        for project in projects:
            for category in project.get("children", []):
                for article in category.get("children", []):
                    # Fresh item per article for the same reason as above;
                    # parent-level fields are copied into every item.
                    project_item = ProjectRepositoryProjectItem()
                    project_item['project_article_cnt'] = project.get('article_cnt', 0)
                    project_item['project_category_cnt'] = project.get('category_cnt', 0)
                    project_item['project_name'] = project.get('name', '')
                    project_item['project_cnt'] = project.get('project_cnt', 0)
                    project_item['category_article_cnt'] = category.get('article_cnt', '')
                    project_item['category_name'] = category.get('name', '')
                    project_item['category_project_cnt'] = category.get('project_cnt', 0)
                    project_item['article_id'] = article.get('id', '')
                    project_item['article_name'] = article.get('name', '')
                    project_item['article_cnt'] = article.get('article_cnt', 0)
                    yield project_item



    def project_repository_detail(self, response):
        """Parse one getProject menu page into per-article detail items.

        Yields one ``ProjectRepositoryDetailsItem`` per article, each carrying
        the shared menu-level fields (menu_id, menu_info, user_info).
        """
        json_data = json.loads(response.text.encode("utf-8", "replace").decode("utf-8"))
        result = json_data.get("result", {})
        datas = result.get("data", {}).get("data", {})
        extra = datas.get("extra", {})

        # Menu-level fields copied into every article item below.
        menu_id = result.get("menu_id", '')
        menu_info = extra.get("menu", {})
        user_info = extra.get("user", {})

        for article in datas.get("items", []):
            # BUGFIX: fresh item per article — the original mutated and
            # re-yielded a single shared item, which can corrupt earlier
            # items in pipelines/exporters that hold references.
            item = ProjectRepositoryDetailsItem()
            item['menu_id'] = menu_id
            item['menu_info'] = menu_info
            item['user_info'] = user_info
            item['article_content'] = article.get("article_content", "")
            item['comments_count'] = article.get("comments_count", 0)
            item['create_user_id'] = article.get("create_user_id", 0)
            item['create_user_level'] = article.get("create_user_level", 0)
            item['show_create_user_id'] = article.get("show_create_user_id", 0)
            item['show_title'] = article.get("show_title", "")
            item['gmt_create'] = article.get("gmt_create", 0)
            item['gmt_update'] = article.get("gmt_update", 0)
            item['is_digested'] = article.get("is_digested", 0)
            item['like_count'] = article.get("like_count", 0)
            item['rewards_count'] = article.get("rewards_count", 0)
            item['reading_count'] = article.get("reading_count", 0)
            item['menu_ids'] = article.get("menu_ids", "")
            item['type'] = article.get("type", "")
            item['task_topic_id'] = article.get("task_topic_id", 0)
            item['topic_id'] = article.get("topic_id", 0)
            yield item






