{"node":{"id":"urn:cid:bafkr4ieravyzq7b5h6524uwq4ydzrtpn42d2t55g5iviuu5mrp4mntlzde","properties":{"timestamp":"2024-01-18T14:25:39Z","nodeType":"data","dataRegistrationJcs":"urn:cid:baga6yaq6eawo2tmbxijdveaiqokdjjndr5rb45bu3k7hslxaa3bpkiabcg7fk","registeredBy":"did:key:z6MkutbV1GPZLLquVDwfVrkHcJezwvvCp92RmL1MGdvN4J5P"}},"enrichments":{"asset_hub":{"asset_id":160,"asset_name":"laion/CLIP-ViT-H-14-laion2B-s32B-b79K","owning_project":"LAION-2B","asset_description":"A CLIP ViT-H/14 model trained using the LAION-2B English subset of LAION-5B, utilizing OpenCLIP. The model, developed by Romain Beaumont on the stability.ai cluster, is designed for zero-shot, arbitrary image classification and aims to aid research in understanding the potential impact of such models.","asset_format":"OpenCLIP","asset_type":"Model","asset_blob_type":"iroh-collection","source_location_url":"","contact_info":"Refer to Hugging Face's official channels for contact information.","license":"MIT","license_link":"https://doi.org/10.5281/zenodo.5143773","registered_date":"2024-01-18T14:27:43.028061Z","last_modified_date":"2024-01-18T14:27:43.028061Z"}}}