# based on https://huggingface.co/spaces/NimaBoscarino/climategan/blob/main/app.py  # noqa: E501
# thank you @NimaBoscarino

import os
from datetime import datetime
from textwrap import dedent
from urllib import parse

import googlemaps
import gradio as gr
import numpy as np
from gradio.components import (
    HTML,
    Button,
    Column,
    Dropdown,
    Image,
    Markdown,
    Radio,
    Row,
    Textbox,
)
from requests import get
from skimage import io

from climategan_wrapper import ClimateGAN
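
# This Space wraps ClimateGAN inference in a Gradio UI: the user either enters
# an address (geocoded with the Google Maps API, then fetched as a 640x640
# image from the Street View Static API) or uploads their own Street View
# picture, and the model renders how that scene could look under flooding,
# wildfire, or smog. Floods can be painted by ClimateGAN's original Painter,
# by a Stable Diffusion in-painting pipeline, or by both for comparison.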

TEXTS = [
    dedent(
        """
        <p>
        Climate change does not impact everyone equally. This Space shows the
        effects of the climate emergency, "one address at a time". Visit the
        original experience at ThisClimateDoesNotExist.com
        </p>
        <p>
        Enter an address or upload a Street View image, and ClimateGAN will
        generate images showing how the location could be impacted by
        flooding, wildfires, or smog if it happened there.
        </p>
        <p>
        This is <strong>NOT</strong> an exercise in climate prediction, but
        rather an exercise in empathy: putting yourself in others' shoes, as
        if climate change came crashing down on your doorstep.
        </p>
        <p>
        After you have selected an image and started the inference, you will
        see all of ClimateGAN's outputs, including intermediate ones such as
        the flood mask, the segmentation map, and the depth map used to
        produce the three events.
        </p>
        <p>
        This Space makes use of recent Stable Diffusion in-painting pipelines
        to replace ClimateGAN's original Painter. If you select 'Both'
        painters, you will see a comparison of the two.
        </p>
        <p>
        Visit ThisClimateDoesNotExist.com for more information |
        Original ClimateGAN GitHub Repo |
        Read the original ICLR 2021 ClimateGAN paper
        </p>
""" ), dedent( """ ## How to use this Space 1. Enter an address or upload a Street View image (at least 640x640) 2. Select the type of Painter you'd like to use for the flood renderings 3. Click on the "See for yourself!" button 4. Wait for the inference to complete, typically around 30 seconds (plus queue time) 5. Enjoy the results! 1. The prompt for Stable Diffusion is `An HD picture of a street with dirty water after a heavy flood` 2. Pay attention to potential "inventions" by Stable Diffusion's in-painting 3. The "restricted to masked area" SD output is the result of: `y = mask * flooded + (1-mask) * input` """ ), ] CSS = dedent( """ a { color: #0088ff; text-decoration: underline; } strong { color: #c34318; font-weight: bolder; } #how-to-use-md li { margin: 0.1em; } #how-to-use-md li p { margin: 0.1em; } """ ) def toggle(radio): if "address" in radio.lower(): return [ gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), ] else: return [ gr.update(visible=False), gr.update(visible=True), gr.update(visible=True), ] def predict(cg: ClimateGAN, api_key): def _predict(*args): print(f"Starting inference ({str(datetime.now())})") image = place = painter = radio = None if api_key: radio, image, place, painter = args else: image, painter = args if api_key and place and "address" in radio.lower(): geocode_result = gmaps.geocode(place) address = geocode_result[0]["formatted_address"] static_map_url = f"https://maps.googleapis.com/maps/api/streetview?size=640x640&location={parse.quote(address)}&source=outdoor&key={api_key}" img_np = io.imread(static_map_url) print("Using GSV image") else: print("Using user image") img_np = image painters = { "ClimateGAN Painter": "climategan", "Stable Diffusion Painter": "stable_diffusion", "Both": "both", } print("Using painter", painters[painter]) output_dict = cg.infer_single( img_np, painters[painter], concats=[ "input", "masked_input", "climategan_flood", "stable_copy_flood", ], as_pil_image=True, ) input_image = output_dict["input"] masked_input = output_dict["masked_input"] wildfire = output_dict["wildfire"] smog = output_dict["smog"] depth = np.repeat(output_dict["depth"], 3, axis=-1) segmentation = output_dict["segmentation"] climategan_flood = output_dict.get( "climategan_flood", np.ones(input_image.shape, dtype=np.uint8) * 255, ) stable_flood = output_dict.get( "stable_flood", np.ones(input_image.shape, dtype=np.uint8) * 255, ) stable_copy_flood = output_dict.get( "stable_copy_flood", np.ones(input_image.shape, dtype=np.uint8) * 255, ) concat = output_dict.get( "concat", np.ones(input_image.shape, dtype=np.uint8) * 255, ) return ( input_image, masked_input, segmentation, depth, climategan_flood, stable_flood, stable_copy_flood, concat, wildfire, smog, ) return _predict if __name__ == "__main__": ip = get("https://api.ipify.org").content.decode("utf8") print("My public IP address is: {}".format(ip)) api_key = os.environ.get("GMAPS_API_KEY") gmaps = None if api_key is not None: gmaps = googlemaps.Client(key=api_key) cg = ClimateGAN( model_path="config/model/masker", dev_mode=os.environ.get("CG_DEV_MODE", "").lower() == "true", ) cg._setup_stable_diffusion() radio = address = None pred_ins = [] pred_outs = [] with gr.Blocks(css=CSS) as app: with Row(): with Column(): Markdown("# ClimateGAN: Visualize Climate Change") HTML(TEXTS[0]) with Column(): Markdown(TEXTS[1], elem_id="how-to-use-md") with Row(): HTML("