Upload keep aspect ratio
src/routes/+page.svelte (+12 -2)
@@ -244,7 +244,18 @@
       imgEl.onload = () => resolve(imgEl);
     });
     const { width, height } = imgEl;
-
+    // keep aspect ratio
+    if (width == height) {
+      ctx?.drawImage(imgEl, 0, 0, width, height, 0, 0, canvasSize, canvasSize);
+    } else if (width > height) {
+      const canvasHeight = Math.floor((canvasSize * height) / width);
+      const padding = Math.floor((canvasSize - canvasHeight) / 2);
+      ctx?.drawImage(imgEl, 0, 0, width, height, 0, padding, canvasSize, canvasHeight);
+    } else {
+      const canvasWidth = Math.floor((canvasSize * width) / height);
+      const padding = Math.floor((canvasSize - canvasWidth) / 2);
+      ctx?.drawImage(imgEl, 0, 0, width, height, padding, 0, canvasWidth, canvasSize);
+    }
   }
 
   function onfImgUpload() {
@@ -413,4 +424,3 @@ The model is licensed with a [CreativeML Open RAIL-M](https://huggingface.co/spa
 ### Biases and content acknowledgment
 Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the [LAION-5B dataset](https://laion.ai/blog/laion-5b/), which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4)
 </article>
-
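For readers skimming the diff, here is the same letterboxing logic written as a standalone helper. This is a sketch, not the Space's actual code: the function name `drawImageKeepAspect` and the non-optional `ctx` parameter are illustrative, and it assumes `canvasSize` is the side length of the square destination canvas. The math matches the diff: the image is scaled so its longer side fills the canvas, and the shorter side is centred with equal padding on both ends.

```ts
// Sketch of the aspect-ratio-preserving draw (names are illustrative).
// Scales the image to fit a square canvas of side `canvasSize`, centring
// the shorter output dimension with equal padding on both sides.
function drawImageKeepAspect(
  ctx: CanvasRenderingContext2D,
  imgEl: HTMLImageElement,
  canvasSize: number
): void {
  const { width, height } = imgEl;
  if (width === height) {
    // Already square: fill the whole canvas.
    ctx.drawImage(imgEl, 0, 0, width, height, 0, 0, canvasSize, canvasSize);
  } else if (width > height) {
    // Landscape: full width, reduced height, vertical padding.
    const canvasHeight = Math.floor((canvasSize * height) / width);
    const padding = Math.floor((canvasSize - canvasHeight) / 2);
    ctx.drawImage(imgEl, 0, 0, width, height, 0, padding, canvasSize, canvasHeight);
  } else {
    // Portrait: full height, reduced width, horizontal padding.
    const canvasWidth = Math.floor((canvasSize * width) / height);
    const padding = Math.floor((canvasSize - canvasWidth) / 2);
    ctx.drawImage(imgEl, 0, 0, width, height, padding, 0, canvasWidth, canvasSize);
  }
}
```

`drawImage` is used in its nine-argument form, `drawImage(image, sx, sy, sWidth, sHeight, dx, dy, dWidth, dHeight)`: the source rectangle is always the full uploaded image, while the destination rectangle is the scaled, centred box. As a worked example, a 1024x768 upload drawn onto a 512px canvas gives `canvasHeight = Math.floor(512 * 768 / 1024) = 384` and `padding = Math.floor((512 - 384) / 2) = 64`, so the image spans the full width with 64px bands above and below.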