diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..4945cf585ea6a8f1b1b2317021f3f77ebfcf7beb --- /dev/null +++ b/app.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python + +from __future__ import annotations + +import argparse +import pathlib + +import gradio as gr + +from vtoonify_model import Model + +DESCRIPTION = ''' +
+

+ Portrait Style Transfer with VToonify +

+
+''' +FOOTER = '
visitor badge
' + +ARTICLE = r""" +If VToonify is helpful, please help to ⭐ the Github Repo. Thanks! +[![GitHub Stars](https://img.shields.io/github/stars/sczhou/CodeFormer?style=social)](https://github.com/williamyang1991/VToonify) +--- +📝 **Citation** +If our work is useful for your research, please consider citing: +```bibtex +@article{yang2022Vtoonify, + title={VToonify: Controllable High-Resolution Portrait Video Style Transfer}, + author={Yang, Shuai and Jiang, Liming and Liu, Ziwei and Loy, Chen Change}, + journal={ACM Transactions on Graphics (TOG)}, + volume={41}, + number={6}, + articleno={203}, + pages={1--15}, + year={2022}, + publisher={ACM New York, NY, USA}, + doi={10.1145/3550454.3555437}, +} +``` +📋 **License** +This project is licensed under S-Lab License 1.0. +Redistribution and use for non-commercial purposes should follow this license. +📧 **Contact** +If you have any questions, please feel free to reach me out at williamyang@pku.edu.cn. +""" + +def update_slider(choice: str) -> dict: + if type(choice) == str and choice.endswith('-d'): + return gr.Slider.update(maximum=1, minimum=0, value=0.5) + else: + return gr.Slider.update(maximum=0.5, minimum=0.5, value=0.5) + +def set_example_image(example: list) -> dict: + return gr.Image.update(value=example[0]) + +def set_example_video(example: list) -> dict: + return gr.Video.update(value=example[0]), + +sample_video = ['./vtoonify/data/529.mp4', './vtoonify/data/pexels-anthony-shkraba-7525601.mp4'] +sample_vid = gr.Video(label='Video file') #for displaying the example +example_videos = gr.components.Dataset(components=[sample_vid], samples=[[path] for path in sample_video], type='values', label='Video Examples') + +def main(): + args = parse_args() + model = Model(device=args.device) + + with gr.Blocks(theme=args.theme, css='style.css') as demo: + + gr.Markdown(DESCRIPTION) + + with gr.Box(): + gr.Markdown('''## Step 1(Select Style) + - Select **Style Type**. + - Types with `-d` means it supports style degree adjustment. + - Types without `-d` usually has better toonification quality. + + ''') + with gr.Row(): + with gr.Column(): + gr.Markdown('''Select Style Type''') + with gr.Row(): + style_type = gr.Radio(label='Style Type', + choices=['cartoon1','cartoon1-d','cartoon2-d','cartoon3-d', + 'cartoon4','cartoon4-d','cartoon5-d','comic1-d', + 'comic2-d','comic3-d', 'arcane1','arcane1-d','arcane2', 'arcane2-d', + 'caricature1','caricature2','pixar','pixar-d' + ] + ) + exstyle = gr.Variable() + with gr.Row(): + loadmodel_button = gr.Button('Load Model') + with gr.Row(): + load_info = gr.Textbox(label='Process Information', interactive=False, value='No model loaded.') + with gr.Column(): + gr.Markdown('''Reference Styles + ![example](https://raw.githubusercontent.com/williamyang1991/tmpfile/master/vtoonify/style.jpg)''') + + + with gr.Box(): + gr.Markdown('''## Step 2 (Preprocess Input Image / Video) + - Drop an image/video containing a near-frontal face to the **Input Image**/**Input Video. + - If there are multiple faces, hit the Edit button in the upper right corner and crop the source beforehand. + - Hit the **Rescale Image**/ **Rescale First Frame** button. + - The final image result will be based on this **Rescaled Face**. + - Use padding parameters to adjust the background space. + - For video input, further hit the **Rescale Video** button. + - The final video result will be based on this **Rescaled Video**. + + ''') + with gr.Row(): + with gr.Box(): + with gr.Column(): + gr.Markdown('''Choose the padding parameters. 
+ ![example](https://raw.githubusercontent.com/williamyang1991/tmpfile/master/vtoonify/rescale.jpg)''') + with gr.Row(): + top = gr.Slider(128, + 320, + value=200, + step=8, + label='top') + with gr.Row(): + bottom = gr.Slider(128, + 320, + value=200, + step=8, + label='bottom') + with gr.Row(): + left = gr.Slider(128, + 320, + value=200, + step=8, + label='left') + with gr.Row(): + right = gr.Slider(128, + 320, + value=200, + step=8, + label='right') + with gr.Box(): + with gr.Column(): + gr.Markdown('''Input''') + with gr.Row(): + input_image = gr.Image(label='Input Image', + type='filepath') + with gr.Row(): + preprocess_image_button = gr.Button('Rescale Image') + with gr.Row(): + input_video = gr.Video(label='Input Video', + mirror_webcam=False, + type='filepath') + with gr.Row(): + preprocess_video0_button = gr.Button('Rescale First Frame') + preprocess_video1_button = gr.Button('Rescale Video') + + with gr.Box(): + with gr.Column(): + gr.Markdown('''View''') + with gr.Row(): + input_info = gr.Textbox(label='Process Information', interactive=False, value='n.a.') + with gr.Row(): + aligned_face = gr.Image(label='Rescaled Face', + type='numpy', + interactive=False) + instyle = gr.Variable() + with gr.Row(): + aligned_video = gr.Video(label='Rescaled Video', + type='mp4', + interactive=False) + with gr.Row(): + with gr.Column(): + paths = ['./vtoonify/data/077436.jpg', './vtoonify/data/pexels-andrea-piacquadio-733872.jpg'] + example_images = gr.Dataset(components=[input_image], + samples=[[path] for path in paths], + label='Image Examples') + with gr.Column(): + #example_videos = gr.Dataset(components=[input_video], samples=[['./vtoonify/data/529.mp4']], type='values') + #to render video example on mouse hover/click + example_videos.render() + #to load sample video into input_video upon clicking on it + def load_examples(video): + print("****** inside load_example() ******") + print("in_video is : ", video[0]) + return video[0] + + example_videos.click(load_examples, example_videos, input_video) + + with gr.Box(): + gr.Markdown('''## Step 3 (Generate Style Transferred Image/Video)''') + with gr.Row(): + with gr.Column(): + gr.Markdown(''' + + - Adjust **Style Degree**. + - For image, hit **Toonify!** to toonify **Rescaled Face**. + - For video, hit e **VToonify!** to toonify **Rescaled Video**. 
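                    For reference, the same three-step flow can be driven without the UI. The sketch below is illustrative only: the method names and argument order are taken from the button callbacks registered later in this file, but calling them directly, the `'cuda'` device string, and the example style/degree values are assumptions (the `Model` class itself lives in `vtoonify_model.py`, which is not part of this diff).

```python
# Rough, hypothetical sketch of the Step 1-3 image pipeline, outside of Gradio.
from vtoonify_model import Model  # assumed to provide the methods wired to the buttons below

model = Model(device='cuda')  # device string is an assumption

# Step 1: load a style; returns the extrinsic style code and a status message.
exstyle, load_info = model.load_model('cartoon1-d')

# Step 2: rescale/align the face with (top, bottom, left, right) padding.
aligned_face, instyle, info = model.detect_and_align_image(
    './vtoonify/data/077436.jpg', 200, 200, 200, 200)

# Step 3: toonify the rescaled face at the chosen style degree.
result_face = model.image_toonify(aligned_face, instyle, exstyle, 0.5)
```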
+ ''') + style_degree = gr.Slider(0, + 1, + value=0.5, + step=0.05, + label='Style Degree') + with gr.Column(): + gr.Markdown('''![example](https://raw.githubusercontent.com/williamyang1991/tmpfile/master/vtoonify/degree.jpg) + ''') + with gr.Row(): + with gr.Column(): + with gr.Row(): + result_face = gr.Image(label='Result Image', + type='numpy', + interactive=False) + with gr.Row(): + toonify_button = gr.Button('Toonify!') + with gr.Column(): + with gr.Row(): + result_video = gr.Video(label='Result Video', + type='mp4', + interactive=False) + with gr.Row(): + vtoonify_button = gr.Button('VToonify!') + + gr.Markdown(ARTICLE) + gr.Markdown(FOOTER) + + loadmodel_button.click(fn=model.load_model, + inputs=[style_type], + outputs=[exstyle, load_info]) + + + style_type.change(fn=update_slider, + inputs=style_type, + outputs=style_degree) + + preprocess_image_button.click(fn=model.detect_and_align_image, + inputs=[input_image, top, bottom, left, right], + outputs=[aligned_face, instyle, input_info]) + preprocess_video0_button.click(fn=model.detect_and_align_video, + inputs=[input_video, top, bottom, left, right], + outputs=[aligned_face, instyle, input_info]) + preprocess_video1_button.click(fn=model.detect_and_align_full_video, + inputs=[input_video, top, bottom, left, right], + outputs=[aligned_video, instyle, input_info]) + + toonify_button.click(fn=model.image_toonify, + inputs=[aligned_face, instyle, exstyle, style_degree], + outputs=[result_face]) + vtoonify_button.click(fn=model.video_tooniy, + inputs=[aligned_video, instyle, exstyle, style_degree], + outputs=[result_video]) + + + example_images.click(fn=set_example_image, + inputs=example_images, + outputs=example_images.components) + + demo.launch( + enable_queue=args.enable_queue, + server_port=args.port, + share=args.share, + ) + + +if __name__ == '__main__': + main() diff --git a/packages.txt b/packages.txt new file mode 100644 index 0000000000000000000000000000000000000000..e8ad28ebdd8e76c0eee3aa48351860e675c56e75 --- /dev/null +++ b/packages.txt @@ -0,0 +1,4 @@ +bzip2 +cmake +ninja-build + diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..48b738ee7192584d94075820c69914c8359c6348 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,7 @@ +dlib==19.23.0 +numpy==1.22.3 +opencv-python-headless==4.5.5.62 +Pillow==9.0.1 +scipy==1.8.0 +torch==1.11.0 +torchvision==0.12.0 \ No newline at end of file diff --git a/vtoonify/LICENSE.md b/vtoonify/LICENSE.md new file mode 100644 index 0000000000000000000000000000000000000000..a7e5837d44361b7aa1d633b9d36783ac838a45bc --- /dev/null +++ b/vtoonify/LICENSE.md @@ -0,0 +1,12 @@ +# S-Lab License 1.0 + +Copyright 2022 S-Lab + +Redistribution and use for non-commercial purpose in source and binary forms, with or without modification, are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +4. In the event that redistribution and/or use for commercial purpose in source or binary forms, with or without modification is required, please contact the contributor(s) of the work. + + diff --git a/vtoonify/data/077436.jpg b/vtoonify/data/077436.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65ad13508376874aec688dcd8b4a5faefe8ba62d Binary files /dev/null and b/vtoonify/data/077436.jpg differ diff --git a/vtoonify/data/pexels-andrea-piacquadio-733872.jpg b/vtoonify/data/pexels-andrea-piacquadio-733872.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7c08baabb9510352ab7c22333b71db2f624acae4 Binary files /dev/null and b/vtoonify/data/pexels-andrea-piacquadio-733872.jpg differ diff --git a/vtoonify/model/__init__.py b/vtoonify/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vtoonify/model/bisenet/LICENSE b/vtoonify/model/bisenet/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..bfae0b0c29f885a118e382b445b6eaeca0d3b3e6 --- /dev/null +++ b/vtoonify/model/bisenet/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 zll + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vtoonify/model/bisenet/README.md b/vtoonify/model/bisenet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..849d55e2789c8852e01707d1ff755dc74e63a7f5 --- /dev/null +++ b/vtoonify/model/bisenet/README.md @@ -0,0 +1,68 @@ +# face-parsing.PyTorch + +

+ + + +

+ +### Contents +- [Training](#training) +- [Demo](#Demo) +- [References](#references) + +## Training + +1. Prepare training data: + -- download [CelebAMask-HQ dataset](https://github.com/switchablenorms/CelebAMask-HQ) + + -- change file path in the `prepropess_data.py` and run +```Shell +python prepropess_data.py +``` + +2. Train the model using CelebAMask-HQ dataset: +Just run the train script: +``` + $ CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 train.py +``` + +If you do not wish to train the model, you can download [our pre-trained model](https://drive.google.com/open?id=154JgKpzCPW82qINcVieuPH3fZ2e0P812) and save it in `res/cp`. + + +## Demo +1. Evaluate the trained model using: +```Shell +# evaluate using GPU +python test.py +``` + +## Face makeup using parsing maps +[**face-makeup.PyTorch**](https://github.com/zllrunning/face-makeup.PyTorch) + + + + + + + + + + + + + + + + + + + + + + +
 HairLip
Original InputOriginal InputOriginal Input
ColorColorColor
+ + +## References +- [BiSeNet](https://github.com/CoinCheung/BiSeNet) \ No newline at end of file diff --git a/vtoonify/model/bisenet/model.py b/vtoonify/model/bisenet/model.py new file mode 100644 index 0000000000000000000000000000000000000000..e61c0eb20aaa63065cc17bbcfe27b245f1f0dbf5 --- /dev/null +++ b/vtoonify/model/bisenet/model.py @@ -0,0 +1,283 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision + +from model.bisenet.resnet import Resnet18 +# from modules.bn import InPlaceABNSync as BatchNorm2d + + +class ConvBNReLU(nn.Module): + def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs): + super(ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_chan, + out_chan, + kernel_size = ks, + stride = stride, + padding = padding, + bias = False) + self.bn = nn.BatchNorm2d(out_chan) + self.init_weight() + + def forward(self, x): + x = self.conv(x) + x = F.relu(self.bn(x)) + return x + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + +class BiSeNetOutput(nn.Module): + def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs): + super(BiSeNetOutput, self).__init__() + self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1) + self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False) + self.init_weight() + + def forward(self, x): + x = self.conv(x) + x = self.conv_out(x) + return x + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class AttentionRefinementModule(nn.Module): + def __init__(self, in_chan, out_chan, *args, **kwargs): + super(AttentionRefinementModule, self).__init__() + self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1) + self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size= 1, bias=False) + self.bn_atten = nn.BatchNorm2d(out_chan) + self.sigmoid_atten = nn.Sigmoid() + self.init_weight() + + def forward(self, x): + feat = self.conv(x) + atten = F.avg_pool2d(feat, feat.size()[2:]) + atten = self.conv_atten(atten) + atten = self.bn_atten(atten) + atten = self.sigmoid_atten(atten) + out = torch.mul(feat, atten) + return out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + +class ContextPath(nn.Module): + def __init__(self, *args, **kwargs): + super(ContextPath, self).__init__() + self.resnet = Resnet18() + self.arm16 = AttentionRefinementModule(256, 128) + self.arm32 = AttentionRefinementModule(512, 128) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0) + + self.init_weight() + + def forward(self, x): + H0, W0 = x.size()[2:] + feat8, feat16, feat32 = 
self.resnet(x) + H8, W8 = feat8.size()[2:] + H16, W16 = feat16.size()[2:] + H32, W32 = feat32.size()[2:] + + avg = F.avg_pool2d(feat32, feat32.size()[2:]) + avg = self.conv_avg(avg) + avg_up = F.interpolate(avg, (H32, W32), mode='nearest') + + feat32_arm = self.arm32(feat32) + feat32_sum = feat32_arm + avg_up + feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest') + feat32_up = self.conv_head32(feat32_up) + + feat16_arm = self.arm16(feat16) + feat16_sum = feat16_arm + feat32_up + feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest') + feat16_up = self.conv_head16(feat16_up) + + return feat8, feat16_up, feat32_up # x8, x8, x16 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +### This is not used, since I replace this with the resnet feature with the same size +class SpatialPath(nn.Module): + def __init__(self, *args, **kwargs): + super(SpatialPath, self).__init__() + self.conv1 = ConvBNReLU(3, 64, ks=7, stride=2, padding=3) + self.conv2 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1) + self.conv3 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1) + self.conv_out = ConvBNReLU(64, 128, ks=1, stride=1, padding=0) + self.init_weight() + + def forward(self, x): + feat = self.conv1(x) + feat = self.conv2(feat) + feat = self.conv3(feat) + feat = self.conv_out(feat) + return feat + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class FeatureFusionModule(nn.Module): + def __init__(self, in_chan, out_chan, *args, **kwargs): + super(FeatureFusionModule, self).__init__() + self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0) + self.conv1 = nn.Conv2d(out_chan, + out_chan//4, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.conv2 = nn.Conv2d(out_chan//4, + out_chan, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.relu = nn.ReLU(inplace=True) + self.sigmoid = nn.Sigmoid() + self.init_weight() + + def forward(self, fsp, fcp): + fcat = torch.cat([fsp, fcp], dim=1) + feat = self.convblk(fcat) + atten = F.avg_pool2d(feat, feat.size()[2:]) + atten = self.conv1(atten) + atten = self.relu(atten) + atten = self.conv2(atten) + atten = self.sigmoid(atten) + feat_atten = torch.mul(feat, atten) + feat_out = feat_atten + feat + return feat_out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in 
self.named_modules(): + if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class BiSeNet(nn.Module): + def __init__(self, n_classes, *args, **kwargs): + super(BiSeNet, self).__init__() + self.cp = ContextPath() + ## here self.sp is deleted + self.ffm = FeatureFusionModule(256, 256) + self.conv_out = BiSeNetOutput(256, 256, n_classes) + self.conv_out16 = BiSeNetOutput(128, 64, n_classes) + self.conv_out32 = BiSeNetOutput(128, 64, n_classes) + self.init_weight() + + def forward(self, x): + H, W = x.size()[2:] + feat_res8, feat_cp8, feat_cp16 = self.cp(x) # here return res3b1 feature + feat_sp = feat_res8 # use res3b1 feature to replace spatial path feature + feat_fuse = self.ffm(feat_sp, feat_cp8) + + feat_out = self.conv_out(feat_fuse) + feat_out16 = self.conv_out16(feat_cp8) + feat_out32 = self.conv_out32(feat_cp16) + + feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True) + feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True) + feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True) + return feat_out, feat_out16, feat_out32 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], [] + for name, child in self.named_children(): + child_wd_params, child_nowd_params = child.get_params() + if isinstance(child, FeatureFusionModule) or isinstance(child, BiSeNetOutput): + lr_mul_wd_params += child_wd_params + lr_mul_nowd_params += child_nowd_params + else: + wd_params += child_wd_params + nowd_params += child_nowd_params + return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params + + +if __name__ == "__main__": + net = BiSeNet(19) + net.cuda() + net.eval() + in_ten = torch.randn(16, 3, 640, 480).cuda() + out, out16, out32 = net(in_ten) + print(out.shape) + + net.get_params() diff --git a/vtoonify/model/bisenet/resnet.py b/vtoonify/model/bisenet/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..aa2bf95130e9815ba378cb6f73207068b81a04b9 --- /dev/null +++ b/vtoonify/model/bisenet/resnet.py @@ -0,0 +1,109 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.model_zoo as modelzoo + +# from modules.bn import InPlaceABNSync as BatchNorm2d + +resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth' + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class BasicBlock(nn.Module): + def __init__(self, in_chan, out_chan, stride=1): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(in_chan, out_chan, stride) + self.bn1 = nn.BatchNorm2d(out_chan) + self.conv2 = conv3x3(out_chan, out_chan) + self.bn2 = nn.BatchNorm2d(out_chan) + self.relu = nn.ReLU(inplace=True) + self.downsample = None + if in_chan != out_chan or stride != 1: + self.downsample = nn.Sequential( + nn.Conv2d(in_chan, out_chan, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(out_chan), + ) + + def 
forward(self, x): + residual = self.conv1(x) + residual = F.relu(self.bn1(residual)) + residual = self.conv2(residual) + residual = self.bn2(residual) + + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x) + + out = shortcut + residual + out = self.relu(out) + return out + + +def create_layer_basic(in_chan, out_chan, bnum, stride=1): + layers = [BasicBlock(in_chan, out_chan, stride=stride)] + for i in range(bnum-1): + layers.append(BasicBlock(out_chan, out_chan, stride=1)) + return nn.Sequential(*layers) + + +class Resnet18(nn.Module): + def __init__(self): + super(Resnet18, self).__init__() + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1) + self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2) + self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2) + self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2) + self.init_weight() + + def forward(self, x): + x = self.conv1(x) + x = F.relu(self.bn1(x)) + x = self.maxpool(x) + + x = self.layer1(x) + feat8 = self.layer2(x) # 1/8 + feat16 = self.layer3(feat8) # 1/16 + feat32 = self.layer4(feat16) # 1/32 + return feat8, feat16, feat32 + + def init_weight(self): + state_dict = modelzoo.load_url(resnet18_url) + self_state_dict = self.state_dict() + for k, v in state_dict.items(): + if 'fc' in k: continue + self_state_dict.update({k: v}) + self.load_state_dict(self_state_dict) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +if __name__ == "__main__": + net = Resnet18() + x = torch.randn(16, 3, 224, 224) + out = net(x) + print(out[0].size()) + print(out[1].size()) + print(out[2].size()) + net.get_params() diff --git a/vtoonify/model/dualstylegan.py b/vtoonify/model/dualstylegan.py new file mode 100644 index 0000000000000000000000000000000000000000..60d9850ad049a2751781871d6ae0c2779ecc863f --- /dev/null +++ b/vtoonify/model/dualstylegan.py @@ -0,0 +1,203 @@ +import random +import torch +from torch import nn +from model.stylegan.model import ConvLayer, PixelNorm, EqualLinear, Generator + +class AdaptiveInstanceNorm(nn.Module): + def __init__(self, fin, style_dim=512): + super().__init__() + + self.norm = nn.InstanceNorm2d(fin, affine=False) + self.style = nn.Linear(style_dim, fin * 2) + + self.style.bias.data[:fin] = 1 + self.style.bias.data[fin:] = 0 + + def forward(self, input, style): + style = self.style(style).unsqueeze(2).unsqueeze(3) + gamma, beta = style.chunk(2, 1) + out = self.norm(input) + out = gamma * out + beta + return out + +# modulative residual blocks (ModRes) +class AdaResBlock(nn.Module): + def __init__(self, fin, style_dim=512, dilation=1): # modified + super().__init__() + + self.conv = ConvLayer(fin, fin, 3, dilation=dilation) # modified + self.conv2 = ConvLayer(fin, fin, 3, dilation=dilation) # modified + self.norm = AdaptiveInstanceNorm(fin, style_dim) + self.norm2 = AdaptiveInstanceNorm(fin, style_dim) + + # model initialization + # the convolution filters are set to values close to 0 to produce negligible residual features + self.conv[0].weight.data *= 0.01 + 
self.conv2[0].weight.data *= 0.01 + + def forward(self, x, s, w=1): + skip = x + if w == 0: + return skip + out = self.conv(self.norm(x, s)) + out = self.conv2(self.norm2(out, s)) + out = out * w + skip + return out + +class DualStyleGAN(nn.Module): + def __init__(self, size, style_dim, n_mlp, channel_multiplier=2, twoRes=True, res_index=6): + super().__init__() + + layers = [PixelNorm()] + for i in range(n_mlp-6): + layers.append(EqualLinear(512, 512, lr_mul=0.01, activation="fused_lrelu")) + # color transform blocks T_c + self.style = nn.Sequential(*layers) + # StyleGAN2 + self.generator = Generator(size, style_dim, n_mlp, channel_multiplier) + # The extrinsic style path + self.res = nn.ModuleList() + self.res_index = res_index//2 * 2 + self.res.append(AdaResBlock(self.generator.channels[2 ** 2])) # for conv1 + for i in range(3, self.generator.log_size + 1): + out_channel = self.generator.channels[2 ** i] + if i < 3 + self.res_index//2: + # ModRes + self.res.append(AdaResBlock(out_channel)) + self.res.append(AdaResBlock(out_channel)) + else: + # structure transform block T_s + self.res.append(EqualLinear(512, 512)) + # FC layer is initialized with identity matrices, meaning no changes to the input latent code + self.res[-1].weight.data = torch.eye(512) * 512.0**0.5 + torch.randn(512, 512) * 0.01 + self.res.append(EqualLinear(512, 512)) + self.res[-1].weight.data = torch.eye(512) * 512.0**0.5 + torch.randn(512, 512) * 0.01 + self.res.append(EqualLinear(512, 512)) # for to_rgb7 + self.res[-1].weight.data = torch.eye(512) * 512.0**0.5 + torch.randn(512, 512) * 0.01 + self.size = self.generator.size + self.style_dim = self.generator.style_dim + self.log_size = self.generator.log_size + self.num_layers = self.generator.num_layers + self.n_latent = self.generator.n_latent + self.channels = self.generator.channels + + def forward( + self, + styles, # intrinsic style code + exstyles, # extrinsic style code + return_latents=False, + return_feat=False, + inject_index=None, + truncation=1, + truncation_latent=None, + input_is_latent=False, + noise=None, + randomize_noise=True, + z_plus_latent=False, # intrinsic style code is z+ or z + use_res=True, # whether to use the extrinsic style path + fuse_index=18, # layers > fuse_index do not use the extrinsic style path + interp_weights=[1]*18, # weight vector for style combination of two paths + ): + + if not input_is_latent: + if not z_plus_latent: + styles = [self.generator.style(s) for s in styles] + else: + styles = [self.generator.style(s.reshape(s.shape[0]*s.shape[1], s.shape[2])).reshape(s.shape) for s in styles] + + if noise is None: + if randomize_noise: + noise = [None] * self.generator.num_layers + else: + noise = [ + getattr(self.generator.noises, f"noise_{i}") for i in range(self.generator.num_layers) + ] + + if truncation < 1: + style_t = [] + + for style in styles: + style_t.append( + truncation_latent + truncation * (style - truncation_latent) + ) + + styles = style_t + + if len(styles) < 2: + inject_index = self.generator.n_latent + + if styles[0].ndim < 3: + latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) + + else: + latent = styles[0] + + else: + if inject_index is None: + inject_index = random.randint(1, self.generator.n_latent - 1) + + if styles[0].ndim < 3: + latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) + latent2 = styles[1].unsqueeze(1).repeat(1, self.generator.n_latent - inject_index, 1) + + latent = torch.cat([latent, latent2], 1) + else: + latent = torch.cat([styles[0][:,0:inject_index], 
styles[1][:,inject_index:]], 1) + + if use_res: + if exstyles.ndim < 3: + resstyles = self.style(exstyles).unsqueeze(1).repeat(1, self.generator.n_latent, 1) + adastyles = exstyles.unsqueeze(1).repeat(1, self.generator.n_latent, 1) + else: + nB, nL, nD = exstyles.shape + resstyles = self.style(exstyles.reshape(nB*nL, nD)).reshape(nB, nL, nD) + adastyles = exstyles + + out = self.generator.input(latent) + out = self.generator.conv1(out, latent[:, 0], noise=noise[0]) + if use_res and fuse_index > 0: + out = self.res[0](out, resstyles[:, 0], interp_weights[0]) + + skip = self.generator.to_rgb1(out, latent[:, 1]) + i = 1 + for conv1, conv2, noise1, noise2, to_rgb in zip( + self.generator.convs[::2], self.generator.convs[1::2], noise[1::2], noise[2::2], self.generator.to_rgbs): + if use_res and fuse_index >= i and i > self.res_index: + out = conv1(out, interp_weights[i] * self.res[i](adastyles[:, i]) + + (1-interp_weights[i]) * latent[:, i], noise=noise1) + else: + out = conv1(out, latent[:, i], noise=noise1) + if use_res and fuse_index >= i and i <= self.res_index: + out = self.res[i](out, resstyles[:, i], interp_weights[i]) + if use_res and fuse_index >= (i+1) and i > self.res_index: + out = conv2(out, interp_weights[i+1] * self.res[i+1](adastyles[:, i+1]) + + (1-interp_weights[i+1]) * latent[:, i+1], noise=noise2) + else: + out = conv2(out, latent[:, i + 1], noise=noise2) + if use_res and fuse_index >= (i+1) and i <= self.res_index: + out = self.res[i+1](out, resstyles[:, i+1], interp_weights[i+1]) + if use_res and fuse_index >= (i+2) and i >= self.res_index-1: + skip = to_rgb(out, interp_weights[i+2] * self.res[i+2](adastyles[:, i+2]) + + (1-interp_weights[i+2]) * latent[:, i + 2], skip) + else: + skip = to_rgb(out, latent[:, i + 2], skip) + i += 2 + if i > self.res_index and return_feat: + return out, skip + + image = skip + + if return_latents: + return image, latent + + else: + return image, None + + def make_noise(self): + return self.generator.make_noise() + + def mean_latent(self, n_latent): + return self.generator.mean_latent(n_latent) + + def get_latent(self, input): + return self.generator.style(input) \ No newline at end of file diff --git a/vtoonify/model/encoder/__init__.py b/vtoonify/model/encoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vtoonify/model/encoder/align_all_parallel.py b/vtoonify/model/encoder/align_all_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..d41d4fdf8888dbfcd7f8e2ac3962eb8cfb1816b4 --- /dev/null +++ b/vtoonify/model/encoder/align_all_parallel.py @@ -0,0 +1,217 @@ +""" +brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset) +author: lzhbrian (https://lzhbrian.me) +date: 2020.1.5 +note: code is heavily borrowed from + https://github.com/NVlabs/ffhq-dataset + http://dlib.net/face_landmark_detection.py.html + +requirements: + apt install cmake + conda install Pillow numpy scipy + pip install dlib + # download face landmark model from: + # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 +""" +from argparse import ArgumentParser +import time +import numpy as np +import PIL +import PIL.Image +import os +import scipy +import scipy.ndimage +import dlib +import multiprocessing as mp +import math + +#from configs.paths_config import model_paths +SHAPE_PREDICTOR_PATH = 'shape_predictor_68_face_landmarks.dat'#model_paths["shape_predictor"] + + +def get_landmark(filepath, predictor): + """get landmark 
with dlib + :return: np.array shape=(68, 2) + """ + detector = dlib.get_frontal_face_detector() + if type(filepath) == str: + img = dlib.load_rgb_image(filepath) + else: + img = filepath + dets = detector(img, 1) + + if len(dets) == 0: + print('Error: no face detected!') + return None + + shape = None + for k, d in enumerate(dets): + shape = predictor(img, d) + + if shape is None: + print('Error: No face detected! If you are sure there are faces in your input, you may rerun the code several times until the face is detected. Sometimes the detector is unstable.') + t = list(shape.parts()) + a = [] + for tt in t: + a.append([tt.x, tt.y]) + lm = np.array(a) + return lm + + +def align_face(filepath, predictor): + """ + :param filepath: str + :return: PIL Image + """ + + lm = get_landmark(filepath, predictor) + if lm is None: + return None + + lm_chin = lm[0: 17] # left-right + lm_eyebrow_left = lm[17: 22] # left-right + lm_eyebrow_right = lm[22: 27] # left-right + lm_nose = lm[27: 31] # top-down + lm_nostrils = lm[31: 36] # top-down + lm_eye_left = lm[36: 42] # left-clockwise + lm_eye_right = lm[42: 48] # left-clockwise + lm_mouth_outer = lm[48: 60] # left-clockwise + lm_mouth_inner = lm[60: 68] # left-clockwise + + # Calculate auxiliary vectors. + eye_left = np.mean(lm_eye_left, axis=0) + eye_right = np.mean(lm_eye_right, axis=0) + eye_avg = (eye_left + eye_right) * 0.5 + eye_to_eye = eye_right - eye_left + mouth_left = lm_mouth_outer[0] + mouth_right = lm_mouth_outer[6] + mouth_avg = (mouth_left + mouth_right) * 0.5 + eye_to_mouth = mouth_avg - eye_avg + + # Choose oriented crop rectangle. + x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] + x /= np.hypot(*x) + x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) + y = np.flipud(x) * [-1, 1] + c = eye_avg + eye_to_mouth * 0.1 + quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) + qsize = np.hypot(*x) * 2 + + # read image + if type(filepath) == str: + img = PIL.Image.open(filepath) + else: + img = PIL.Image.fromarray(filepath) + + output_size = 256 + transform_size = 256 + enable_padding = True + + # Shrink. + shrink = int(np.floor(qsize / output_size * 0.5)) + if shrink > 1: + rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink))) + img = img.resize(rsize, PIL.Image.ANTIALIAS) + quad /= shrink + qsize /= shrink + + # Crop. + border = max(int(np.rint(qsize * 0.1)), 3) + crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), + int(np.ceil(max(quad[:, 1])))) + crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), + min(crop[3] + border, img.size[1])) + if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]: + img = img.crop(crop) + quad -= crop[0:2] + + # Pad. 
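+    # Reflection-pad the crop, then blend the padded border toward a Gaussian-blurred
+    # copy and the median color, so the synthesized background fades out smoothly
+    # instead of showing a hard mirror seam.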
+ pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), + int(np.ceil(max(quad[:, 1])))) + pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), + max(pad[3] - img.size[1] + border, 0)) + if enable_padding and max(pad) > border - 4: + pad = np.maximum(pad, int(np.rint(qsize * 0.3))) + img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect') + h, w, _ = img.shape + y, x, _ = np.ogrid[:h, :w, :1] + mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]), + 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3])) + blur = qsize * 0.02 + img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) + img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0) + img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB') + quad += pad[:2] + + # Transform. + img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR) + if output_size < transform_size: + img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS) + + # Save aligned image. + return img + + +def chunks(lst, n): + """Yield successive n-sized chunks from lst.""" + for i in range(0, len(lst), n): + yield lst[i:i + n] + + +def extract_on_paths(file_paths): + predictor = dlib.shape_predictor(SHAPE_PREDICTOR_PATH) + pid = mp.current_process().name + print('\t{} is starting to extract on #{} images'.format(pid, len(file_paths))) + tot_count = len(file_paths) + count = 0 + for file_path, res_path in file_paths: + count += 1 + if count % 100 == 0: + print('{} done with {}/{}'.format(pid, count, tot_count)) + try: + res = align_face(file_path, predictor) + res = res.convert('RGB') + os.makedirs(os.path.dirname(res_path), exist_ok=True) + res.save(res_path) + except Exception: + continue + print('\tDone!') + + +def parse_args(): + parser = ArgumentParser(add_help=False) + parser.add_argument('--num_threads', type=int, default=1) + parser.add_argument('--root_path', type=str, default='') + args = parser.parse_args() + return args + + +def run(args): + root_path = args.root_path + out_crops_path = root_path + '_crops' + if not os.path.exists(out_crops_path): + os.makedirs(out_crops_path, exist_ok=True) + + file_paths = [] + for root, dirs, files in os.walk(root_path): + for file in files: + file_path = os.path.join(root, file) + fname = os.path.join(out_crops_path, os.path.relpath(file_path, root_path)) + res_path = '{}.jpg'.format(os.path.splitext(fname)[0]) + if os.path.splitext(file_path)[1] == '.txt' or os.path.exists(res_path): + continue + file_paths.append((file_path, res_path)) + + file_chunks = list(chunks(file_paths, int(math.ceil(len(file_paths) / args.num_threads)))) + print(len(file_chunks)) + pool = mp.Pool(args.num_threads) + print('Running on {} paths\nHere we goooo'.format(len(file_paths))) + tic = time.time() + pool.map(extract_on_paths, file_chunks) + toc = time.time() + print('Mischief managed in {}s'.format(toc - tic)) + + +if __name__ == '__main__': + args = parse_args() + run(args) diff --git a/vtoonify/model/encoder/criteria/id_loss.py b/vtoonify/model/encoder/criteria/id_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..b9325eff921e05f263f07663061b102123f7299c --- /dev/null +++ b/vtoonify/model/encoder/criteria/id_loss.py @@ -0,0 +1,33 @@ +import torch +from torch import nn +from 
model.encoder.encoders.model_irse import Backbone + + +class IDLoss(nn.Module): + def __init__(self, model_paths): + super(IDLoss, self).__init__() + print('Loading ResNet ArcFace') + self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se') + self.facenet.load_state_dict(torch.load(model_paths)) + self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112)) + self.facenet.eval() + + def extract_feats(self, x): + x = x[:, :, 35:223, 32:220] # Crop interesting region + x = self.face_pool(x) + x_feats = self.facenet(x) + return x_feats + + def forward(self, y_hat, y): + n_samples = y_hat.shape[0] + y_feats = self.extract_feats(y) # Otherwise use the feature from there + y_hat_feats = self.extract_feats(y_hat) + y_feats = y_feats.detach() + loss = 0 + count = 0 + for i in range(n_samples): + diff_target = y_hat_feats[i].dot(y_feats[i]) + loss += 1 - diff_target + count += 1 + + return loss / count \ No newline at end of file diff --git a/vtoonify/model/encoder/encoders/__init__.py b/vtoonify/model/encoder/encoders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vtoonify/model/encoder/encoders/helpers.py b/vtoonify/model/encoder/encoders/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..b51fdf97141407fcc1c9d249a086ddbfd042469f --- /dev/null +++ b/vtoonify/model/encoder/encoders/helpers.py @@ -0,0 +1,119 @@ +from collections import namedtuple +import torch +from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module + +""" +ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) +""" + + +class Flatten(Module): + def forward(self, input): + return input.view(input.size(0), -1) + + +def l2_norm(input, axis=1): + norm = torch.norm(input, 2, axis, True) + output = torch.div(input, norm) + return output + + +class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])): + """ A named tuple describing a ResNet block. """ + + +def get_block(in_channel, depth, num_units, stride=2): + return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)] + + +def get_blocks(num_layers): + if num_layers == 50: + blocks = [ + get_block(in_channel=64, depth=64, num_units=3), + get_block(in_channel=64, depth=128, num_units=4), + get_block(in_channel=128, depth=256, num_units=14), + get_block(in_channel=256, depth=512, num_units=3) + ] + elif num_layers == 100: + blocks = [ + get_block(in_channel=64, depth=64, num_units=3), + get_block(in_channel=64, depth=128, num_units=13), + get_block(in_channel=128, depth=256, num_units=30), + get_block(in_channel=256, depth=512, num_units=3) + ] + elif num_layers == 152: + blocks = [ + get_block(in_channel=64, depth=64, num_units=3), + get_block(in_channel=64, depth=128, num_units=8), + get_block(in_channel=128, depth=256, num_units=36), + get_block(in_channel=256, depth=512, num_units=3) + ] + else: + raise ValueError("Invalid number of layers: {}. 
Must be one of [50, 100, 152]".format(num_layers)) + return blocks + + +class SEModule(Module): + def __init__(self, channels, reduction): + super(SEModule, self).__init__() + self.avg_pool = AdaptiveAvgPool2d(1) + self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) + self.relu = ReLU(inplace=True) + self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) + self.sigmoid = Sigmoid() + + def forward(self, x): + module_input = x + x = self.avg_pool(x) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + return module_input * x + + +class bottleneck_IR(Module): + def __init__(self, in_channel, depth, stride): + super(bottleneck_IR, self).__init__() + if in_channel == depth: + self.shortcut_layer = MaxPool2d(1, stride) + else: + self.shortcut_layer = Sequential( + Conv2d(in_channel, depth, (1, 1), stride, bias=False), + BatchNorm2d(depth) + ) + self.res_layer = Sequential( + BatchNorm2d(in_channel), + Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), + Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth) + ) + + def forward(self, x): + shortcut = self.shortcut_layer(x) + res = self.res_layer(x) + return res + shortcut + + +class bottleneck_IR_SE(Module): + def __init__(self, in_channel, depth, stride): + super(bottleneck_IR_SE, self).__init__() + if in_channel == depth: + self.shortcut_layer = MaxPool2d(1, stride) + else: + self.shortcut_layer = Sequential( + Conv2d(in_channel, depth, (1, 1), stride, bias=False), + BatchNorm2d(depth) + ) + self.res_layer = Sequential( + BatchNorm2d(in_channel), + Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), + PReLU(depth), + Conv2d(depth, depth, (3, 3), stride, 1, bias=False), + BatchNorm2d(depth), + SEModule(depth, 16) + ) + + def forward(self, x): + shortcut = self.shortcut_layer(x) + res = self.res_layer(x) + return res + shortcut diff --git a/vtoonify/model/encoder/encoders/model_irse.py b/vtoonify/model/encoder/encoders/model_irse.py new file mode 100644 index 0000000000000000000000000000000000000000..adb359125e34469c05831b541aca38a2c5e183a0 --- /dev/null +++ b/vtoonify/model/encoder/encoders/model_irse.py @@ -0,0 +1,84 @@ +from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module +from model.encoder.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm + +""" +Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) +""" + + +class Backbone(Module): + def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True): + super(Backbone, self).__init__() + assert input_size in [112, 224], "input_size should be 112 or 224" + assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152" + assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se" + blocks = get_blocks(num_layers) + if mode == 'ir': + unit_module = bottleneck_IR + elif mode == 'ir_se': + unit_module = bottleneck_IR_SE + self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), + BatchNorm2d(64), + PReLU(64)) + if input_size == 112: + self.output_layer = Sequential(BatchNorm2d(512), + Dropout(drop_ratio), + Flatten(), + Linear(512 * 7 * 7, 512), + BatchNorm1d(512, affine=affine)) + else: + self.output_layer = Sequential(BatchNorm2d(512), + Dropout(drop_ratio), + Flatten(), + Linear(512 * 14 * 14, 512), + BatchNorm1d(512, affine=affine)) + + modules = [] + for block in blocks: + for bottleneck in 
block: + modules.append(unit_module(bottleneck.in_channel, + bottleneck.depth, + bottleneck.stride)) + self.body = Sequential(*modules) + + def forward(self, x): + x = self.input_layer(x) + x = self.body(x) + x = self.output_layer(x) + return l2_norm(x) + + +def IR_50(input_size): + """Constructs a ir-50 model.""" + model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False) + return model + + +def IR_101(input_size): + """Constructs a ir-101 model.""" + model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False) + return model + + +def IR_152(input_size): + """Constructs a ir-152 model.""" + model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False) + return model + + +def IR_SE_50(input_size): + """Constructs a ir_se-50 model.""" + model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False) + return model + + +def IR_SE_101(input_size): + """Constructs a ir_se-101 model.""" + model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False) + return model + + +def IR_SE_152(input_size): + """Constructs a ir_se-152 model.""" + model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False) + return model diff --git a/vtoonify/model/encoder/encoders/psp_encoders.py b/vtoonify/model/encoder/encoders/psp_encoders.py new file mode 100644 index 0000000000000000000000000000000000000000..06f14e2ba2a4e823ae712f09871fed2669702f26 --- /dev/null +++ b/vtoonify/model/encoder/encoders/psp_encoders.py @@ -0,0 +1,186 @@ +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn import Linear, Conv2d, BatchNorm2d, PReLU, Sequential, Module + +from model.encoder.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE +from model.stylegan.model import EqualLinear + + +class GradualStyleBlock(Module): + def __init__(self, in_c, out_c, spatial): + super(GradualStyleBlock, self).__init__() + self.out_c = out_c + self.spatial = spatial + num_pools = int(np.log2(spatial)) + modules = [] + modules += [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1), + nn.LeakyReLU()] + for i in range(num_pools - 1): + modules += [ + Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1), + nn.LeakyReLU() + ] + self.convs = nn.Sequential(*modules) + self.linear = EqualLinear(out_c, out_c, lr_mul=1) + + def forward(self, x): + x = self.convs(x) + x = x.view(-1, self.out_c) + x = self.linear(x) + return x + + +class GradualStyleEncoder(Module): + def __init__(self, num_layers, mode='ir', opts=None): + super(GradualStyleEncoder, self).__init__() + assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152' + assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se' + blocks = get_blocks(num_layers) + if mode == 'ir': + unit_module = bottleneck_IR + elif mode == 'ir_se': + unit_module = bottleneck_IR_SE + self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False), + BatchNorm2d(64), + PReLU(64)) + modules = [] + for block in blocks: + for bottleneck in block: + modules.append(unit_module(bottleneck.in_channel, + bottleneck.depth, + bottleneck.stride)) + self.body = Sequential(*modules) + + self.styles = nn.ModuleList() + self.style_count = opts.n_styles + self.coarse_ind = 3 + self.middle_ind = 7 + for i in range(self.style_count): + if i < self.coarse_ind: + style = GradualStyleBlock(512, 512, 16) + elif i < self.middle_ind: + style = GradualStyleBlock(512, 512, 32) 
+ else: + style = GradualStyleBlock(512, 512, 64) + self.styles.append(style) + self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0) + self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0) + + def _upsample_add(self, x, y): + '''Upsample and add two feature maps. + Args: + x: (Variable) top feature map to be upsampled. + y: (Variable) lateral feature map. + Returns: + (Variable) added feature map. + Note in PyTorch, when input size is odd, the upsampled feature map + with `F.upsample(..., scale_factor=2, mode='nearest')` + maybe not equal to the lateral feature map size. + e.g. + original input size: [N,_,15,15] -> + conv2d feature map size: [N,_,8,8] -> + upsampled feature map size: [N,_,16,16] + So we choose bilinear upsample which supports arbitrary output sizes. + ''' + _, _, H, W = y.size() + return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y + + def forward(self, x): + x = self.input_layer(x) + + latents = [] + modulelist = list(self.body._modules.values()) + for i, l in enumerate(modulelist): + x = l(x) + if i == 6: + c1 = x + elif i == 20: + c2 = x + elif i == 23: + c3 = x + + for j in range(self.coarse_ind): + latents.append(self.styles[j](c3)) + + p2 = self._upsample_add(c3, self.latlayer1(c2)) + for j in range(self.coarse_ind, self.middle_ind): + latents.append(self.styles[j](p2)) + + p1 = self._upsample_add(p2, self.latlayer2(c1)) + for j in range(self.middle_ind, self.style_count): + latents.append(self.styles[j](p1)) + + out = torch.stack(latents, dim=1) + return out + + +class BackboneEncoderUsingLastLayerIntoW(Module): + def __init__(self, num_layers, mode='ir', opts=None): + super(BackboneEncoderUsingLastLayerIntoW, self).__init__() + print('Using BackboneEncoderUsingLastLayerIntoW') + assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152' + assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se' + blocks = get_blocks(num_layers) + if mode == 'ir': + unit_module = bottleneck_IR + elif mode == 'ir_se': + unit_module = bottleneck_IR_SE + self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False), + BatchNorm2d(64), + PReLU(64)) + self.output_pool = torch.nn.AdaptiveAvgPool2d((1, 1)) + self.linear = EqualLinear(512, 512, lr_mul=1) + modules = [] + for block in blocks: + for bottleneck in block: + modules.append(unit_module(bottleneck.in_channel, + bottleneck.depth, + bottleneck.stride)) + self.body = Sequential(*modules) + + def forward(self, x): + x = self.input_layer(x) + x = self.body(x) + x = self.output_pool(x) + x = x.view(-1, 512) + x = self.linear(x) + return x + + +class BackboneEncoderUsingLastLayerIntoWPlus(Module): + def __init__(self, num_layers, mode='ir', opts=None): + super(BackboneEncoderUsingLastLayerIntoWPlus, self).__init__() + print('Using BackboneEncoderUsingLastLayerIntoWPlus') + assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152' + assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se' + blocks = get_blocks(num_layers) + if mode == 'ir': + unit_module = bottleneck_IR + elif mode == 'ir_se': + unit_module = bottleneck_IR_SE + self.n_styles = opts.n_styles + self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False), + BatchNorm2d(64), + PReLU(64)) + self.output_layer_2 = Sequential(BatchNorm2d(512), + torch.nn.AdaptiveAvgPool2d((7, 7)), + Flatten(), + Linear(512 * 7 * 7, 512)) + self.linear = EqualLinear(512, 512 * self.n_styles, lr_mul=1) + modules = [] + for block in blocks: + for 
bottleneck in block: + modules.append(unit_module(bottleneck.in_channel, + bottleneck.depth, + bottleneck.stride)) + self.body = Sequential(*modules) + + def forward(self, x): + x = self.input_layer(x) + x = self.body(x) + x = self.output_layer_2(x) + x = self.linear(x) + x = x.view(-1, self.n_styles, 512) + return x diff --git a/vtoonify/model/encoder/psp.py b/vtoonify/model/encoder/psp.py new file mode 100644 index 0000000000000000000000000000000000000000..9953db7469493ba32abfeff041e51ac2c81e8399 --- /dev/null +++ b/vtoonify/model/encoder/psp.py @@ -0,0 +1,125 @@ +""" +This file defines the core research contribution +""" +import matplotlib +matplotlib.use('Agg') +import math + +import torch +from torch import nn +from model.encoder.encoders import psp_encoders +from model.stylegan.model import Generator + +def get_keys(d, name): + if 'state_dict' in d: + d = d['state_dict'] + d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name} + return d_filt + + +class pSp(nn.Module): + + def __init__(self, opts): + super(pSp, self).__init__() + self.set_opts(opts) + # compute number of style inputs based on the output resolution + self.opts.n_styles = int(math.log(self.opts.output_size, 2)) * 2 - 2 + # Define architecture + self.encoder = self.set_encoder() + self.decoder = Generator(self.opts.output_size, 512, 8) + self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256)) + # Load weights if needed + self.load_weights() + + def set_encoder(self): + if self.opts.encoder_type == 'GradualStyleEncoder': + encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts) + elif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoW': + encoder = psp_encoders.BackboneEncoderUsingLastLayerIntoW(50, 'ir_se', self.opts) + elif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoWPlus': + encoder = psp_encoders.BackboneEncoderUsingLastLayerIntoWPlus(50, 'ir_se', self.opts) + else: + raise Exception('{} is not a valid encoders'.format(self.opts.encoder_type)) + return encoder + + def load_weights(self): + if self.opts.checkpoint_path is not None: + print('Loading pSp from checkpoint: {}'.format(self.opts.checkpoint_path)) + ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu') + self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True) + self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True) + self.__load_latent_avg(ckpt) + else: + pass + '''print('Loading encoders weights from irse50!') + encoder_ckpt = torch.load(model_paths['ir_se50']) + # if input to encoder is not an RGB image, do not load the input layer weights + if self.opts.label_nc != 0: + encoder_ckpt = {k: v for k, v in encoder_ckpt.items() if "input_layer" not in k} + self.encoder.load_state_dict(encoder_ckpt, strict=False) + print('Loading decoder weights from pretrained!') + ckpt = torch.load(self.opts.stylegan_weights) + self.decoder.load_state_dict(ckpt['g_ema'], strict=False) + if self.opts.learn_in_w: + self.__load_latent_avg(ckpt, repeat=1) + else: + self.__load_latent_avg(ckpt, repeat=self.opts.n_styles) + ''' + + def forward(self, x, resize=True, latent_mask=None, input_code=False, randomize_noise=True, + inject_latent=None, return_latents=False, alpha=None, z_plus_latent=False, return_z_plus_latent=True): + if input_code: + codes = x + else: + codes = self.encoder(x) + #print(codes.shape) + # normalize with respect to the center of an average face + if self.opts.start_from_latent_avg: + if self.opts.learn_in_w: + codes = codes + 
self.latent_avg.repeat(codes.shape[0], 1) + else: + codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1) + + + if latent_mask is not None: + for i in latent_mask: + if inject_latent is not None: + if alpha is not None: + codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i] + else: + codes[:, i] = inject_latent[:, i] + else: + codes[:, i] = 0 + + input_is_latent = not input_code + if z_plus_latent: + input_is_latent = False + images, result_latent = self.decoder([codes], + input_is_latent=input_is_latent, + randomize_noise=randomize_noise, + return_latents=return_latents, + z_plus_latent=z_plus_latent) + + if resize: + images = self.face_pool(images) + + if return_latents: + if z_plus_latent and return_z_plus_latent: + return images, codes + if z_plus_latent and not return_z_plus_latent: + return images, result_latent + else: + return images, result_latent + else: + return images + + def set_opts(self, opts): + self.opts = opts + + def __load_latent_avg(self, ckpt, repeat=None): + if 'latent_avg' in ckpt: + self.latent_avg = ckpt['latent_avg'].to(self.opts.device) + if repeat is not None: + self.latent_avg = self.latent_avg.repeat(repeat, 1) + else: + self.latent_avg = None diff --git a/vtoonify/model/encoder/readme.md b/vtoonify/model/encoder/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..5421bfe3e67b7b6cbd7baf96b741b539d65bb0fd --- /dev/null +++ b/vtoonify/model/encoder/readme.md @@ -0,0 +1,9 @@ +# Encoding in Style: a StyleGAN Encoder for Image-to-Image Translation + +## Description +Official Implementation of pSp paper for both training and evaluation. The pSp method extends the StyleGAN model to +allow solving different image-to-image translation problems using its encoder. + +Fork from [https://github.com/eladrich/pixel2style2pixel](https://github.com/eladrich/pixel2style2pixel). + +In VToonify, we modify pSp to accept z+ latent code. diff --git a/vtoonify/model/raft/LICENSE b/vtoonify/model/raft/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..ed13d8404f0f1315ee323b2c8d1b2d8f77b5c82f --- /dev/null +++ b/vtoonify/model/raft/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2020, princeton-vl +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vtoonify/model/raft/RAFT.png b/vtoonify/model/raft/RAFT.png new file mode 100644 index 0000000000000000000000000000000000000000..a387fe2c8b2d02602941a5a74993992cd6490a4c Binary files /dev/null and b/vtoonify/model/raft/RAFT.png differ diff --git a/vtoonify/model/raft/README.md b/vtoonify/model/raft/README.md new file mode 100644 index 0000000000000000000000000000000000000000..650275ed7c4cda12822587c6a4358f057fffe494 --- /dev/null +++ b/vtoonify/model/raft/README.md @@ -0,0 +1,80 @@ +# RAFT +This repository contains the source code for our paper: + +[RAFT: Recurrent All Pairs Field Transforms for Optical Flow](https://arxiv.org/pdf/2003.12039.pdf)
+ECCV 2020
+Zachary Teed and Jia Deng
+ + + +## Requirements +The code has been tested with PyTorch 1.6 and Cuda 10.1. +```Shell +conda create --name raft +conda activate raft +conda install pytorch=1.6.0 torchvision=0.7.0 cudatoolkit=10.1 matplotlib tensorboard scipy opencv -c pytorch +``` + +## Demos +Pretrained models can be downloaded by running +```Shell +./download_models.sh +``` +or downloaded from [google drive](https://drive.google.com/drive/folders/1sWDsfuZ3Up38EUQt7-JDTT1HcGHuJgvT?usp=sharing) + +You can demo a trained model on a sequence of frames +```Shell +python demo.py --model=models/raft-things.pth --path=demo-frames +``` + +## Required Data +To evaluate/train RAFT, you will need to download the required datasets. +* [FlyingChairs](https://lmb.informatik.uni-freiburg.de/resources/datasets/FlyingChairs.en.html#flyingchairs) +* [FlyingThings3D](https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html) +* [Sintel](http://sintel.is.tue.mpg.de/) +* [KITTI](http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow) +* [HD1K](http://hci-benchmark.iwr.uni-heidelberg.de/) (optional) + + +By default `datasets.py` will search for the datasets in these locations. You can create symbolic links to wherever the datasets were downloaded in the `datasets` folder + +```Shell +├── datasets + ├── Sintel + ├── test + ├── training + ├── KITTI + ├── testing + ├── training + ├── devkit + ├── FlyingChairs_release + ├── data + ├── FlyingThings3D + ├── frames_cleanpass + ├── frames_finalpass + ├── optical_flow +``` + +## Evaluation +You can evaluate a trained model using `evaluate.py` +```Shell +python evaluate.py --model=models/raft-things.pth --dataset=sintel --mixed_precision +``` + +## Training +We used the following training schedule in our paper (2 GPUs). Training logs will be written to the `runs` directory, which can be visualized using tensorboard +```Shell +./train_standard.sh +``` + +If you have an RTX GPU, training can be accelerated using mixed precision. You can expect similar results in this setting (1 GPU) +```Shell +./train_mixed.sh +``` + +## (Optional) Efficient Implementation +You can optionally use our alternate (efficient) implementation by compiling the provided CUDA extension +```Shell +cd alt_cuda_corr && python setup.py install && cd .. +``` +and running `demo.py` and `evaluate.py` with the `--alternate_corr` flag. Note, this implementation is somewhat slower than all-pairs, but uses significantly less GPU memory during the forward pass.
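As a minimal sketch of what the compiled extension exposes (based only on the `alt_cuda_corr` name in `setup.py` and the `forward` binding in `correlation.cpp` below; float32 inputs are assumed, and the variable names and sizes here are illustrative, not taken from RAFT's own calling code):

```python
# Hedged usage sketch for the compiled alt_cuda_corr extension; assumes it was
# built with `cd alt_cuda_corr && python setup.py install` and a CUDA GPU is
# available. Tensor layouts follow the accessors in correlation_kernel.cu.
import torch
import alt_cuda_corr  # module name from CUDAExtension('alt_cuda_corr', ...) in setup.py

B, H, W, C = 1, 46, 62, 256    # channel-last feature maps: (B, H, W, C)
N, radius = 1, 4               # N lookup sets per batch, correlation radius r

fmap1 = torch.randn(B, H, W, C, device='cuda').contiguous()
fmap2 = torch.randn(B, H, W, C, device='cuda').contiguous()
coords = torch.zeros(B, N, H, W, 2, device='cuda').contiguous()  # per-pixel sampling positions in fmap2

# forward returns a one-element list; corr has shape (B, N, (2r+1)**2, H, W)
corr, = alt_cuda_corr.forward(fmap1, fmap2, coords, radius)
print(corr.shape)  # torch.Size([1, 1, 81, 46, 62])
```

In RAFT itself this path is only exercised when the scripts above are run with the `--alternate_corr` flag.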
diff --git a/vtoonify/model/raft/alt_cuda_corr/correlation.cpp b/vtoonify/model/raft/alt_cuda_corr/correlation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b01584d19edb99e7feec5f2e4c51169a1ed208db --- /dev/null +++ b/vtoonify/model/raft/alt_cuda_corr/correlation.cpp @@ -0,0 +1,54 @@ +#include +#include + +// CUDA forward declarations +std::vector corr_cuda_forward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + int radius); + +std::vector corr_cuda_backward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius); + +// C++ interface +#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) + +std::vector corr_forward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + int radius) { + CHECK_INPUT(fmap1); + CHECK_INPUT(fmap2); + CHECK_INPUT(coords); + + return corr_cuda_forward(fmap1, fmap2, coords, radius); +} + + +std::vector corr_backward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius) { + CHECK_INPUT(fmap1); + CHECK_INPUT(fmap2); + CHECK_INPUT(coords); + CHECK_INPUT(corr_grad); + + return corr_cuda_backward(fmap1, fmap2, coords, corr_grad, radius); +} + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &corr_forward, "CORR forward"); + m.def("backward", &corr_backward, "CORR backward"); +} \ No newline at end of file diff --git a/vtoonify/model/raft/alt_cuda_corr/correlation_kernel.cu b/vtoonify/model/raft/alt_cuda_corr/correlation_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..145e5804a16ece51b8ff5f1cb61ae8dab4fc3bb7 --- /dev/null +++ b/vtoonify/model/raft/alt_cuda_corr/correlation_kernel.cu @@ -0,0 +1,324 @@ +#include +#include +#include +#include + + +#define BLOCK_H 4 +#define BLOCK_W 8 +#define BLOCK_HW BLOCK_H * BLOCK_W +#define CHANNEL_STRIDE 32 + + +__forceinline__ __device__ +bool within_bounds(int h, int w, int H, int W) { + return h >= 0 && h < H && w >= 0 && w < W; +} + +template +__global__ void corr_forward_kernel( + const torch::PackedTensorAccessor32 fmap1, + const torch::PackedTensorAccessor32 fmap2, + const torch::PackedTensorAccessor32 coords, + torch::PackedTensorAccessor32 corr, + int r) +{ + const int b = blockIdx.x; + const int h0 = blockIdx.y * blockDim.x; + const int w0 = blockIdx.z * blockDim.y; + const int tid = threadIdx.x * blockDim.y + threadIdx.y; + + const int H1 = fmap1.size(1); + const int W1 = fmap1.size(2); + const int H2 = fmap2.size(1); + const int W2 = fmap2.size(2); + const int N = coords.size(1); + const int C = fmap1.size(3); + + __shared__ scalar_t f1[CHANNEL_STRIDE][BLOCK_HW+1]; + __shared__ scalar_t f2[CHANNEL_STRIDE][BLOCK_HW+1]; + __shared__ scalar_t x2s[BLOCK_HW]; + __shared__ scalar_t y2s[BLOCK_HW]; + + for (int c=0; c(floor(y2s[k1]))-r+iy; + int w2 = static_cast(floor(x2s[k1]))-r+ix; + int c2 = tid % CHANNEL_STRIDE; + + auto fptr = fmap2[b][h2][w2]; + if (within_bounds(h2, w2, H2, W2)) + f2[c2][k1] = fptr[c+c2]; + else + f2[c2][k1] = 0.0; + } + + __syncthreads(); + + scalar_t s = 0.0; + for (int k=0; k 0 && ix > 0 && within_bounds(h1, w1, H1, W1)) + *(corr_ptr + ix_nw) += nw; + + if (iy > 0 && ix < rd && within_bounds(h1, w1, H1, W1)) + *(corr_ptr + ix_ne) += ne; + + if (iy < rd && ix > 0 && within_bounds(h1, w1, H1, W1)) + *(corr_ptr 
+ ix_sw) += sw; + + if (iy < rd && ix < rd && within_bounds(h1, w1, H1, W1)) + *(corr_ptr + ix_se) += se; + } + } + } + } +} + + +template +__global__ void corr_backward_kernel( + const torch::PackedTensorAccessor32 fmap1, + const torch::PackedTensorAccessor32 fmap2, + const torch::PackedTensorAccessor32 coords, + const torch::PackedTensorAccessor32 corr_grad, + torch::PackedTensorAccessor32 fmap1_grad, + torch::PackedTensorAccessor32 fmap2_grad, + torch::PackedTensorAccessor32 coords_grad, + int r) +{ + + const int b = blockIdx.x; + const int h0 = blockIdx.y * blockDim.x; + const int w0 = blockIdx.z * blockDim.y; + const int tid = threadIdx.x * blockDim.y + threadIdx.y; + + const int H1 = fmap1.size(1); + const int W1 = fmap1.size(2); + const int H2 = fmap2.size(1); + const int W2 = fmap2.size(2); + const int N = coords.size(1); + const int C = fmap1.size(3); + + __shared__ scalar_t f1[CHANNEL_STRIDE][BLOCK_HW+1]; + __shared__ scalar_t f2[CHANNEL_STRIDE][BLOCK_HW+1]; + + __shared__ scalar_t f1_grad[CHANNEL_STRIDE][BLOCK_HW+1]; + __shared__ scalar_t f2_grad[CHANNEL_STRIDE][BLOCK_HW+1]; + + __shared__ scalar_t x2s[BLOCK_HW]; + __shared__ scalar_t y2s[BLOCK_HW]; + + for (int c=0; c(floor(y2s[k1]))-r+iy; + int w2 = static_cast(floor(x2s[k1]))-r+ix; + int c2 = tid % CHANNEL_STRIDE; + + auto fptr = fmap2[b][h2][w2]; + if (within_bounds(h2, w2, H2, W2)) + f2[c2][k1] = fptr[c+c2]; + else + f2[c2][k1] = 0.0; + + f2_grad[c2][k1] = 0.0; + } + + __syncthreads(); + + const scalar_t* grad_ptr = &corr_grad[b][n][0][h1][w1]; + scalar_t g = 0.0; + + int ix_nw = H1*W1*((iy-1) + rd*(ix-1)); + int ix_ne = H1*W1*((iy-1) + rd*ix); + int ix_sw = H1*W1*(iy + rd*(ix-1)); + int ix_se = H1*W1*(iy + rd*ix); + + if (iy > 0 && ix > 0 && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_nw) * dy * dx; + + if (iy > 0 && ix < rd && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_ne) * dy * (1-dx); + + if (iy < rd && ix > 0 && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_sw) * (1-dy) * dx; + + if (iy < rd && ix < rd && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_se) * (1-dy) * (1-dx); + + for (int k=0; k(floor(y2s[k1]))-r+iy; + int w2 = static_cast(floor(x2s[k1]))-r+ix; + int c2 = tid % CHANNEL_STRIDE; + + scalar_t* fptr = &fmap2_grad[b][h2][w2][0]; + if (within_bounds(h2, w2, H2, W2)) + atomicAdd(fptr+c+c2, f2_grad[c2][k1]); + } + } + } + } + __syncthreads(); + + + for (int k=0; k corr_cuda_forward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + int radius) +{ + const auto B = coords.size(0); + const auto N = coords.size(1); + const auto H = coords.size(2); + const auto W = coords.size(3); + + const auto rd = 2 * radius + 1; + auto opts = fmap1.options(); + auto corr = torch::zeros({B, N, rd*rd, H, W}, opts); + + const dim3 blocks(B, (H+BLOCK_H-1)/BLOCK_H, (W+BLOCK_W-1)/BLOCK_W); + const dim3 threads(BLOCK_H, BLOCK_W); + + corr_forward_kernel<<>>( + fmap1.packed_accessor32(), + fmap2.packed_accessor32(), + coords.packed_accessor32(), + corr.packed_accessor32(), + radius); + + return {corr}; +} + +std::vector corr_cuda_backward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius) +{ + const auto B = coords.size(0); + const auto N = coords.size(1); + + const auto H1 = fmap1.size(1); + const auto W1 = fmap1.size(2); + const auto H2 = fmap2.size(1); + const auto W2 = fmap2.size(2); + const auto C = fmap1.size(3); + + auto opts = fmap1.options(); + auto fmap1_grad = torch::zeros({B, H1, W1, C}, opts); + auto 
fmap2_grad = torch::zeros({B, H2, W2, C}, opts); + auto coords_grad = torch::zeros({B, N, H1, W1, 2}, opts); + + const dim3 blocks(B, (H1+BLOCK_H-1)/BLOCK_H, (W1+BLOCK_W-1)/BLOCK_W); + const dim3 threads(BLOCK_H, BLOCK_W); + + + corr_backward_kernel<<>>( + fmap1.packed_accessor32(), + fmap2.packed_accessor32(), + coords.packed_accessor32(), + corr_grad.packed_accessor32(), + fmap1_grad.packed_accessor32(), + fmap2_grad.packed_accessor32(), + coords_grad.packed_accessor32(), + radius); + + return {fmap1_grad, fmap2_grad, coords_grad}; +} \ No newline at end of file diff --git a/vtoonify/model/raft/alt_cuda_corr/setup.py b/vtoonify/model/raft/alt_cuda_corr/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..c0207ff285ffac4c8146c79d154f12416dbef48c --- /dev/null +++ b/vtoonify/model/raft/alt_cuda_corr/setup.py @@ -0,0 +1,15 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + + +setup( + name='correlation', + ext_modules=[ + CUDAExtension('alt_cuda_corr', + sources=['correlation.cpp', 'correlation_kernel.cu'], + extra_compile_args={'cxx': [], 'nvcc': ['-O3']}), + ], + cmdclass={ + 'build_ext': BuildExtension + }) + diff --git a/vtoonify/model/raft/chairs_split.txt b/vtoonify/model/raft/chairs_split.txt new file mode 100644 index 0000000000000000000000000000000000000000..6ae8f0b72a22fc061552604c94664e3a0287914e --- /dev/null +++ b/vtoonify/model/raft/chairs_split.txt @@ -0,0 +1,22872 @@ +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 
+1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 
+1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +2 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 
[... several thousand additional diff lines of this data file, each adding a line containing "1" or "2", omitted here for readability ...]
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 \ No newline at end of file diff --git a/vtoonify/model/raft/core/__init__.py b/vtoonify/model/raft/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vtoonify/model/raft/core/corr.py b/vtoonify/model/raft/core/corr.py new file mode 100644 index 0000000000000000000000000000000000000000..40214aa5e6f0392a732eacab9d9cb0cbfb4669f3 --- /dev/null +++ b/vtoonify/model/raft/core/corr.py @@ -0,0 +1,91 @@ +import torch +import torch.nn.functional as F +from model.raft.core.utils.utils import bilinear_sampler, coords_grid + +try: + import alt_cuda_corr +except: + # alt_cuda_corr is not compiled + pass + + +class CorrBlock: + def __init__(self, fmap1, fmap2, num_levels=4, radius=4): + self.num_levels = num_levels + self.radius = radius + self.corr_pyramid = [] + + # all pairs correlation + corr = CorrBlock.corr(fmap1, fmap2) + + batch, h1, w1, dim, h2, w2 = corr.shape + corr = corr.reshape(batch*h1*w1, dim, 
h2, w2) + + self.corr_pyramid.append(corr) + for i in range(self.num_levels-1): + corr = F.avg_pool2d(corr, 2, stride=2) + self.corr_pyramid.append(corr) + + def __call__(self, coords): + r = self.radius + coords = coords.permute(0, 2, 3, 1) + batch, h1, w1, _ = coords.shape + + out_pyramid = [] + for i in range(self.num_levels): + corr = self.corr_pyramid[i] + dx = torch.linspace(-r, r, 2*r+1, device=coords.device) + dy = torch.linspace(-r, r, 2*r+1, device=coords.device) + delta = torch.stack(torch.meshgrid(dy, dx), axis=-1) + + centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i + delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2) + coords_lvl = centroid_lvl + delta_lvl + + corr = bilinear_sampler(corr, coords_lvl) + corr = corr.view(batch, h1, w1, -1) + out_pyramid.append(corr) + + out = torch.cat(out_pyramid, dim=-1) + return out.permute(0, 3, 1, 2).contiguous().float() + + @staticmethod + def corr(fmap1, fmap2): + batch, dim, ht, wd = fmap1.shape + fmap1 = fmap1.view(batch, dim, ht*wd) + fmap2 = fmap2.view(batch, dim, ht*wd) + + corr = torch.matmul(fmap1.transpose(1,2), fmap2) + corr = corr.view(batch, ht, wd, 1, ht, wd) + return corr / torch.sqrt(torch.tensor(dim).float()) + + +class AlternateCorrBlock: + def __init__(self, fmap1, fmap2, num_levels=4, radius=4): + self.num_levels = num_levels + self.radius = radius + + self.pyramid = [(fmap1, fmap2)] + for i in range(self.num_levels): + fmap1 = F.avg_pool2d(fmap1, 2, stride=2) + fmap2 = F.avg_pool2d(fmap2, 2, stride=2) + self.pyramid.append((fmap1, fmap2)) + + def __call__(self, coords): + coords = coords.permute(0, 2, 3, 1) + B, H, W, _ = coords.shape + dim = self.pyramid[0][0].shape[1] + + corr_list = [] + for i in range(self.num_levels): + r = self.radius + fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous() + fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous() + + coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous() + corr, = alt_cuda_corr.forward(fmap1_i, fmap2_i, coords_i, r) + corr_list.append(corr.squeeze(1)) + + corr = torch.stack(corr_list, dim=1) + corr = corr.reshape(B, -1, H, W) + return corr / torch.sqrt(torch.tensor(dim).float()) diff --git a/vtoonify/model/raft/core/datasets.py b/vtoonify/model/raft/core/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..9991f15f4c3861c19d1a4b8766d49f83af11db70 --- /dev/null +++ b/vtoonify/model/raft/core/datasets.py @@ -0,0 +1,235 @@ +# Data loading based on https://github.com/NVIDIA/flownet2-pytorch + +import numpy as np +import torch +import torch.utils.data as data +import torch.nn.functional as F + +import os +import math +import random +from glob import glob +import os.path as osp + +from model.raft.core.utils import frame_utils +from model.raft.core.utils.augmentor import FlowAugmentor, SparseFlowAugmentor + + +class FlowDataset(data.Dataset): + def __init__(self, aug_params=None, sparse=False): + self.augmentor = None + self.sparse = sparse + if aug_params is not None: + if sparse: + self.augmentor = SparseFlowAugmentor(**aug_params) + else: + self.augmentor = FlowAugmentor(**aug_params) + + self.is_test = False + self.init_seed = False + self.flow_list = [] + self.image_list = [] + self.extra_info = [] + + def __getitem__(self, index): + + if self.is_test: + img1 = frame_utils.read_gen(self.image_list[index][0]) + img2 = frame_utils.read_gen(self.image_list[index][1]) + img1 = np.array(img1).astype(np.uint8)[..., :3] + img2 = np.array(img2).astype(np.uint8)[..., :3] + img1 = torch.from_numpy(img1).permute(2, 0, 
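The CorrBlock class in corr.py above builds the all-pairs correlation volume once, average-pools it into a `num_levels` pyramid, and on every call samples a (2r+1)x(2r+1) neighborhood around the current flow estimate at each level, so the per-iteration lookup cost stays constant while the coarse levels still cover large displacements. A minimal sketch of that lookup, assuming the `vtoonify/` directory is on the import path (as the `model.raft.core` imports in this diff require) and using random feature maps purely to illustrate the shapes:

```python
import torch

from model.raft.core.corr import CorrBlock
from model.raft.core.utils.utils import coords_grid

# 1/8-resolution feature maps, e.g. from a 368x496 input image (sizes are arbitrary here)
B, D, H8, W8 = 1, 256, 46, 62
fmap1 = torch.randn(B, D, H8, W8)
fmap2 = torch.randn(B, D, H8, W8)

corr_fn = CorrBlock(fmap1, fmap2, num_levels=4, radius=4)

# The identity pixel grid corresponds to a zero-flow initialization (coords1 == coords0)
coords = coords_grid(B, H8, W8, device=fmap1.device)
corr_features = corr_fn(coords)

# 4 pyramid levels x (2*4+1)^2 sampled neighbors = 324 correlation features per pixel
print(corr_features.shape)  # torch.Size([1, 324, 46, 62])
```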
1).float() + img2 = torch.from_numpy(img2).permute(2, 0, 1).float() + return img1, img2, self.extra_info[index] + + if not self.init_seed: + worker_info = torch.utils.data.get_worker_info() + if worker_info is not None: + torch.manual_seed(worker_info.id) + np.random.seed(worker_info.id) + random.seed(worker_info.id) + self.init_seed = True + + index = index % len(self.image_list) + valid = None + if self.sparse: + flow, valid = frame_utils.readFlowKITTI(self.flow_list[index]) + else: + flow = frame_utils.read_gen(self.flow_list[index]) + + img1 = frame_utils.read_gen(self.image_list[index][0]) + img2 = frame_utils.read_gen(self.image_list[index][1]) + + flow = np.array(flow).astype(np.float32) + img1 = np.array(img1).astype(np.uint8) + img2 = np.array(img2).astype(np.uint8) + + # grayscale images + if len(img1.shape) == 2: + img1 = np.tile(img1[...,None], (1, 1, 3)) + img2 = np.tile(img2[...,None], (1, 1, 3)) + else: + img1 = img1[..., :3] + img2 = img2[..., :3] + + if self.augmentor is not None: + if self.sparse: + img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid) + else: + img1, img2, flow = self.augmentor(img1, img2, flow) + + img1 = torch.from_numpy(img1).permute(2, 0, 1).float() + img2 = torch.from_numpy(img2).permute(2, 0, 1).float() + flow = torch.from_numpy(flow).permute(2, 0, 1).float() + + if valid is not None: + valid = torch.from_numpy(valid) + else: + valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000) + + return img1, img2, flow, valid.float() + + + def __rmul__(self, v): + self.flow_list = v * self.flow_list + self.image_list = v * self.image_list + return self + + def __len__(self): + return len(self.image_list) + + +class MpiSintel(FlowDataset): + def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean'): + super(MpiSintel, self).__init__(aug_params) + flow_root = osp.join(root, split, 'flow') + image_root = osp.join(root, split, dstype) + + if split == 'test': + self.is_test = True + + for scene in os.listdir(image_root): + image_list = sorted(glob(osp.join(image_root, scene, '*.png'))) + for i in range(len(image_list)-1): + self.image_list += [ [image_list[i], image_list[i+1]] ] + self.extra_info += [ (scene, i) ] # scene and frame_id + + if split != 'test': + self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo'))) + + +class FlyingChairs(FlowDataset): + def __init__(self, aug_params=None, split='train', root='datasets/FlyingChairs_release/data'): + super(FlyingChairs, self).__init__(aug_params) + + images = sorted(glob(osp.join(root, '*.ppm'))) + flows = sorted(glob(osp.join(root, '*.flo'))) + assert (len(images)//2 == len(flows)) + + split_list = np.loadtxt('chairs_split.txt', dtype=np.int32) + for i in range(len(flows)): + xid = split_list[i] + if (split=='training' and xid==1) or (split=='validation' and xid==2): + self.flow_list += [ flows[i] ] + self.image_list += [ [images[2*i], images[2*i+1]] ] + + +class FlyingThings3D(FlowDataset): + def __init__(self, aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'): + super(FlyingThings3D, self).__init__(aug_params) + + for cam in ['left']: + for direction in ['into_future', 'into_past']: + image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*'))) + image_dirs = sorted([osp.join(f, cam) for f in image_dirs]) + + flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*'))) + flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs]) + + for idir, fdir in zip(image_dirs, flow_dirs): + images = 
sorted(glob(osp.join(idir, '*.png')) ) + flows = sorted(glob(osp.join(fdir, '*.pfm')) ) + for i in range(len(flows)-1): + if direction == 'into_future': + self.image_list += [ [images[i], images[i+1]] ] + self.flow_list += [ flows[i] ] + elif direction == 'into_past': + self.image_list += [ [images[i+1], images[i]] ] + self.flow_list += [ flows[i+1] ] + + +class KITTI(FlowDataset): + def __init__(self, aug_params=None, split='training', root='datasets/KITTI'): + super(KITTI, self).__init__(aug_params, sparse=True) + if split == 'testing': + self.is_test = True + + root = osp.join(root, split) + images1 = sorted(glob(osp.join(root, 'image_2/*_10.png'))) + images2 = sorted(glob(osp.join(root, 'image_2/*_11.png'))) + + for img1, img2 in zip(images1, images2): + frame_id = img1.split('/')[-1] + self.extra_info += [ [frame_id] ] + self.image_list += [ [img1, img2] ] + + if split == 'training': + self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png'))) + + +class HD1K(FlowDataset): + def __init__(self, aug_params=None, root='datasets/HD1k'): + super(HD1K, self).__init__(aug_params, sparse=True) + + seq_ix = 0 + while 1: + flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix))) + images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix))) + + if len(flows) == 0: + break + + for i in range(len(flows)-1): + self.flow_list += [flows[i]] + self.image_list += [ [images[i], images[i+1]] ] + + seq_ix += 1 + + +def fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'): + """ Create the data loader for the corresponding trainign set """ + + if args.stage == 'chairs': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True} + train_dataset = FlyingChairs(aug_params, split='training') + + elif args.stage == 'things': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True} + clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass') + final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass') + train_dataset = clean_dataset + final_dataset + + elif args.stage == 'sintel': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True} + things = FlyingThings3D(aug_params, dstype='frames_cleanpass') + sintel_clean = MpiSintel(aug_params, split='training', dstype='clean') + sintel_final = MpiSintel(aug_params, split='training', dstype='final') + + if TRAIN_DS == 'C+T+K+S+H': + kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True}) + hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True}) + train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things + + elif TRAIN_DS == 'C+T+K/S': + train_dataset = 100*sintel_clean + 100*sintel_final + things + + elif args.stage == 'kitti': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False} + train_dataset = KITTI(aug_params, split='training') + + train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, + pin_memory=False, shuffle=True, num_workers=4, drop_last=True) + + print('Training with %d image pairs' % len(train_dataset)) + return train_loader + diff --git a/vtoonify/model/raft/core/extractor.py b/vtoonify/model/raft/core/extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9c759d1243d4694e8656c2f6f8a37e53edd009 --- /dev/null +++ 
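`fetch_dataloader` above mixes datasets with expressions such as `100*sintel_clean + 100*sintel_final + things`. That works because `FlowDataset.__rmul__` simply repeats the file lists (oversampling the smaller dataset), while `+` falls back to torch's `Dataset.__add__`, which wraps both operands in a `ConcatDataset`. A small sketch with hypothetical file paths; nothing is read from disk until `__getitem__` is called, but the repo's dependencies (cv2, torchvision, Pillow) must be importable:

```python
from torch.utils.data import ConcatDataset

from model.raft.core.datasets import FlowDataset

ds = FlowDataset()                                  # normally built via a subclass such as MpiSintel
ds.image_list = [['frame_0.png', 'frame_1.png']]    # hypothetical paths, never opened here
ds.flow_list = ['frame_0.flo']

print(len(ds))                                      # 1
oversampled = 100 * ds                              # __rmul__ repeats the lists in place and returns ds
print(len(oversampled))                             # 100

combined = oversampled + ds                         # Dataset.__add__ -> ConcatDataset
# Both operands are the same (already oversampled) object here, hence 100 + 100 items:
print(isinstance(combined, ConcatDataset), len(combined))  # True 200
```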
b/vtoonify/model/raft/core/extractor.py @@ -0,0 +1,267 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ResidualBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn='group', stride=1): + super(ResidualBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(planes) + self.norm2 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm3 = nn.BatchNorm2d(planes) + + elif norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(planes) + self.norm2 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm3 = nn.InstanceNorm2d(planes) + + elif norm_fn == 'none': + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + if not stride == 1: + self.norm3 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) + + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x+y) + + + +class BottleneckBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn='group', stride=1): + super(BottleneckBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0) + self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride) + self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(planes//4) + self.norm2 = nn.BatchNorm2d(planes//4) + self.norm3 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm4 = nn.BatchNorm2d(planes) + + elif norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(planes//4) + self.norm2 = nn.InstanceNorm2d(planes//4) + self.norm3 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm4 = nn.InstanceNorm2d(planes) + + elif norm_fn == 'none': + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + self.norm3 = nn.Sequential() + if not stride == 1: + self.norm4 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4) + + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + y = self.relu(self.norm3(self.conv3(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x+y) + +class BasicEncoder(nn.Module): + def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0): + super(BasicEncoder, 
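The `ResidualBlock` and `BottleneckBlock` above pick their normalization from `norm_fn` and only create the 1x1 downsample branch when `stride > 1`, so a block that changes the channel count is only well formed when it also strides, which is how the encoders below use it. A quick shape check on random input, as a sketch:

```python
import torch

from model.raft.core.extractor import ResidualBlock

x = torch.randn(1, 64, 48, 64)

same = ResidualBlock(64, 64, norm_fn='group', stride=1)     # no downsample branch needed
down = ResidualBlock(64, 96, norm_fn='instance', stride=2)  # strided 1x1 conv matches x to y

print(same(x).shape)  # torch.Size([1, 64, 48, 64])
print(down(x).shape)  # torch.Size([1, 96, 24, 32])
```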
self).__init__() + self.norm_fn = norm_fn + + if self.norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64) + + elif self.norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(64) + + elif self.norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(64) + + elif self.norm_fn == 'none': + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = 64 + self.layer1 = self._make_layer(64, stride=1) + self.layer2 = self._make_layer(96, stride=2) + self.layer3 = self._make_layer(128, stride=2) + + # output convolution + self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1) + + self.dropout = None + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + + def forward(self, x): + + # if input is list, combine batch dimension + is_list = isinstance(x, tuple) or isinstance(x, list) + if is_list: + batch_dim = x[0].shape[0] + x = torch.cat(x, dim=0) + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + + x = self.conv2(x) + + if self.training and self.dropout is not None: + x = self.dropout(x) + + if is_list: + x = torch.split(x, [batch_dim, batch_dim], dim=0) + + return x + + +class SmallEncoder(nn.Module): + def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0): + super(SmallEncoder, self).__init__() + self.norm_fn = norm_fn + + if self.norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32) + + elif self.norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(32) + + elif self.norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(32) + + elif self.norm_fn == 'none': + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = 32 + self.layer1 = self._make_layer(32, stride=1) + self.layer2 = self._make_layer(64, stride=2) + self.layer3 = self._make_layer(96, stride=2) + + self.dropout = None + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + + self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + + def forward(self, x): + + # if input is list, combine batch dimension + is_list = isinstance(x, tuple) or isinstance(x, list) + if is_list: + batch_dim = x[0].shape[0] + x = torch.cat(x, dim=0) + + x = self.conv1(x) + x 
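`BasicEncoder` above halves the resolution in `conv1` and again in two of its residual stages, so its features come out at 1/8 of the input resolution; passing a list of images runs them through one batched forward pass and splits the result, which is how the RAFT model later in this diff obtains both feature maps at once. A sketch with random images whose sides are divisible by 8:

```python
import torch

from model.raft.core.extractor import BasicEncoder

# The same settings RAFT uses for its feature network (fnet) in the non-small configuration
fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=0.0)

img1 = torch.randn(1, 3, 368, 496)
img2 = torch.randn(1, 3, 368, 496)

fmap1, fmap2 = fnet([img1, img2])   # list input -> concatenated batch -> split back in two
print(fmap1.shape)                  # torch.Size([1, 256, 46, 62]), i.e. 1/8 resolution
```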
= self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.conv2(x) + + if self.training and self.dropout is not None: + x = self.dropout(x) + + if is_list: + x = torch.split(x, [batch_dim, batch_dim], dim=0) + + return x diff --git a/vtoonify/model/raft/core/raft.py b/vtoonify/model/raft/core/raft.py new file mode 100644 index 0000000000000000000000000000000000000000..a25c22f78c96470e3dca4c25e81683133ae024e3 --- /dev/null +++ b/vtoonify/model/raft/core/raft.py @@ -0,0 +1,144 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from model.raft.core.update import BasicUpdateBlock, SmallUpdateBlock +from model.raft.core.extractor import BasicEncoder, SmallEncoder +from model.raft.core.corr import CorrBlock, AlternateCorrBlock +from model.raft.core.utils.utils import bilinear_sampler, coords_grid, upflow8 + +try: + autocast = torch.cuda.amp.autocast +except: + # dummy autocast for PyTorch < 1.6 + class autocast: + def __init__(self, enabled): + pass + def __enter__(self): + pass + def __exit__(self, *args): + pass + + +class RAFT(nn.Module): + def __init__(self, args): + super(RAFT, self).__init__() + self.args = args + + if args.small: + self.hidden_dim = hdim = 96 + self.context_dim = cdim = 64 + args.corr_levels = 4 + args.corr_radius = 3 + + else: + self.hidden_dim = hdim = 128 + self.context_dim = cdim = 128 + args.corr_levels = 4 + args.corr_radius = 4 + + if 'dropout' not in self.args: + self.args.dropout = 0 + + if 'alternate_corr' not in self.args: + self.args.alternate_corr = False + + # feature network, context network, and update block + if args.small: + self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout) + self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout) + self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim) + + else: + self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout) + self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout) + self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim) + + def freeze_bn(self): + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + + def initialize_flow(self, img): + """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0""" + N, C, H, W = img.shape + coords0 = coords_grid(N, H//8, W//8, device=img.device) + coords1 = coords_grid(N, H//8, W//8, device=img.device) + + # optical flow computed as difference: flow = coords1 - coords0 + return coords0, coords1 + + def upsample_flow(self, flow, mask): + """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """ + N, _, H, W = flow.shape + mask = mask.view(N, 1, 9, 8, 8, H, W) + mask = torch.softmax(mask, dim=2) + + up_flow = F.unfold(8 * flow, [3,3], padding=1) + up_flow = up_flow.view(N, 2, 9, 1, 1, H, W) + + up_flow = torch.sum(mask * up_flow, dim=2) + up_flow = up_flow.permute(0, 1, 4, 2, 5, 3) + return up_flow.reshape(N, 2, 8*H, 8*W) + + + def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False): + """ Estimate optical flow between pair of frames """ + + image1 = 2 * (image1 / 255.0) - 1.0 + image2 = 2 * (image2 / 255.0) - 1.0 + + image1 = image1.contiguous() + image2 = image2.contiguous() + + hdim = self.hidden_dim + cdim = self.context_dim + + # run the feature network + with autocast(enabled=self.args.mixed_precision): + fmap1, fmap2 = 
self.fnet([image1, image2]) + + fmap1 = fmap1.float() + fmap2 = fmap2.float() + if self.args.alternate_corr: + corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius) + else: + corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius) + + # run the context network + with autocast(enabled=self.args.mixed_precision): + cnet = self.cnet(image1) + net, inp = torch.split(cnet, [hdim, cdim], dim=1) + net = torch.tanh(net) + inp = torch.relu(inp) + + coords0, coords1 = self.initialize_flow(image1) + + if flow_init is not None: + coords1 = coords1 + flow_init + + flow_predictions = [] + for itr in range(iters): + coords1 = coords1.detach() + corr = corr_fn(coords1) # index correlation volume + + flow = coords1 - coords0 + with autocast(enabled=self.args.mixed_precision): + net, up_mask, delta_flow = self.update_block(net, inp, corr, flow) + + # F(t+1) = F(t) + \Delta(t) + coords1 = coords1 + delta_flow + + # upsample predictions + if up_mask is None: + flow_up = upflow8(coords1 - coords0) + else: + flow_up = self.upsample_flow(coords1 - coords0, up_mask) + + flow_predictions.append(flow_up) + + if test_mode: + return coords1 - coords0, flow_up + + return flow_predictions diff --git a/vtoonify/model/raft/core/update.py b/vtoonify/model/raft/core/update.py new file mode 100644 index 0000000000000000000000000000000000000000..f940497f9b5eb1c12091574fe9a0223a1b196d50 --- /dev/null +++ b/vtoonify/model/raft/core/update.py @@ -0,0 +1,139 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class FlowHead(nn.Module): + def __init__(self, input_dim=128, hidden_dim=256): + super(FlowHead, self).__init__() + self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1) + self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + return self.conv2(self.relu(self.conv1(x))) + +class ConvGRU(nn.Module): + def __init__(self, hidden_dim=128, input_dim=192+128): + super(ConvGRU, self).__init__() + self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1) + self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1) + self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1) + + def forward(self, h, x): + hx = torch.cat([h, x], dim=1) + + z = torch.sigmoid(self.convz(hx)) + r = torch.sigmoid(self.convr(hx)) + q = torch.tanh(self.convq(torch.cat([r*h, x], dim=1))) + + h = (1-z) * h + z * q + return h + +class SepConvGRU(nn.Module): + def __init__(self, hidden_dim=128, input_dim=192+128): + super(SepConvGRU, self).__init__() + self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2)) + self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2)) + self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2)) + + self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0)) + self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0)) + self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0)) + + + def forward(self, h, x): + # horizontal + hx = torch.cat([h, x], dim=1) + z = torch.sigmoid(self.convz1(hx)) + r = torch.sigmoid(self.convr1(hx)) + q = torch.tanh(self.convq1(torch.cat([r*h, x], dim=1))) + h = (1-z) * h + z * q + + # vertical + hx = torch.cat([h, x], dim=1) + z = torch.sigmoid(self.convz2(hx)) + r = torch.sigmoid(self.convr2(hx)) + q = torch.tanh(self.convq2(torch.cat([r*h, x], dim=1))) + h = (1-z) * h + z * q + + return h + +class 
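With raft.py complete above, end-to-end inference looks roughly like the sketch below. The `Namespace` fields mirror the attributes that `__init__` and `forward()` read (`small`, `dropout`, `alternate_corr`, `mixed_precision`); the weights are random here, so the flow values are meaningless, but the shapes and the `test_mode` return convention hold. In real use a checkpoint is loaded first and inputs are padded so that height and width are multiples of 8.

```python
import argparse

import torch

from model.raft.core.raft import RAFT

args = argparse.Namespace(small=False, dropout=0, alternate_corr=False, mixed_precision=False)
model = RAFT(args).eval()           # random weights; load a checkpoint here in practice

image1 = torch.randint(0, 255, (1, 3, 368, 496)).float()   # RAFT expects 0-255 inputs
image2 = torch.randint(0, 255, (1, 3, 368, 496)).float()

with torch.no_grad():
    flow_low, flow_up = model(image1, image2, iters=12, test_mode=True)

print(flow_low.shape)  # torch.Size([1, 2, 46, 62])   1/8-resolution estimate
print(flow_up.shape)   # torch.Size([1, 2, 368, 496]) convex-upsampled to full resolution
```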
SmallMotionEncoder(nn.Module): + def __init__(self, args): + super(SmallMotionEncoder, self).__init__() + cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2 + self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0) + self.convf1 = nn.Conv2d(2, 64, 7, padding=3) + self.convf2 = nn.Conv2d(64, 32, 3, padding=1) + self.conv = nn.Conv2d(128, 80, 3, padding=1) + + def forward(self, flow, corr): + cor = F.relu(self.convc1(corr)) + flo = F.relu(self.convf1(flow)) + flo = F.relu(self.convf2(flo)) + cor_flo = torch.cat([cor, flo], dim=1) + out = F.relu(self.conv(cor_flo)) + return torch.cat([out, flow], dim=1) + +class BasicMotionEncoder(nn.Module): + def __init__(self, args): + super(BasicMotionEncoder, self).__init__() + cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2 + self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0) + self.convc2 = nn.Conv2d(256, 192, 3, padding=1) + self.convf1 = nn.Conv2d(2, 128, 7, padding=3) + self.convf2 = nn.Conv2d(128, 64, 3, padding=1) + self.conv = nn.Conv2d(64+192, 128-2, 3, padding=1) + + def forward(self, flow, corr): + cor = F.relu(self.convc1(corr)) + cor = F.relu(self.convc2(cor)) + flo = F.relu(self.convf1(flow)) + flo = F.relu(self.convf2(flo)) + + cor_flo = torch.cat([cor, flo], dim=1) + out = F.relu(self.conv(cor_flo)) + return torch.cat([out, flow], dim=1) + +class SmallUpdateBlock(nn.Module): + def __init__(self, args, hidden_dim=96): + super(SmallUpdateBlock, self).__init__() + self.encoder = SmallMotionEncoder(args) + self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64) + self.flow_head = FlowHead(hidden_dim, hidden_dim=128) + + def forward(self, net, inp, corr, flow): + motion_features = self.encoder(flow, corr) + inp = torch.cat([inp, motion_features], dim=1) + net = self.gru(net, inp) + delta_flow = self.flow_head(net) + + return net, None, delta_flow + +class BasicUpdateBlock(nn.Module): + def __init__(self, args, hidden_dim=128, input_dim=128): + super(BasicUpdateBlock, self).__init__() + self.args = args + self.encoder = BasicMotionEncoder(args) + self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim) + self.flow_head = FlowHead(hidden_dim, hidden_dim=256) + + self.mask = nn.Sequential( + nn.Conv2d(128, 256, 3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 64*9, 1, padding=0)) + + def forward(self, net, inp, corr, flow, upsample=True): + motion_features = self.encoder(flow, corr) + inp = torch.cat([inp, motion_features], dim=1) + + net = self.gru(net, inp) + delta_flow = self.flow_head(net) + + # scale mask to balence gradients + mask = .25 * self.mask(net) + return net, mask, delta_flow + + + diff --git a/vtoonify/model/raft/core/utils/__init__.py b/vtoonify/model/raft/core/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vtoonify/model/raft/core/utils/augmentor.py b/vtoonify/model/raft/core/utils/augmentor.py new file mode 100644 index 0000000000000000000000000000000000000000..e81c4f2b5c16c31c0ae236d744f299d430228a04 --- /dev/null +++ b/vtoonify/model/raft/core/utils/augmentor.py @@ -0,0 +1,246 @@ +import numpy as np +import random +import math +from PIL import Image + +import cv2 +cv2.setNumThreads(0) +cv2.ocl.setUseOpenCL(False) + +import torch +from torchvision.transforms import ColorJitter +import torch.nn.functional as F + + +class FlowAugmentor: + def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True): + + # spatial augmentation params + self.crop_size = crop_size + 
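update.py is complete at this point: `BasicMotionEncoder` fuses the sampled correlation features with the current flow, `SepConvGRU` updates the hidden state, and the block returns a residual flow together with the 64*9-channel mask that `RAFT.upsample_flow` turns into convex-combination weights. One refinement step, sketched with zero tensors of the shapes the non-small configuration expects:

```python
import argparse

import torch

from model.raft.core.update import BasicUpdateBlock

args = argparse.Namespace(corr_levels=4, corr_radius=4)   # matches the non-small RAFT settings
update = BasicUpdateBlock(args, hidden_dim=128)

B, H8, W8 = 1, 46, 62
net = torch.zeros(B, 128, H8, W8)       # hidden state (tanh half of the context features)
inp = torch.zeros(B, 128, H8, W8)       # input half of the context features
corr = torch.zeros(B, 324, H8, W8)      # CorrBlock output: 4 levels x (2*4+1)^2
flow = torch.zeros(B, 2, H8, W8)        # current flow estimate

net, up_mask, delta_flow = update(net, inp, corr, flow)
print(delta_flow.shape)  # torch.Size([1, 2, 46, 62])   residual flow update
print(up_mask.shape)     # torch.Size([1, 576, 46, 62]) 64*9 upsampling weights
```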
self.min_scale = min_scale + self.max_scale = max_scale + self.spatial_aug_prob = 0.8 + self.stretch_prob = 0.8 + self.max_stretch = 0.2 + + # flip augmentation params + self.do_flip = do_flip + self.h_flip_prob = 0.5 + self.v_flip_prob = 0.1 + + # photometric augmentation params + self.photo_aug = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.5/3.14) + self.asymmetric_color_aug_prob = 0.2 + self.eraser_aug_prob = 0.5 + + def color_transform(self, img1, img2): + """ Photometric augmentation """ + + # asymmetric + if np.random.rand() < self.asymmetric_color_aug_prob: + img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8) + img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8) + + # symmetric + else: + image_stack = np.concatenate([img1, img2], axis=0) + image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8) + img1, img2 = np.split(image_stack, 2, axis=0) + + return img1, img2 + + def eraser_transform(self, img1, img2, bounds=[50, 100]): + """ Occlusion augmentation """ + + ht, wd = img1.shape[:2] + if np.random.rand() < self.eraser_aug_prob: + mean_color = np.mean(img2.reshape(-1, 3), axis=0) + for _ in range(np.random.randint(1, 3)): + x0 = np.random.randint(0, wd) + y0 = np.random.randint(0, ht) + dx = np.random.randint(bounds[0], bounds[1]) + dy = np.random.randint(bounds[0], bounds[1]) + img2[y0:y0+dy, x0:x0+dx, :] = mean_color + + return img1, img2 + + def spatial_transform(self, img1, img2, flow): + # randomly sample scale + ht, wd = img1.shape[:2] + min_scale = np.maximum( + (self.crop_size[0] + 8) / float(ht), + (self.crop_size[1] + 8) / float(wd)) + + scale = 2 ** np.random.uniform(self.min_scale, self.max_scale) + scale_x = scale + scale_y = scale + if np.random.rand() < self.stretch_prob: + scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch) + scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch) + + scale_x = np.clip(scale_x, min_scale, None) + scale_y = np.clip(scale_y, min_scale, None) + + if np.random.rand() < self.spatial_aug_prob: + # rescale the images + img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + flow = flow * [scale_x, scale_y] + + if self.do_flip: + if np.random.rand() < self.h_flip_prob: # h-flip + img1 = img1[:, ::-1] + img2 = img2[:, ::-1] + flow = flow[:, ::-1] * [-1.0, 1.0] + + if np.random.rand() < self.v_flip_prob: # v-flip + img1 = img1[::-1, :] + img2 = img2[::-1, :] + flow = flow[::-1, :] * [1.0, -1.0] + + y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0]) + x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1]) + + img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + + return img1, img2, flow + + def __call__(self, img1, img2, flow): + img1, img2 = self.color_transform(img1, img2) + img1, img2 = self.eraser_transform(img1, img2) + img1, img2, flow = self.spatial_transform(img1, img2, flow) + + img1 = np.ascontiguousarray(img1) + img2 = np.ascontiguousarray(img2) + flow = np.ascontiguousarray(flow) + + return img1, img2, flow + +class SparseFlowAugmentor: + def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False): + # spatial augmentation params + 
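`FlowAugmentor` above chains photometric jitter, occlusion "erasing", random rescaling and stretching, optional flips, and a random crop, rescaling the flow vectors whenever the images are resized or flipped. A sketch on synthetic Sintel-sized arrays standing in for a real image pair and its ground-truth flow (the crop size below is arbitrary):

```python
import numpy as np

from model.raft.core.utils.augmentor import FlowAugmentor

aug_params = {'crop_size': (368, 496), 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True}
aug = FlowAugmentor(**aug_params)

img1 = np.random.randint(0, 255, (436, 1024, 3), dtype=np.uint8)
img2 = np.random.randint(0, 255, (436, 1024, 3), dtype=np.uint8)
flow = np.random.randn(436, 1024, 2).astype(np.float32)

img1, img2, flow = aug(img1, img2, flow)
print(img1.shape, flow.shape)   # (368, 496, 3) (368, 496, 2), a random crop of the rescaled pair
```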
self.crop_size = crop_size + self.min_scale = min_scale + self.max_scale = max_scale + self.spatial_aug_prob = 0.8 + self.stretch_prob = 0.8 + self.max_stretch = 0.2 + + # flip augmentation params + self.do_flip = do_flip + self.h_flip_prob = 0.5 + self.v_flip_prob = 0.1 + + # photometric augmentation params + self.photo_aug = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14) + self.asymmetric_color_aug_prob = 0.2 + self.eraser_aug_prob = 0.5 + + def color_transform(self, img1, img2): + image_stack = np.concatenate([img1, img2], axis=0) + image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8) + img1, img2 = np.split(image_stack, 2, axis=0) + return img1, img2 + + def eraser_transform(self, img1, img2): + ht, wd = img1.shape[:2] + if np.random.rand() < self.eraser_aug_prob: + mean_color = np.mean(img2.reshape(-1, 3), axis=0) + for _ in range(np.random.randint(1, 3)): + x0 = np.random.randint(0, wd) + y0 = np.random.randint(0, ht) + dx = np.random.randint(50, 100) + dy = np.random.randint(50, 100) + img2[y0:y0+dy, x0:x0+dx, :] = mean_color + + return img1, img2 + + def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0): + ht, wd = flow.shape[:2] + coords = np.meshgrid(np.arange(wd), np.arange(ht)) + coords = np.stack(coords, axis=-1) + + coords = coords.reshape(-1, 2).astype(np.float32) + flow = flow.reshape(-1, 2).astype(np.float32) + valid = valid.reshape(-1).astype(np.float32) + + coords0 = coords[valid>=1] + flow0 = flow[valid>=1] + + ht1 = int(round(ht * fy)) + wd1 = int(round(wd * fx)) + + coords1 = coords0 * [fx, fy] + flow1 = flow0 * [fx, fy] + + xx = np.round(coords1[:,0]).astype(np.int32) + yy = np.round(coords1[:,1]).astype(np.int32) + + v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1) + xx = xx[v] + yy = yy[v] + flow1 = flow1[v] + + flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32) + valid_img = np.zeros([ht1, wd1], dtype=np.int32) + + flow_img[yy, xx] = flow1 + valid_img[yy, xx] = 1 + + return flow_img, valid_img + + def spatial_transform(self, img1, img2, flow, valid): + # randomly sample scale + + ht, wd = img1.shape[:2] + min_scale = np.maximum( + (self.crop_size[0] + 1) / float(ht), + (self.crop_size[1] + 1) / float(wd)) + + scale = 2 ** np.random.uniform(self.min_scale, self.max_scale) + scale_x = np.clip(scale, min_scale, None) + scale_y = np.clip(scale, min_scale, None) + + if np.random.rand() < self.spatial_aug_prob: + # rescale the images + img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y) + + if self.do_flip: + if np.random.rand() < 0.5: # h-flip + img1 = img1[:, ::-1] + img2 = img2[:, ::-1] + flow = flow[:, ::-1] * [-1.0, 1.0] + valid = valid[:, ::-1] + + margin_y = 20 + margin_x = 50 + + y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y) + x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x) + + y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0]) + x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1]) + + img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + return img1, img2, flow, valid + + + def __call__(self, img1, img2, flow, valid): + img1, img2 = 
self.color_transform(img1, img2) + img1, img2 = self.eraser_transform(img1, img2) + img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid) + + img1 = np.ascontiguousarray(img1) + img2 = np.ascontiguousarray(img2) + flow = np.ascontiguousarray(flow) + valid = np.ascontiguousarray(valid) + + return img1, img2, flow, valid diff --git a/vtoonify/model/raft/core/utils/flow_viz.py b/vtoonify/model/raft/core/utils/flow_viz.py new file mode 100644 index 0000000000000000000000000000000000000000..dcee65e89b91b07ee0496aeb4c7e7436abf99641 --- /dev/null +++ b/vtoonify/model/raft/core/utils/flow_viz.py @@ -0,0 +1,132 @@ +# Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization + + +# MIT License +# +# Copyright (c) 2018 Tom Runia +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to conditions. +# +# Author: Tom Runia +# Date Created: 2018-08-03 + +import numpy as np + +def make_colorwheel(): + """ + Generates a color wheel for optical flow visualization as presented in: + Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) + URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf + + Code follows the original C++ source code of Daniel Scharstein. + Code follows the the Matlab source code of Deqing Sun. + + Returns: + np.ndarray: Color wheel + """ + + RY = 15 + YG = 6 + GC = 4 + CB = 11 + BM = 13 + MR = 6 + + ncols = RY + YG + GC + CB + BM + MR + colorwheel = np.zeros((ncols, 3)) + col = 0 + + # RY + colorwheel[0:RY, 0] = 255 + colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY) + col = col+RY + # YG + colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG) + colorwheel[col:col+YG, 1] = 255 + col = col+YG + # GC + colorwheel[col:col+GC, 1] = 255 + colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC) + col = col+GC + # CB + colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB) + colorwheel[col:col+CB, 2] = 255 + col = col+CB + # BM + colorwheel[col:col+BM, 2] = 255 + colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM) + col = col+BM + # MR + colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR) + colorwheel[col:col+MR, 0] = 255 + return colorwheel + + +def flow_uv_to_colors(u, v, convert_to_bgr=False): + """ + Applies the flow color wheel to (possibly clipped) flow components u and v. + + According to the C++ source code of Daniel Scharstein + According to the Matlab source code of Deqing Sun + + Args: + u (np.ndarray): Input horizontal flow of shape [H,W] + v (np.ndarray): Input vertical flow of shape [H,W] + convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. 
+ + Returns: + np.ndarray: Flow visualization image of shape [H,W,3] + """ + flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) + colorwheel = make_colorwheel() # shape [55x3] + ncols = colorwheel.shape[0] + rad = np.sqrt(np.square(u) + np.square(v)) + a = np.arctan2(-v, -u)/np.pi + fk = (a+1) / 2*(ncols-1) + k0 = np.floor(fk).astype(np.int32) + k1 = k0 + 1 + k1[k1 == ncols] = 0 + f = fk - k0 + for i in range(colorwheel.shape[1]): + tmp = colorwheel[:,i] + col0 = tmp[k0] / 255.0 + col1 = tmp[k1] / 255.0 + col = (1-f)*col0 + f*col1 + idx = (rad <= 1) + col[idx] = 1 - rad[idx] * (1-col[idx]) + col[~idx] = col[~idx] * 0.75 # out of range + # Note the 2-i => BGR instead of RGB + ch_idx = 2-i if convert_to_bgr else i + flow_image[:,:,ch_idx] = np.floor(255 * col) + return flow_image + + +def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False): + """ + Expects a two dimensional flow image of shape. + + Args: + flow_uv (np.ndarray): Flow UV image of shape [H,W,2] + clip_flow (float, optional): Clip maximum of flow values. Defaults to None. + convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. + + Returns: + np.ndarray: Flow visualization image of shape [H,W,3] + """ + assert flow_uv.ndim == 3, 'input flow must have three dimensions' + assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]' + if clip_flow is not None: + flow_uv = np.clip(flow_uv, 0, clip_flow) + u = flow_uv[:,:,0] + v = flow_uv[:,:,1] + rad = np.sqrt(np.square(u) + np.square(v)) + rad_max = np.max(rad) + epsilon = 1e-5 + u = u / (rad_max + epsilon) + v = v / (rad_max + epsilon) + return flow_uv_to_colors(u, v, convert_to_bgr) \ No newline at end of file diff --git a/vtoonify/model/raft/core/utils/frame_utils.py b/vtoonify/model/raft/core/utils/frame_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6c491135efaffc25bd61ec3ecde99d236f5deb12 --- /dev/null +++ b/vtoonify/model/raft/core/utils/frame_utils.py @@ -0,0 +1,137 @@ +import numpy as np +from PIL import Image +from os.path import * +import re + +import cv2 +cv2.setNumThreads(0) +cv2.ocl.setUseOpenCL(False) + +TAG_CHAR = np.array([202021.25], np.float32) + +def readFlow(fn): + """ Read .flo file in Middlebury format""" + # Code adapted from: + # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy + + # WARNING: this will work on little-endian architectures (eg Intel x86) only! + # print 'fn = %s'%(fn) + with open(fn, 'rb') as f: + magic = np.fromfile(f, np.float32, count=1) + if 202021.25 != magic: + print('Magic number incorrect. 
Invalid .flo file') + return None + else: + w = np.fromfile(f, np.int32, count=1) + h = np.fromfile(f, np.int32, count=1) + # print 'Reading %d x %d flo file\n' % (w, h) + data = np.fromfile(f, np.float32, count=2*int(w)*int(h)) + # Reshape data into 3D array (columns, rows, bands) + # The reshape here is for visualization, the original code is (w,h,2) + return np.resize(data, (int(h), int(w), 2)) + +def readPFM(file): + file = open(file, 'rb') + + color = None + width = None + height = None + scale = None + endian = None + + header = file.readline().rstrip() + if header == b'PF': + color = True + elif header == b'Pf': + color = False + else: + raise Exception('Not a PFM file.') + + dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline()) + if dim_match: + width, height = map(int, dim_match.groups()) + else: + raise Exception('Malformed PFM header.') + + scale = float(file.readline().rstrip()) + if scale < 0: # little-endian + endian = '<' + scale = -scale + else: + endian = '>' # big-endian + + data = np.fromfile(file, endian + 'f') + shape = (height, width, 3) if color else (height, width) + + data = np.reshape(data, shape) + data = np.flipud(data) + return data + +def writeFlow(filename,uv,v=None): + """ Write optical flow to file. + + If v is None, uv is assumed to contain both u and v channels, + stacked in depth. + Original code by Deqing Sun, adapted from Daniel Scharstein. + """ + nBands = 2 + + if v is None: + assert(uv.ndim == 3) + assert(uv.shape[2] == 2) + u = uv[:,:,0] + v = uv[:,:,1] + else: + u = uv + + assert(u.shape == v.shape) + height,width = u.shape + f = open(filename,'wb') + # write the header + f.write(TAG_CHAR) + np.array(width).astype(np.int32).tofile(f) + np.array(height).astype(np.int32).tofile(f) + # arrange into matrix form + tmp = np.zeros((height, width*nBands)) + tmp[:,np.arange(width)*2] = u + tmp[:,np.arange(width)*2 + 1] = v + tmp.astype(np.float32).tofile(f) + f.close() + + +def readFlowKITTI(filename): + flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR) + flow = flow[:,:,::-1].astype(np.float32) + flow, valid = flow[:, :, :2], flow[:, :, 2] + flow = (flow - 2**15) / 64.0 + return flow, valid + +def readDispKITTI(filename): + disp = cv2.imread(filename, cv2.IMREAD_ANYDEPTH) / 256.0 + valid = disp > 0.0 + flow = np.stack([-disp, np.zeros_like(disp)], -1) + return flow, valid + + +def writeFlowKITTI(filename, uv): + uv = 64.0 * uv + 2**15 + valid = np.ones([uv.shape[0], uv.shape[1], 1]) + uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16) + cv2.imwrite(filename, uv[..., ::-1]) + + +def read_gen(file_name, pil=False): + ext = splitext(file_name)[-1] + if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg': + return Image.open(file_name) + elif ext == '.bin' or ext == '.raw': + return np.load(file_name) + elif ext == '.flo': + return readFlow(file_name).astype(np.float32) + elif ext == '.pfm': + flow = readPFM(file_name).astype(np.float32) + if len(flow.shape) == 2: + return flow + else: + return flow[:, :, :-1] + return [] \ No newline at end of file diff --git a/vtoonify/model/raft/core/utils/utils.py b/vtoonify/model/raft/core/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..741ccfe4d0d778c3199c586d368edc2882d4fff8 --- /dev/null +++ b/vtoonify/model/raft/core/utils/utils.py @@ -0,0 +1,82 @@ +import torch +import torch.nn.functional as F +import numpy as np +from scipy import interpolate + + +class InputPadder: + """ Pads images such that dimensions are divisible by 8 """ + def 
__init__(self, dims, mode='sintel'): + self.ht, self.wd = dims[-2:] + pad_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8 + pad_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8 + if mode == 'sintel': + self._pad = [pad_wd//2, pad_wd - pad_wd//2, pad_ht//2, pad_ht - pad_ht//2] + else: + self._pad = [pad_wd//2, pad_wd - pad_wd//2, 0, pad_ht] + + def pad(self, *inputs): + return [F.pad(x, self._pad, mode='replicate') for x in inputs] + + def unpad(self,x): + ht, wd = x.shape[-2:] + c = [self._pad[2], ht-self._pad[3], self._pad[0], wd-self._pad[1]] + return x[..., c[0]:c[1], c[2]:c[3]] + +def forward_interpolate(flow): + flow = flow.detach().cpu().numpy() + dx, dy = flow[0], flow[1] + + ht, wd = dx.shape + x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht)) + + x1 = x0 + dx + y1 = y0 + dy + + x1 = x1.reshape(-1) + y1 = y1.reshape(-1) + dx = dx.reshape(-1) + dy = dy.reshape(-1) + + valid = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht) + x1 = x1[valid] + y1 = y1[valid] + dx = dx[valid] + dy = dy[valid] + + flow_x = interpolate.griddata( + (x1, y1), dx, (x0, y0), method='nearest', fill_value=0) + + flow_y = interpolate.griddata( + (x1, y1), dy, (x0, y0), method='nearest', fill_value=0) + + flow = np.stack([flow_x, flow_y], axis=0) + return torch.from_numpy(flow).float() + + +def bilinear_sampler(img, coords, mode='bilinear', mask=False): + """ Wrapper for grid_sample, uses pixel coordinates """ + H, W = img.shape[-2:] + xgrid, ygrid = coords.split([1,1], dim=-1) + xgrid = 2*xgrid/(W-1) - 1 + ygrid = 2*ygrid/(H-1) - 1 + + grid = torch.cat([xgrid, ygrid], dim=-1) + img = F.grid_sample(img, grid, align_corners=True) + + if mask: + mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1) + return img, mask.float() + + return img + + +def coords_grid(batch, ht, wd, device): + coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) + coords = torch.stack(coords[::-1], dim=0).float() + return coords[None].repeat(batch, 1, 1, 1) + + +def upflow8(flow, mode='bilinear'): + new_size = (8 * flow.shape[2], 8 * flow.shape[3]) + return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True) diff --git a/vtoonify/model/raft/demo.py b/vtoonify/model/raft/demo.py new file mode 100644 index 0000000000000000000000000000000000000000..5abc1da863f1231af1247209739402b05fa8bf85 --- /dev/null +++ b/vtoonify/model/raft/demo.py @@ -0,0 +1,75 @@ +import sys +sys.path.append('core') + +import argparse +import os +import cv2 +import glob +import numpy as np +import torch +from PIL import Image + +from raft import RAFT +from utils import flow_viz +from utils.utils import InputPadder + + + +DEVICE = 'cuda' + +def load_image(imfile): + img = np.array(Image.open(imfile)).astype(np.uint8) + img = torch.from_numpy(img).permute(2, 0, 1).float() + return img[None].to(DEVICE) + + +def viz(img, flo): + img = img[0].permute(1,2,0).cpu().numpy() + flo = flo[0].permute(1,2,0).cpu().numpy() + + # map flow to rgb image + flo = flow_viz.flow_to_image(flo) + img_flo = np.concatenate([img, flo], axis=0) + + # import matplotlib.pyplot as plt + # plt.imshow(img_flo / 255.0) + # plt.show() + + cv2.imshow('image', img_flo[:, :, [2,1,0]]/255.0) + cv2.waitKey() + + +def demo(args): + model = torch.nn.DataParallel(RAFT(args)) + model.load_state_dict(torch.load(args.model)) + + model = model.module + model.to(DEVICE) + model.eval() + + with torch.no_grad(): + images = glob.glob(os.path.join(args.path, '*.png')) + \ + glob.glob(os.path.join(args.path, '*.jpg')) + + images = sorted(images) + for imfile1, 
imfile2 in zip(images[:-1], images[1:]): + image1 = load_image(imfile1) + image2 = load_image(imfile2) + + padder = InputPadder(image1.shape) + image1, image2 = padder.pad(image1, image2) + + flow_low, flow_up = model(image1, image2, iters=20, test_mode=True) + viz(image1, flow_up) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--model', help="restore checkpoint") + parser.add_argument('--path', help="dataset for evaluation") + parser.add_argument('--small', action='store_true', help='use small model') + parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision') + parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation') + args = parser.parse_args() + + demo(args) diff --git a/vtoonify/model/raft/download_models.sh b/vtoonify/model/raft/download_models.sh new file mode 100644 index 0000000000000000000000000000000000000000..7b6ed7e478b74699d3c8db3bd744643c35f7da76 --- /dev/null +++ b/vtoonify/model/raft/download_models.sh @@ -0,0 +1,3 @@ +#!/bin/bash +wget https://www.dropbox.com/s/4j4z58wuv8o0mfz/models.zip +unzip models.zip diff --git a/vtoonify/model/raft/evaluate.py b/vtoonify/model/raft/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..431a0f58891bede2804454fa7f28e9434c4c8746 --- /dev/null +++ b/vtoonify/model/raft/evaluate.py @@ -0,0 +1,197 @@ +import sys +sys.path.append('core') + +from PIL import Image +import argparse +import os +import time +import numpy as np +import torch +import torch.nn.functional as F +import matplotlib.pyplot as plt + +import datasets +from utils import flow_viz +from utils import frame_utils + +from raft import RAFT +from utils.utils import InputPadder, forward_interpolate + + +@torch.no_grad() +def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'): + """ Create submission for the Sintel leaderboard """ + model.eval() + for dstype in ['clean', 'final']: + test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype) + + flow_prev, sequence_prev = None, None + for test_id in range(len(test_dataset)): + image1, image2, (sequence, frame) = test_dataset[test_id] + if sequence != sequence_prev: + flow_prev = None + + padder = InputPadder(image1.shape) + image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda()) + + flow_low, flow_pr = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True) + flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy() + + if warm_start: + flow_prev = forward_interpolate(flow_low[0])[None].cuda() + + output_dir = os.path.join(output_path, dstype, sequence) + output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame+1)) + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + frame_utils.writeFlow(output_file, flow) + sequence_prev = sequence + + +@torch.no_grad() +def create_kitti_submission(model, iters=24, output_path='kitti_submission'): + """ Create submission for the Sintel leaderboard """ + model.eval() + test_dataset = datasets.KITTI(split='testing', aug_params=None) + + if not os.path.exists(output_path): + os.makedirs(output_path) + + for test_id in range(len(test_dataset)): + image1, image2, (frame_id, ) = test_dataset[test_id] + padder = InputPadder(image1.shape, mode='kitti') + image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda()) + + _, flow_pr = model(image1, image2, iters=iters, test_mode=True) + flow = padder.unpad(flow_pr[0]).permute(1, 
2, 0).cpu().numpy() + + output_filename = os.path.join(output_path, frame_id) + frame_utils.writeFlowKITTI(output_filename, flow) + + +@torch.no_grad() +def validate_chairs(model, iters=24): + """ Perform evaluation on the FlyingChairs (test) split """ + model.eval() + epe_list = [] + + val_dataset = datasets.FlyingChairs(split='validation') + for val_id in range(len(val_dataset)): + image1, image2, flow_gt, _ = val_dataset[val_id] + image1 = image1[None].cuda() + image2 = image2[None].cuda() + + _, flow_pr = model(image1, image2, iters=iters, test_mode=True) + epe = torch.sum((flow_pr[0].cpu() - flow_gt)**2, dim=0).sqrt() + epe_list.append(epe.view(-1).numpy()) + + epe = np.mean(np.concatenate(epe_list)) + print("Validation Chairs EPE: %f" % epe) + return {'chairs': epe} + + +@torch.no_grad() +def validate_sintel(model, iters=32): + """ Peform validation using the Sintel (train) split """ + model.eval() + results = {} + for dstype in ['clean', 'final']: + val_dataset = datasets.MpiSintel(split='training', dstype=dstype) + epe_list = [] + + for val_id in range(len(val_dataset)): + image1, image2, flow_gt, _ = val_dataset[val_id] + image1 = image1[None].cuda() + image2 = image2[None].cuda() + + padder = InputPadder(image1.shape) + image1, image2 = padder.pad(image1, image2) + + flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True) + flow = padder.unpad(flow_pr[0]).cpu() + + epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt() + epe_list.append(epe.view(-1).numpy()) + + epe_all = np.concatenate(epe_list) + epe = np.mean(epe_all) + px1 = np.mean(epe_all<1) + px3 = np.mean(epe_all<3) + px5 = np.mean(epe_all<5) + + print("Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f" % (dstype, epe, px1, px3, px5)) + results[dstype] = np.mean(epe_list) + + return results + + +@torch.no_grad() +def validate_kitti(model, iters=24): + """ Peform validation using the KITTI-2015 (train) split """ + model.eval() + val_dataset = datasets.KITTI(split='training') + + out_list, epe_list = [], [] + for val_id in range(len(val_dataset)): + image1, image2, flow_gt, valid_gt = val_dataset[val_id] + image1 = image1[None].cuda() + image2 = image2[None].cuda() + + padder = InputPadder(image1.shape, mode='kitti') + image1, image2 = padder.pad(image1, image2) + + flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True) + flow = padder.unpad(flow_pr[0]).cpu() + + epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt() + mag = torch.sum(flow_gt**2, dim=0).sqrt() + + epe = epe.view(-1) + mag = mag.view(-1) + val = valid_gt.view(-1) >= 0.5 + + out = ((epe > 3.0) & ((epe/mag) > 0.05)).float() + epe_list.append(epe[val].mean().item()) + out_list.append(out[val].cpu().numpy()) + + epe_list = np.array(epe_list) + out_list = np.concatenate(out_list) + + epe = np.mean(epe_list) + f1 = 100 * np.mean(out_list) + + print("Validation KITTI: %f, %f" % (epe, f1)) + return {'kitti-epe': epe, 'kitti-f1': f1} + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--model', help="restore checkpoint") + parser.add_argument('--dataset', help="dataset for evaluation") + parser.add_argument('--small', action='store_true', help='use small model') + parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision') + parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation') + args = parser.parse_args() + + model = torch.nn.DataParallel(RAFT(args)) + model.load_state_dict(torch.load(args.model)) + + model.cuda() + 
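+    # Added note (not part of the upstream RAFT script): the checkpoint is
+    # loaded into a torch.nn.DataParallel wrapper above, so the state dict is
+    # expected to carry the "module." key prefix used by RAFT's released models.
+    # Hypothetical usage sketch, assuming a locally downloaded checkpoint:
+    #   python evaluate.py --model=models/raft-things.pth --dataset=sintel --mixed_precision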
model.eval() + + # create_sintel_submission(model.module, warm_start=True) + # create_kitti_submission(model.module) + + with torch.no_grad(): + if args.dataset == 'chairs': + validate_chairs(model.module) + + elif args.dataset == 'sintel': + validate_sintel(model.module) + + elif args.dataset == 'kitti': + validate_kitti(model.module) + + diff --git a/vtoonify/model/raft/train.py b/vtoonify/model/raft/train.py new file mode 100644 index 0000000000000000000000000000000000000000..307573097f13ee30c67bbe11658f457fdf1ead3c --- /dev/null +++ b/vtoonify/model/raft/train.py @@ -0,0 +1,247 @@ +from __future__ import print_function, division +import sys +sys.path.append('core') + +import argparse +import os +import cv2 +import time +import numpy as np +import matplotlib.pyplot as plt + +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F + +from torch.utils.data import DataLoader +from raft import RAFT +import evaluate +import datasets + +from torch.utils.tensorboard import SummaryWriter + +try: + from torch.cuda.amp import GradScaler +except: + # dummy GradScaler for PyTorch < 1.6 + class GradScaler: + def __init__(self): + pass + def scale(self, loss): + return loss + def unscale_(self, optimizer): + pass + def step(self, optimizer): + optimizer.step() + def update(self): + pass + + +# exclude extremly large displacements +MAX_FLOW = 400 +SUM_FREQ = 100 +VAL_FREQ = 5000 + + +def sequence_loss(flow_preds, flow_gt, valid, gamma=0.8, max_flow=MAX_FLOW): + """ Loss function defined over sequence of flow predictions """ + + n_predictions = len(flow_preds) + flow_loss = 0.0 + + # exlude invalid pixels and extremely large diplacements + mag = torch.sum(flow_gt**2, dim=1).sqrt() + valid = (valid >= 0.5) & (mag < max_flow) + + for i in range(n_predictions): + i_weight = gamma**(n_predictions - i - 1) + i_loss = (flow_preds[i] - flow_gt).abs() + flow_loss += i_weight * (valid[:, None] * i_loss).mean() + + epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt() + epe = epe.view(-1)[valid.view(-1)] + + metrics = { + 'epe': epe.mean().item(), + '1px': (epe < 1).float().mean().item(), + '3px': (epe < 3).float().mean().item(), + '5px': (epe < 5).float().mean().item(), + } + + return flow_loss, metrics + + +def count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + +def fetch_optimizer(args, model): + """ Create the optimizer and learning rate scheduler """ + optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon) + + scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps+100, + pct_start=0.05, cycle_momentum=False, anneal_strategy='linear') + + return optimizer, scheduler + + +class Logger: + def __init__(self, model, scheduler): + self.model = model + self.scheduler = scheduler + self.total_steps = 0 + self.running_loss = {} + self.writer = None + + def _print_training_status(self): + metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())] + training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_last_lr()[0]) + metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data) + + # print the training status + print(training_str + metrics_str) + + if self.writer is None: + self.writer = SummaryWriter() + + for k in self.running_loss: + self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps) + self.running_loss[k] = 0.0 + + def push(self, metrics): + self.total_steps += 1 
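+        # Added comment: each metric is accumulated into a running sum below;
+        # _print_training_status() averages the sums over SUM_FREQ steps,
+        # mirrors them to TensorBoard, and the running totals are then reset.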
+ + for key in metrics: + if key not in self.running_loss: + self.running_loss[key] = 0.0 + + self.running_loss[key] += metrics[key] + + if self.total_steps % SUM_FREQ == SUM_FREQ-1: + self._print_training_status() + self.running_loss = {} + + def write_dict(self, results): + if self.writer is None: + self.writer = SummaryWriter() + + for key in results: + self.writer.add_scalar(key, results[key], self.total_steps) + + def close(self): + self.writer.close() + + +def train(args): + + model = nn.DataParallel(RAFT(args), device_ids=args.gpus) + print("Parameter Count: %d" % count_parameters(model)) + + if args.restore_ckpt is not None: + model.load_state_dict(torch.load(args.restore_ckpt), strict=False) + + model.cuda() + model.train() + + if args.stage != 'chairs': + model.module.freeze_bn() + + train_loader = datasets.fetch_dataloader(args) + optimizer, scheduler = fetch_optimizer(args, model) + + total_steps = 0 + scaler = GradScaler(enabled=args.mixed_precision) + logger = Logger(model, scheduler) + + VAL_FREQ = 5000 + add_noise = True + + should_keep_training = True + while should_keep_training: + + for i_batch, data_blob in enumerate(train_loader): + optimizer.zero_grad() + image1, image2, flow, valid = [x.cuda() for x in data_blob] + + if args.add_noise: + stdv = np.random.uniform(0.0, 5.0) + image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0) + image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0) + + flow_predictions = model(image1, image2, iters=args.iters) + + loss, metrics = sequence_loss(flow_predictions, flow, valid, args.gamma) + scaler.scale(loss).backward() + scaler.unscale_(optimizer) + torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip) + + scaler.step(optimizer) + scheduler.step() + scaler.update() + + logger.push(metrics) + + if total_steps % VAL_FREQ == VAL_FREQ - 1: + PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name) + torch.save(model.state_dict(), PATH) + + results = {} + for val_dataset in args.validation: + if val_dataset == 'chairs': + results.update(evaluate.validate_chairs(model.module)) + elif val_dataset == 'sintel': + results.update(evaluate.validate_sintel(model.module)) + elif val_dataset == 'kitti': + results.update(evaluate.validate_kitti(model.module)) + + logger.write_dict(results) + + model.train() + if args.stage != 'chairs': + model.module.freeze_bn() + + total_steps += 1 + + if total_steps > args.num_steps: + should_keep_training = False + break + + logger.close() + PATH = 'checkpoints/%s.pth' % args.name + torch.save(model.state_dict(), PATH) + + return PATH + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--name', default='raft', help="name your experiment") + parser.add_argument('--stage', help="determines which dataset to use for training") + parser.add_argument('--restore_ckpt', help="restore checkpoint") + parser.add_argument('--small', action='store_true', help='use small model') + parser.add_argument('--validation', type=str, nargs='+') + + parser.add_argument('--lr', type=float, default=0.00002) + parser.add_argument('--num_steps', type=int, default=100000) + parser.add_argument('--batch_size', type=int, default=6) + parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512]) + parser.add_argument('--gpus', type=int, nargs='+', default=[0,1]) + parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision') + + parser.add_argument('--iters', type=int, default=12) + 
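+    # Added comment: the remaining flags configure the AdamW optimizer
+    # (--wdecay, --epsilon), gradient clipping in train() (--clip), model
+    # dropout (--dropout), the exponential loss weighting in sequence_loss()
+    # (--gamma), and the optional Gaussian noise augmentation (--add_noise).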
parser.add_argument('--wdecay', type=float, default=.00005) + parser.add_argument('--epsilon', type=float, default=1e-8) + parser.add_argument('--clip', type=float, default=1.0) + parser.add_argument('--dropout', type=float, default=0.0) + parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting') + parser.add_argument('--add_noise', action='store_true') + args = parser.parse_args() + + torch.manual_seed(1234) + np.random.seed(1234) + + if not os.path.isdir('checkpoints'): + os.mkdir('checkpoints') + + train(args) \ No newline at end of file diff --git a/vtoonify/model/raft/train_mixed.sh b/vtoonify/model/raft/train_mixed.sh new file mode 100644 index 0000000000000000000000000000000000000000..d9b979f143902a17a0ba7b0a8f960598b7096e0b --- /dev/null +++ b/vtoonify/model/raft/train_mixed.sh @@ -0,0 +1,6 @@ +#!/bin/bash +mkdir -p checkpoints +python -u train.py --name raft-chairs --stage chairs --validation chairs --gpus 0 --num_steps 120000 --batch_size 8 --lr 0.00025 --image_size 368 496 --wdecay 0.0001 --mixed_precision +python -u train.py --name raft-things --stage things --validation sintel --restore_ckpt checkpoints/raft-chairs.pth --gpus 0 --num_steps 120000 --batch_size 5 --lr 0.0001 --image_size 400 720 --wdecay 0.0001 --mixed_precision +python -u train.py --name raft-sintel --stage sintel --validation sintel --restore_ckpt checkpoints/raft-things.pth --gpus 0 --num_steps 120000 --batch_size 5 --lr 0.0001 --image_size 368 768 --wdecay 0.00001 --gamma=0.85 --mixed_precision +python -u train.py --name raft-kitti --stage kitti --validation kitti --restore_ckpt checkpoints/raft-sintel.pth --gpus 0 --num_steps 50000 --batch_size 5 --lr 0.0001 --image_size 288 960 --wdecay 0.00001 --gamma=0.85 --mixed_precision diff --git a/vtoonify/model/raft/train_standard.sh b/vtoonify/model/raft/train_standard.sh new file mode 100644 index 0000000000000000000000000000000000000000..7f559b386b6b596ec14a94f0d8c13974309b7d80 --- /dev/null +++ b/vtoonify/model/raft/train_standard.sh @@ -0,0 +1,6 @@ +#!/bin/bash +mkdir -p checkpoints +python -u train.py --name raft-chairs --stage chairs --validation chairs --gpus 0 1 --num_steps 100000 --batch_size 10 --lr 0.0004 --image_size 368 496 --wdecay 0.0001 +python -u train.py --name raft-things --stage things --validation sintel --restore_ckpt checkpoints/raft-chairs.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 400 720 --wdecay 0.0001 +python -u train.py --name raft-sintel --stage sintel --validation sintel --restore_ckpt checkpoints/raft-things.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 368 768 --wdecay 0.00001 --gamma=0.85 +python -u train.py --name raft-kitti --stage kitti --validation kitti --restore_ckpt checkpoints/raft-sintel.pth --gpus 0 1 --num_steps 50000 --batch_size 6 --lr 0.0001 --image_size 288 960 --wdecay 0.00001 --gamma=0.85 diff --git a/vtoonify/model/simple_augment.py b/vtoonify/model/simple_augment.py new file mode 100644 index 0000000000000000000000000000000000000000..d76a36ddc8072487bd0914f4e98e664d7ac765f8 --- /dev/null +++ b/vtoonify/model/simple_augment.py @@ -0,0 +1,468 @@ +# almost the same as model.stylegan.non_leaking +# we only modify the parameters in sample_affine() to make the transformations mild + +import math + +import torch +from torch import autograd +from torch.nn import functional as F +import numpy as np + +from model.stylegan.distributed import reduce_sum +from model.stylegan.op import upfirdn2d + + +class AdaptiveAugment: + def 
__init__(self, ada_aug_target, ada_aug_len, update_every, device): + self.ada_aug_target = ada_aug_target + self.ada_aug_len = ada_aug_len + self.update_every = update_every + + self.ada_update = 0 + self.ada_aug_buf = torch.tensor([0.0, 0.0], device=device) + self.r_t_stat = 0 + self.ada_aug_p = 0 + + @torch.no_grad() + def tune(self, real_pred): + self.ada_aug_buf += torch.tensor( + (torch.sign(real_pred).sum().item(), real_pred.shape[0]), + device=real_pred.device, + ) + self.ada_update += 1 + + if self.ada_update % self.update_every == 0: + self.ada_aug_buf = reduce_sum(self.ada_aug_buf) + pred_signs, n_pred = self.ada_aug_buf.tolist() + + self.r_t_stat = pred_signs / n_pred + + if self.r_t_stat > self.ada_aug_target: + sign = 1 + + else: + sign = -1 + + self.ada_aug_p += sign * n_pred / self.ada_aug_len + self.ada_aug_p = min(1, max(0, self.ada_aug_p)) + self.ada_aug_buf.mul_(0) + self.ada_update = 0 + + return self.ada_aug_p + + +SYM6 = ( + 0.015404109327027373, + 0.0034907120842174702, + -0.11799011114819057, + -0.048311742585633, + 0.4910559419267466, + 0.787641141030194, + 0.3379294217276218, + -0.07263752278646252, + -0.021060292512300564, + 0.04472490177066578, + 0.0017677118642428036, + -0.007800708325034148, +) + + +def translate_mat(t_x, t_y, device="cpu"): + batch = t_x.shape[0] + + mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1) + translate = torch.stack((t_x, t_y), 1) + mat[:, :2, 2] = translate + + return mat + + +def rotate_mat(theta, device="cpu"): + batch = theta.shape[0] + + mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1) + sin_t = torch.sin(theta) + cos_t = torch.cos(theta) + rot = torch.stack((cos_t, -sin_t, sin_t, cos_t), 1).view(batch, 2, 2) + mat[:, :2, :2] = rot + + return mat + + +def scale_mat(s_x, s_y, device="cpu"): + batch = s_x.shape[0] + + mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1) + mat[:, 0, 0] = s_x + mat[:, 1, 1] = s_y + + return mat + + +def translate3d_mat(t_x, t_y, t_z): + batch = t_x.shape[0] + + mat = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) + translate = torch.stack((t_x, t_y, t_z), 1) + mat[:, :3, 3] = translate + + return mat + + +def rotate3d_mat(axis, theta): + batch = theta.shape[0] + + u_x, u_y, u_z = axis + + eye = torch.eye(3).unsqueeze(0) + cross = torch.tensor([(0, -u_z, u_y), (u_z, 0, -u_x), (-u_y, u_x, 0)]).unsqueeze(0) + outer = torch.tensor(axis) + outer = (outer.unsqueeze(1) * outer).unsqueeze(0) + + sin_t = torch.sin(theta).view(-1, 1, 1) + cos_t = torch.cos(theta).view(-1, 1, 1) + + rot = cos_t * eye + sin_t * cross + (1 - cos_t) * outer + + eye_4 = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) + eye_4[:, :3, :3] = rot + + return eye_4 + + +def scale3d_mat(s_x, s_y, s_z): + batch = s_x.shape[0] + + mat = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) + mat[:, 0, 0] = s_x + mat[:, 1, 1] = s_y + mat[:, 2, 2] = s_z + + return mat + + +def luma_flip_mat(axis, i): + batch = i.shape[0] + + eye = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) + axis = torch.tensor(axis + (0,)) + flip = 2 * torch.ger(axis, axis) * i.view(-1, 1, 1) + + return eye - flip + + +def saturation_mat(axis, i): + batch = i.shape[0] + + eye = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) + axis = torch.tensor(axis + (0,)) + axis = torch.ger(axis, axis) + saturate = axis + (eye - axis) * i.view(-1, 1, 1) + + return saturate + + +def lognormal_sample(size, mean=0, std=1, device="cpu"): + return torch.empty(size, device=device).log_normal_(mean=mean, std=std) + + +def category_sample(size, categories, 
device="cpu"): + category = torch.tensor(categories, device=device) + sample = torch.randint(high=len(categories), size=(size,), device=device) + + return category[sample] + + +def uniform_sample(size, low, high, device="cpu"): + return torch.empty(size, device=device).uniform_(low, high) + + +def normal_sample(size, mean=0, std=1, device="cpu"): + return torch.empty(size, device=device).normal_(mean, std) + + +def bernoulli_sample(size, p, device="cpu"): + return torch.empty(size, device=device).bernoulli_(p) + + +def random_mat_apply(p, transform, prev, eye, device="cpu"): + size = transform.shape[0] + select = bernoulli_sample(size, p, device=device).view(size, 1, 1) + select_transform = select * transform + (1 - select) * eye + + return select_transform @ prev + + +def sample_affine(p, size, height, width, device="cpu"): + G = torch.eye(3, device=device).unsqueeze(0).repeat(size, 1, 1) + eye = G + + # flip + param = category_sample(size, (0, 1)) + Gc = scale_mat(1 - 2.0 * param, torch.ones(size), device=device) + G = random_mat_apply(p, Gc, G, eye, device=device) + # print('flip', G, scale_mat(1 - 2.0 * param, torch.ones(size)), sep='\n') + + # 90 rotate + #param = category_sample(size, (0, 3)) + #Gc = rotate_mat(-math.pi / 2 * param, device=device) + #G = random_mat_apply(p, Gc, G, eye, device=device) + # print('90 rotate', G, rotate_mat(-math.pi / 2 * param), sep='\n') + + # integer translate + param = uniform_sample(size, -0.125, 0.125) + param_height = torch.round(param * height) / height + param_width = torch.round(param * width) / width + Gc = translate_mat(param_width, param_height, device=device) + G = random_mat_apply(p, Gc, G, eye, device=device) + # print('integer translate', G, translate_mat(param_width, param_height), sep='\n') + + # isotropic scale + param = lognormal_sample(size, std=0.1 * math.log(2)) + Gc = scale_mat(param, param, device=device) + G = random_mat_apply(p, Gc, G, eye, device=device) + # print('isotropic scale', G, scale_mat(param, param), sep='\n') + + p_rot = 1 - math.sqrt(1 - p) + + # pre-rotate + param = uniform_sample(size, -math.pi * 0.25, math.pi * 0.25) + Gc = rotate_mat(-param, device=device) + G = random_mat_apply(p_rot, Gc, G, eye, device=device) + # print('pre-rotate', G, rotate_mat(-param), sep='\n') + + # anisotropic scale + param = lognormal_sample(size, std=0.1 * math.log(2)) + Gc = scale_mat(param, 1 / param, device=device) + G = random_mat_apply(p, Gc, G, eye, device=device) + # print('anisotropic scale', G, scale_mat(param, 1 / param), sep='\n') + + # post-rotate + param = uniform_sample(size, -math.pi * 0.25, math.pi * 0.25) + Gc = rotate_mat(-param, device=device) + G = random_mat_apply(p_rot, Gc, G, eye, device=device) + # print('post-rotate', G, rotate_mat(-param), sep='\n') + + # fractional translate + param = normal_sample(size, std=0.125) + Gc = translate_mat(param, param, device=device) + G = random_mat_apply(p, Gc, G, eye, device=device) + # print('fractional translate', G, translate_mat(param, param), sep='\n') + + return G + + +def sample_color(p, size): + C = torch.eye(4).unsqueeze(0).repeat(size, 1, 1) + eye = C + axis_val = 1 / math.sqrt(3) + axis = (axis_val, axis_val, axis_val) + + # brightness + param = normal_sample(size, std=0.2) + Cc = translate3d_mat(param, param, param) + C = random_mat_apply(p, Cc, C, eye) + + # contrast + param = lognormal_sample(size, std=0.5 * math.log(2)) + Cc = scale3d_mat(param, param, param) + C = random_mat_apply(p, Cc, C, eye) + + # luma flip + param = category_sample(size, (0, 1)) + Cc = 
luma_flip_mat(axis, param) + C = random_mat_apply(p, Cc, C, eye) + + # hue rotation + param = uniform_sample(size, -math.pi, math.pi) + Cc = rotate3d_mat(axis, param) + C = random_mat_apply(p, Cc, C, eye) + + # saturation + param = lognormal_sample(size, std=1 * math.log(2)) + Cc = saturation_mat(axis, param) + C = random_mat_apply(p, Cc, C, eye) + + return C + + +def make_grid(shape, x0, x1, y0, y1, device): + n, c, h, w = shape + grid = torch.empty(n, h, w, 3, device=device) + grid[:, :, :, 0] = torch.linspace(x0, x1, w, device=device) + grid[:, :, :, 1] = torch.linspace(y0, y1, h, device=device).unsqueeze(-1) + grid[:, :, :, 2] = 1 + + return grid + + +def affine_grid(grid, mat): + n, h, w, _ = grid.shape + return (grid.view(n, h * w, 3) @ mat.transpose(1, 2)).view(n, h, w, 2) + + +def get_padding(G, height, width, kernel_size): + device = G.device + + cx = (width - 1) / 2 + cy = (height - 1) / 2 + cp = torch.tensor( + [(-cx, -cy, 1), (cx, -cy, 1), (cx, cy, 1), (-cx, cy, 1)], device=device + ) + cp = G @ cp.T + + pad_k = kernel_size // 4 + + pad = cp[:, :2, :].permute(1, 0, 2).flatten(1) + pad = torch.cat((-pad, pad)).max(1).values + pad = pad + torch.tensor([pad_k * 2 - cx, pad_k * 2 - cy] * 2, device=device) + pad = pad.max(torch.tensor([0, 0] * 2, device=device)) + pad = pad.min(torch.tensor([width - 1, height - 1] * 2, device=device)) + + pad_x1, pad_y1, pad_x2, pad_y2 = pad.ceil().to(torch.int32) + + return pad_x1, pad_x2, pad_y1, pad_y2 + + +def try_sample_affine_and_pad(img, p, kernel_size, G=None): + batch, _, height, width = img.shape + + G_try = G + + if G is None: + G_try = torch.inverse(sample_affine(p, batch, height, width)) + + pad_x1, pad_x2, pad_y1, pad_y2 = get_padding(G_try, height, width, kernel_size) + + img_pad = F.pad(img, (pad_x1, pad_x2, pad_y1, pad_y2), mode="reflect") + + return img_pad, G_try, (pad_x1, pad_x2, pad_y1, pad_y2) + + +class GridSampleForward(autograd.Function): + @staticmethod + def forward(ctx, input, grid): + out = F.grid_sample( + input, grid, mode="bilinear", padding_mode="zeros", align_corners=False + ) + ctx.save_for_backward(input, grid) + + return out + + @staticmethod + def backward(ctx, grad_output): + input, grid = ctx.saved_tensors + grad_input, grad_grid = GridSampleBackward.apply(grad_output, input, grid) + + return grad_input, grad_grid + + +class GridSampleBackward(autograd.Function): + @staticmethod + def forward(ctx, grad_output, input, grid): + op = torch._C._jit_get_operation("aten::grid_sampler_2d_backward") + grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) + ctx.save_for_backward(grid) + + return grad_input, grad_grid + + @staticmethod + def backward(ctx, grad_grad_input, grad_grad_grid): + grid, = ctx.saved_tensors + grad_grad_output = None + + if ctx.needs_input_grad[0]: + grad_grad_output = GridSampleForward.apply(grad_grad_input, grid) + + return grad_grad_output, None, None + + +grid_sample = GridSampleForward.apply + + +def scale_mat_single(s_x, s_y): + return torch.tensor(((s_x, 0, 0), (0, s_y, 0), (0, 0, 1)), dtype=torch.float32) + + +def translate_mat_single(t_x, t_y): + return torch.tensor(((1, 0, t_x), (0, 1, t_y), (0, 0, 1)), dtype=torch.float32) + + +def random_apply_affine(img, p, G=None, antialiasing_kernel=SYM6): + kernel = antialiasing_kernel + len_k = len(kernel) + + kernel = torch.as_tensor(kernel).to(img) + # kernel = torch.ger(kernel, kernel).to(img) + kernel_flip = torch.flip(kernel, (0,)) + + img_pad, G, (pad_x1, pad_x2, pad_y1, pad_y2) = try_sample_affine_and_pad( + img, p, len_k, G + 
) + + G_inv = ( + translate_mat_single((pad_x1 - pad_x2).item() / 2, (pad_y1 - pad_y2).item() / 2) + @ G + ) + up_pad = ( + (len_k + 2 - 1) // 2, + (len_k - 2) // 2, + (len_k + 2 - 1) // 2, + (len_k - 2) // 2, + ) + img_2x = upfirdn2d(img_pad, kernel.unsqueeze(0), up=(2, 1), pad=(*up_pad[:2], 0, 0)) + img_2x = upfirdn2d(img_2x, kernel.unsqueeze(1), up=(1, 2), pad=(0, 0, *up_pad[2:])) + G_inv = scale_mat_single(2, 2) @ G_inv @ scale_mat_single(1 / 2, 1 / 2) + G_inv = translate_mat_single(-0.5, -0.5) @ G_inv @ translate_mat_single(0.5, 0.5) + batch_size, channel, height, width = img.shape + pad_k = len_k // 4 + shape = (batch_size, channel, (height + pad_k * 2) * 2, (width + pad_k * 2) * 2) + G_inv = ( + scale_mat_single(2 / img_2x.shape[3], 2 / img_2x.shape[2]) + @ G_inv + @ scale_mat_single(1 / (2 / shape[3]), 1 / (2 / shape[2])) + ) + grid = F.affine_grid(G_inv[:, :2, :].to(img_2x), shape, align_corners=False) + img_affine = grid_sample(img_2x, grid) + d_p = -pad_k * 2 + down_pad = ( + d_p + (len_k - 2 + 1) // 2, + d_p + (len_k - 2) // 2, + d_p + (len_k - 2 + 1) // 2, + d_p + (len_k - 2) // 2, + ) + img_down = upfirdn2d( + img_affine, kernel_flip.unsqueeze(0), down=(2, 1), pad=(*down_pad[:2], 0, 0) + ) + img_down = upfirdn2d( + img_down, kernel_flip.unsqueeze(1), down=(1, 2), pad=(0, 0, *down_pad[2:]) + ) + + return img_down, G + + +def apply_color(img, mat): + batch = img.shape[0] + img = img.permute(0, 2, 3, 1) + mat_mul = mat[:, :3, :3].transpose(1, 2).view(batch, 1, 3, 3) + mat_add = mat[:, :3, 3].view(batch, 1, 1, 3) + img = img @ mat_mul + mat_add + img = img.permute(0, 3, 1, 2) + + return img + + +def random_apply_color(img, p, C=None): + if C is None: + C = sample_color(p, img.shape[0]) + + img = apply_color(img, C.to(img)) + + return img, C + + +def augment(img, p, transform_matrix=(None, None)): + img, G = random_apply_affine(img, p, transform_matrix[0]) + img, C = random_apply_color(img, p, transform_matrix[1]) + + return img, (G, C) diff --git a/vtoonify/model/stylegan/__init__.py b/vtoonify/model/stylegan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vtoonify/model/stylegan/dataset.py b/vtoonify/model/stylegan/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..7713ea2f8bc94d202d2dfbe830af3cb96b1e803d --- /dev/null +++ b/vtoonify/model/stylegan/dataset.py @@ -0,0 +1,40 @@ +from io import BytesIO + +import lmdb +from PIL import Image +from torch.utils.data import Dataset + + +class MultiResolutionDataset(Dataset): + def __init__(self, path, transform, resolution=256): + self.env = lmdb.open( + path, + max_readers=32, + readonly=True, + lock=False, + readahead=False, + meminit=False, + ) + + if not self.env: + raise IOError('Cannot open lmdb dataset', path) + + with self.env.begin(write=False) as txn: + self.length = int(txn.get('length'.encode('utf-8')).decode('utf-8')) + + self.resolution = resolution + self.transform = transform + + def __len__(self): + return self.length + + def __getitem__(self, index): + with self.env.begin(write=False) as txn: + key = f'{self.resolution}-{str(index).zfill(5)}'.encode('utf-8') + img_bytes = txn.get(key) + + buffer = BytesIO(img_bytes) + img = Image.open(buffer) + img = self.transform(img) + + return img diff --git a/vtoonify/model/stylegan/distributed.py b/vtoonify/model/stylegan/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..51fa243257ef302e2015d5ff36ac531b86a9a0ce --- /dev/null +++ 
b/vtoonify/model/stylegan/distributed.py @@ -0,0 +1,126 @@ +import math +import pickle + +import torch +from torch import distributed as dist +from torch.utils.data.sampler import Sampler + + +def get_rank(): + if not dist.is_available(): + return 0 + + if not dist.is_initialized(): + return 0 + + return dist.get_rank() + + +def synchronize(): + if not dist.is_available(): + return + + if not dist.is_initialized(): + return + + world_size = dist.get_world_size() + + if world_size == 1: + return + + dist.barrier() + + +def get_world_size(): + if not dist.is_available(): + return 1 + + if not dist.is_initialized(): + return 1 + + return dist.get_world_size() + + +def reduce_sum(tensor): + if not dist.is_available(): + return tensor + + if not dist.is_initialized(): + return tensor + + tensor = tensor.clone() + dist.all_reduce(tensor, op=dist.ReduceOp.SUM) + + return tensor + + +def gather_grad(params): + world_size = get_world_size() + + if world_size == 1: + return + + for param in params: + if param.grad is not None: + dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM) + param.grad.data.div_(world_size) + + +def all_gather(data): + world_size = get_world_size() + + if world_size == 1: + return [data] + + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to('cuda') + + local_size = torch.IntTensor([tensor.numel()]).to('cuda') + size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda')) + + if local_size != max_size: + padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda') + tensor = torch.cat((tensor, padding), 0) + + dist.all_gather(tensor_list, tensor) + + data_list = [] + + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def reduce_loss_dict(loss_dict): + world_size = get_world_size() + + if world_size < 2: + return loss_dict + + with torch.no_grad(): + keys = [] + losses = [] + + for k in sorted(loss_dict.keys()): + keys.append(k) + losses.append(loss_dict[k]) + + losses = torch.stack(losses, 0) + dist.reduce(losses, dst=0) + + if dist.get_rank() == 0: + losses /= world_size + + reduced_losses = {k: v for k, v in zip(keys, losses)} + + return reduced_losses diff --git a/vtoonify/model/stylegan/lpips/__init__.py b/vtoonify/model/stylegan/lpips/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b3c9cdc35a03a4e4585bd6bbc9c793331eb1723 --- /dev/null +++ b/vtoonify/model/stylegan/lpips/__init__.py @@ -0,0 +1,161 @@ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +#from skimage.measure import compare_ssim +from skimage.metrics import structural_similarity as compare_ssim +import torch +from torch.autograd import Variable + +from model.stylegan.lpips import dist_model + +class PerceptualLoss(torch.nn.Module): + def __init__(self, model='net-lin', net='alex', colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0]): # VGG using our perceptually-learned weights (LPIPS metric) + # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss + super(PerceptualLoss, self).__init__() + 
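+        # Added comment: with the defaults (model='net-lin', net='alex') this
+        # wraps the standard LPIPS metric, i.e. a frozen AlexNet feature
+        # extractor with learned linear calibration weights loaded by
+        # dist_model.DistModel below.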
print('Setting up Perceptual loss...') + self.use_gpu = use_gpu + self.spatial = spatial + self.gpu_ids = gpu_ids + self.model = dist_model.DistModel() + self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace, spatial=self.spatial, gpu_ids=gpu_ids) + print('...[%s] initialized'%self.model.name()) + print('...Done') + + def forward(self, pred, target, normalize=False): + """ + Pred and target are Variables. + If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1] + If normalize is False, assumes the images are already between [-1,+1] + + Inputs pred and target are Nx3xHxW + Output pytorch Variable N long + """ + + if normalize: + target = 2 * target - 1 + pred = 2 * pred - 1 + + return self.model.forward(target, pred) + +def normalize_tensor(in_feat,eps=1e-10): + norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1,keepdim=True)) + return in_feat/(norm_factor+eps) + +def l2(p0, p1, range=255.): + return .5*np.mean((p0 / range - p1 / range)**2) + +def psnr(p0, p1, peak=255.): + return 10*np.log10(peak**2/np.mean((1.*p0-1.*p1)**2)) + +def dssim(p0, p1, range=255.): + return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2. + +def rgb2lab(in_img,mean_cent=False): + from skimage import color + img_lab = color.rgb2lab(in_img) + if(mean_cent): + img_lab[:,:,0] = img_lab[:,:,0]-50 + return img_lab + +def tensor2np(tensor_obj): + # change dimension of a tensor object into a numpy array + return tensor_obj[0].cpu().float().numpy().transpose((1,2,0)) + +def np2tensor(np_obj): + # change dimenion of np array into tensor array + return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1))) + +def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False): + # image tensor to lab tensor + from skimage import color + + img = tensor2im(image_tensor) + img_lab = color.rgb2lab(img) + if(mc_only): + img_lab[:,:,0] = img_lab[:,:,0]-50 + if(to_norm and not mc_only): + img_lab[:,:,0] = img_lab[:,:,0]-50 + img_lab = img_lab/100. + + return np2tensor(img_lab) + +def tensorlab2tensor(lab_tensor,return_inbnd=False): + from skimage import color + import warnings + warnings.filterwarnings("ignore") + + lab = tensor2np(lab_tensor)*100. + lab[:,:,0] = lab[:,:,0]+50 + + rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1) + if(return_inbnd): + # convert back to lab, see if we match + lab_back = color.rgb2lab(rgb_back.astype('uint8')) + mask = 1.*np.isclose(lab_back,lab,atol=2.) + mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis]) + return (im2tensor(rgb_back),mask) + else: + return im2tensor(rgb_back) + +def rgb2lab(input): + from skimage import color + return color.rgb2lab(input / 255.) + +def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.): + image_numpy = image_tensor[0].cpu().float().numpy() + image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor + return image_numpy.astype(imtype) + +def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.): + return torch.Tensor((image / factor - cent) + [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) + +def tensor2vec(vector_tensor): + return vector_tensor.data.cpu().numpy()[:, :, 0, 0] + +def voc_ap(rec, prec, use_07_metric=False): + """ ap = voc_ap(rec, prec, [use_07_metric]) + Compute VOC AP given precision and recall. + If use_07_metric is true, uses the + VOC 07 11 point method (default:False). + """ + if use_07_metric: + # 11 point metric + ap = 0. 
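+        # Added comment: 11-point interpolation, i.e. average the maximum
+        # precision attained at recall thresholds 0.0, 0.1, ..., 1.0
+        # (the PASCAL VOC 2007 protocol).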
+ for t in np.arange(0., 1.1, 0.1): + if np.sum(rec >= t) == 0: + p = 0 + else: + p = np.max(prec[rec >= t]) + ap = ap + p / 11. + else: + # correct AP calculation + # first append sentinel values at the end + mrec = np.concatenate(([0.], rec, [1.])) + mpre = np.concatenate(([0.], prec, [0.])) + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + +def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.): +# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.): + image_numpy = image_tensor[0].cpu().float().numpy() + image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor + return image_numpy.astype(imtype) + +def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.): +# def im2tensor(image, imtype=np.uint8, cent=1., factor=1.): + return torch.Tensor((image / factor - cent) + [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) diff --git a/vtoonify/model/stylegan/lpips/base_model.py b/vtoonify/model/stylegan/lpips/base_model.py new file mode 100644 index 0000000000000000000000000000000000000000..8de1d16f0c7fa52d8067139abc6e769e96d0a6a1 --- /dev/null +++ b/vtoonify/model/stylegan/lpips/base_model.py @@ -0,0 +1,58 @@ +import os +import numpy as np +import torch +from torch.autograd import Variable +from pdb import set_trace as st +from IPython import embed + +class BaseModel(): + def __init__(self): + pass; + + def name(self): + return 'BaseModel' + + def initialize(self, use_gpu=True, gpu_ids=[0]): + self.use_gpu = use_gpu + self.gpu_ids = gpu_ids + + def forward(self): + pass + + def get_image_paths(self): + pass + + def optimize_parameters(self): + pass + + def get_current_visuals(self): + return self.input + + def get_current_errors(self): + return {} + + def save(self, label): + pass + + # helper saving function that can be used by subclasses + def save_network(self, network, path, network_label, epoch_label): + save_filename = '%s_net_%s.pth' % (epoch_label, network_label) + save_path = os.path.join(path, save_filename) + torch.save(network.state_dict(), save_path) + + # helper loading function that can be used by subclasses + def load_network(self, network, network_label, epoch_label): + save_filename = '%s_net_%s.pth' % (epoch_label, network_label) + save_path = os.path.join(self.save_dir, save_filename) + print('Loading network from %s'%save_path) + network.load_state_dict(torch.load(save_path)) + + def update_learning_rate(): + pass + + def get_image_paths(self): + return self.image_paths + + def save_done(self, flag=False): + np.save(os.path.join(self.save_dir, 'done_flag'),flag) + np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i') diff --git a/vtoonify/model/stylegan/lpips/dist_model.py b/vtoonify/model/stylegan/lpips/dist_model.py new file mode 100644 index 0000000000000000000000000000000000000000..d8a14a61ca36f2562e16feb66c9625dd2f5e0469 --- /dev/null +++ b/vtoonify/model/stylegan/lpips/dist_model.py @@ -0,0 +1,284 @@ + +from __future__ import absolute_import + +import sys +import numpy as np +import torch +from torch import nn +import os +from collections import OrderedDict +from torch.autograd import Variable +import itertools +from model.stylegan.lpips.base_model import BaseModel +from scipy.ndimage import zoom 
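+# Added comment: the imports below are kept verbatim from the upstream LPIPS
+# code; several of them (fractions, functools, skimage.transform) appear to be
+# unused in this file.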
+import fractions +import functools +import skimage.transform +from tqdm import tqdm + +from IPython import embed + +from model.stylegan.lpips import networks_basic as networks +import model.stylegan.lpips as util + +class DistModel(BaseModel): + def name(self): + return self.model_name + + def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None, + use_gpu=True, printNet=False, spatial=False, + is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=[0]): + ''' + INPUTS + model - ['net-lin'] for linearly calibrated network + ['net'] for off-the-shelf network + ['L2'] for L2 distance in Lab colorspace + ['SSIM'] for ssim in RGB colorspace + net - ['squeeze','alex','vgg'] + model_path - if None, will look in weights/[NET_NAME].pth + colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM + use_gpu - bool - whether or not to use a GPU + printNet - bool - whether or not to print network architecture out + spatial - bool - whether to output an array containing varying distances across spatial dimensions + spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below). + spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images. + spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear). + is_train - bool - [True] for training mode + lr - float - initial learning rate + beta1 - float - initial momentum term for adam + version - 0.1 for latest, 0.0 was original (with a bug) + gpu_ids - int array - [0] by default, gpus to use + ''' + BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids) + + self.model = model + self.net = net + self.is_train = is_train + self.spatial = spatial + self.gpu_ids = gpu_ids + self.model_name = '%s [%s]'%(model,net) + + if(self.model == 'net-lin'): # pretrained net + linear layer + self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net, + use_dropout=True, spatial=spatial, version=version, lpips=True) + kw = {} + if not use_gpu: + kw['map_location'] = 'cpu' + if(model_path is None): + import inspect + model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'weights/v%s/%s.pth'%(version,net))) + + if(not is_train): + print('Loading model from: %s'%model_path) + self.net.load_state_dict(torch.load(model_path, **kw), strict=False) + + elif(self.model=='net'): # pretrained network + self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False) + elif(self.model in ['L2','l2']): + self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing + self.model_name = 'L2' + elif(self.model in ['DSSIM','dssim','SSIM','ssim']): + self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace) + self.model_name = 'SSIM' + else: + raise ValueError("Model [%s] not recognized." 
% self.model) + + self.parameters = list(self.net.parameters()) + + if self.is_train: # training mode + # extra network on top to go from distances (d0,d1) => predicted human judgment (h*) + self.rankLoss = networks.BCERankingLoss() + self.parameters += list(self.rankLoss.net.parameters()) + self.lr = lr + self.old_lr = lr + self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999)) + else: # test mode + self.net.eval() + + if(use_gpu): + self.net.to(gpu_ids[0]) + self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids) + if(self.is_train): + self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0 + + if(printNet): + print('---------- Networks initialized -------------') + networks.print_network(self.net) + print('-----------------------------------------------') + + def forward(self, in0, in1, retPerLayer=False): + ''' Function computes the distance between image patches in0 and in1 + INPUTS + in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1] + OUTPUT + computed distances between in0 and in1 + ''' + + return self.net.forward(in0, in1, retPerLayer=retPerLayer) + + # ***** TRAINING FUNCTIONS ***** + def optimize_parameters(self): + self.forward_train() + self.optimizer_net.zero_grad() + self.backward_train() + self.optimizer_net.step() + self.clamp_weights() + + def clamp_weights(self): + for module in self.net.modules(): + if(hasattr(module, 'weight') and module.kernel_size==(1,1)): + module.weight.data = torch.clamp(module.weight.data,min=0) + + def set_input(self, data): + self.input_ref = data['ref'] + self.input_p0 = data['p0'] + self.input_p1 = data['p1'] + self.input_judge = data['judge'] + + if(self.use_gpu): + self.input_ref = self.input_ref.to(device=self.gpu_ids[0]) + self.input_p0 = self.input_p0.to(device=self.gpu_ids[0]) + self.input_p1 = self.input_p1.to(device=self.gpu_ids[0]) + self.input_judge = self.input_judge.to(device=self.gpu_ids[0]) + + self.var_ref = Variable(self.input_ref,requires_grad=True) + self.var_p0 = Variable(self.input_p0,requires_grad=True) + self.var_p1 = Variable(self.input_p1,requires_grad=True) + + def forward_train(self): # run forward pass + # print(self.net.module.scaling_layer.shift) + # print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item()) + + self.d0 = self.forward(self.var_ref, self.var_p0) + self.d1 = self.forward(self.var_ref, self.var_p1) + self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge) + + self.var_judge = Variable(1.*self.input_judge).view(self.d0.size()) + + self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.) 
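+        # self.input_judge lies in [0, 1] (fraction of human raters preferring p1),
+        # so it is rescaled to [-1, 1] here before being passed to the ranking loss.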
+ + return self.loss_total + + def backward_train(self): + torch.mean(self.loss_total).backward() + + def compute_accuracy(self,d0,d1,judge): + ''' d0, d1 are Variables, judge is a Tensor ''' + d1_lt_d0 = (d1 %f' % (type,self.old_lr, lr)) + self.old_lr = lr + +def score_2afc_dataset(data_loader, func, name=''): + ''' Function computes Two Alternative Forced Choice (2AFC) score using + distance function 'func' in dataset 'data_loader' + INPUTS + data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside + func - callable distance function - calling d=func(in0,in1) should take 2 + pytorch tensors with shape Nx3xXxY, and return numpy array of length N + OUTPUTS + [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators + [1] - dictionary with following elements + d0s,d1s - N arrays containing distances between reference patch to perturbed patches + gts - N array in [0,1], preferred patch selected by human evaluators + (closer to "0" for left patch p0, "1" for right patch p1, + "0.6" means 60pct people preferred right patch, 40pct preferred left) + scores - N array in [0,1], corresponding to what percentage function agreed with humans + CONSTS + N - number of test triplets in data_loader + ''' + + d0s = [] + d1s = [] + gts = [] + + for data in tqdm(data_loader.load_data(), desc=name): + d0s+=func(data['ref'],data['p0']).data.cpu().numpy().flatten().tolist() + d1s+=func(data['ref'],data['p1']).data.cpu().numpy().flatten().tolist() + gts+=data['judge'].cpu().numpy().flatten().tolist() + + d0s = np.array(d0s) + d1s = np.array(d1s) + gts = np.array(gts) + scores = (d0s 1: + kernel = kernel * (upsample_factor ** 2) + + self.register_buffer("kernel", kernel) + + self.pad = pad + + def forward(self, input): + out = upfirdn2d(input, self.kernel, pad=self.pad) + + return out + + +class EqualConv2d(nn.Module): + def __init__( + self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, dilation=1 ## modified + ): + super().__init__() + + self.weight = nn.Parameter( + torch.randn(out_channel, in_channel, kernel_size, kernel_size) + ) + self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) + + self.stride = stride + self.padding = padding + self.dilation = dilation ## modified + + if bias: + self.bias = nn.Parameter(torch.zeros(out_channel)) + + else: + self.bias = None + + def forward(self, input): + out = conv2d_gradfix.conv2d( + input, + self.weight * self.scale, + bias=self.bias, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, ## modified + ) + + return out + + def __repr__(self): + return ( + f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}," + f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding}, dilation={self.dilation})" ## modified + ) + + +class EqualLinear(nn.Module): + def __init__( + self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None + ): + super().__init__() + + self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) + + if bias: + self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) + + else: + self.bias = None + + self.activation = activation + + self.scale = (1 / math.sqrt(in_dim)) * lr_mul + self.lr_mul = lr_mul + + def forward(self, input): + if self.activation: + out = F.linear(input, self.weight * self.scale) + out = fused_leaky_relu(out, self.bias * self.lr_mul) + + else: + out = F.linear( + input, self.weight * self.scale, bias=self.bias * self.lr_mul + ) + + return out + + def __repr__(self): + return ( + 
f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})" + ) + + +class ModulatedConv2d(nn.Module): + def __init__( + self, + in_channel, + out_channel, + kernel_size, + style_dim, + demodulate=True, + upsample=False, + downsample=False, + blur_kernel=[1, 3, 3, 1], + fused=True, + ): + super().__init__() + + self.eps = 1e-8 + self.kernel_size = kernel_size + self.in_channel = in_channel + self.out_channel = out_channel + self.upsample = upsample + self.downsample = downsample + + if upsample: + factor = 2 + p = (len(blur_kernel) - factor) - (kernel_size - 1) + pad0 = (p + 1) // 2 + factor - 1 + pad1 = p // 2 + 1 + + self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor) + + if downsample: + factor = 2 + p = (len(blur_kernel) - factor) + (kernel_size - 1) + pad0 = (p + 1) // 2 + pad1 = p // 2 + + self.blur = Blur(blur_kernel, pad=(pad0, pad1)) + + fan_in = in_channel * kernel_size ** 2 + self.scale = 1 / math.sqrt(fan_in) + self.padding = kernel_size // 2 + + self.weight = nn.Parameter( + torch.randn(1, out_channel, in_channel, kernel_size, kernel_size) + ) + + self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) + + self.demodulate = demodulate + self.fused = fused + + def __repr__(self): + return ( + f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, " + f"upsample={self.upsample}, downsample={self.downsample})" + ) + + def forward(self, input, style, externalweight=None): + batch, in_channel, height, width = input.shape + + if not self.fused: + weight = self.scale * self.weight.squeeze(0) + style = self.modulation(style) + + if self.demodulate: + w = weight.unsqueeze(0) * style.view(batch, 1, in_channel, 1, 1) + dcoefs = (w.square().sum((2, 3, 4)) + 1e-8).rsqrt() + + input = input * style.reshape(batch, in_channel, 1, 1) + + if self.upsample: + weight = weight.transpose(0, 1) + out = conv2d_gradfix.conv_transpose2d( + input, weight, padding=0, stride=2 + ) + out = self.blur(out) + + elif self.downsample: + input = self.blur(input) + out = conv2d_gradfix.conv2d(input, weight, padding=0, stride=2) + + else: + out = conv2d_gradfix.conv2d(input, weight, padding=self.padding) + + if self.demodulate: + out = out * dcoefs.view(batch, -1, 1, 1) + + return out + + style = self.modulation(style).view(batch, 1, in_channel, 1, 1) + if externalweight is None: + weight = self.scale * self.weight * style + else: + weight = self.scale * (self.weight + externalweight) * style + + if self.demodulate: + demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) + weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) + + weight = weight.view( + batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size + ) + + if self.upsample: + input = input.view(1, batch * in_channel, height, width) + weight = weight.view( + batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size + ) + weight = weight.transpose(1, 2).reshape( + batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size + ) + out = conv2d_gradfix.conv_transpose2d( + input, weight, padding=0, stride=2, groups=batch + ) + _, _, height, width = out.shape + out = out.view(batch, self.out_channel, height, width) + out = self.blur(out) + + elif self.downsample: + input = self.blur(input) + _, _, height, width = input.shape + input = input.view(1, batch * in_channel, height, width) + out = conv2d_gradfix.conv2d( + input, weight, padding=0, stride=2, groups=batch + ) + _, _, height, width = out.shape + out = out.view(batch, 
self.out_channel, height, width) + + else: + input = input.view(1, batch * in_channel, height, width) + out = conv2d_gradfix.conv2d( + input, weight, padding=self.padding, groups=batch + ) + _, _, height, width = out.shape + out = out.view(batch, self.out_channel, height, width) + + return out + + +class NoiseInjection(nn.Module): + def __init__(self): + super().__init__() + + self.weight = nn.Parameter(torch.zeros(1)) + + def forward(self, image, noise=None): + if noise is None: + batch, _, height, width = image.shape + noise = image.new_empty(batch, 1, height, width).normal_() + + return image + self.weight * noise + + +class ConstantInput(nn.Module): + def __init__(self, channel, size=4): + super().__init__() + + self.input = nn.Parameter(torch.randn(1, channel, size, size)) + + def forward(self, input): + batch = input.shape[0] + out = self.input.repeat(batch, 1, 1, 1) + + return out + + +class StyledConv(nn.Module): + def __init__( + self, + in_channel, + out_channel, + kernel_size, + style_dim, + upsample=False, + blur_kernel=[1, 3, 3, 1], + demodulate=True, + ): + super().__init__() + + self.conv = ModulatedConv2d( + in_channel, + out_channel, + kernel_size, + style_dim, + upsample=upsample, + blur_kernel=blur_kernel, + demodulate=demodulate, + ) + + self.noise = NoiseInjection() + # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) + # self.activate = ScaledLeakyReLU(0.2) + self.activate = FusedLeakyReLU(out_channel) + + def forward(self, input, style, noise=None, externalweight=None): + out = self.conv(input, style, externalweight) + out = self.noise(out, noise=noise) + # out = out + self.bias + out = self.activate(out) + + return out + + +class ToRGB(nn.Module): + def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): + super().__init__() + + if upsample: + self.upsample = Upsample(blur_kernel) + + self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False) + self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) + + def forward(self, input, style, skip=None, externalweight=None): + out = self.conv(input, style, externalweight) + out = out + self.bias + + if skip is not None: + skip = self.upsample(skip) + + out = out + skip + + return out + + +class Generator(nn.Module): + def __init__( + self, + size, + style_dim, + n_mlp, + channel_multiplier=2, + blur_kernel=[1, 3, 3, 1], + lr_mlp=0.01, + ): + super().__init__() + + self.size = size + + self.style_dim = style_dim + + layers = [PixelNorm()] + + for i in range(n_mlp): + layers.append( + EqualLinear( + style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu" + ) + ) + + self.style = nn.Sequential(*layers) + + self.channels = { + 4: 512, + 8: 512, + 16: 512, + 32: 512, + 64: 256 * channel_multiplier, + 128: 128 * channel_multiplier, + 256: 64 * channel_multiplier, + 512: 32 * channel_multiplier, + 1024: 16 * channel_multiplier, + } + + self.input = ConstantInput(self.channels[4]) + self.conv1 = StyledConv( + self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel + ) + self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False) + + self.log_size = int(math.log(size, 2)) + self.num_layers = (self.log_size - 2) * 2 + 1 + + self.convs = nn.ModuleList() + self.upsamples = nn.ModuleList() + self.to_rgbs = nn.ModuleList() + self.noises = nn.Module() + + in_channel = self.channels[4] + + for layer_idx in range(self.num_layers): + res = (layer_idx + 5) // 2 + shape = [1, 1, 2 ** res, 2 ** res] + self.noises.register_buffer(f"noise_{layer_idx}", 
torch.randn(*shape)) + + for i in range(3, self.log_size + 1): + out_channel = self.channels[2 ** i] + + self.convs.append( + StyledConv( + in_channel, + out_channel, + 3, + style_dim, + upsample=True, + blur_kernel=blur_kernel, + ) + ) + + self.convs.append( + StyledConv( + out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel + ) + ) + + self.to_rgbs.append(ToRGB(out_channel, style_dim)) + + in_channel = out_channel + + self.n_latent = self.log_size * 2 - 2 + + def make_noise(self): + device = self.input.input.device + + noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)] + + for i in range(3, self.log_size + 1): + for _ in range(2): + noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device)) + + return noises + + def mean_latent(self, n_latent): + latent_in = torch.randn( + n_latent, self.style_dim, device=self.input.input.device + ) + latent = self.style(latent_in).mean(0, keepdim=True) + + return latent + + def get_latent(self, input): + return self.style(input) + + def forward( + self, + styles, + return_latents=False, + inject_index=None, + truncation=1, + truncation_latent=None, + input_is_latent=False, + noise=None, + randomize_noise=True, + z_plus_latent=False, + return_feature_ind=999, + ): + if not input_is_latent: + if not z_plus_latent: + styles = [self.style(s) for s in styles] + else: + styles_ = [] + for s in styles: + style_ = [] + for i in range(s.shape[1]): + style_.append(self.style(s[:,i]).unsqueeze(1)) + styles_.append(torch.cat(style_,dim=1)) + styles = styles_ + + if noise is None: + if randomize_noise: + noise = [None] * self.num_layers + else: + noise = [ + getattr(self.noises, f"noise_{i}") for i in range(self.num_layers) + ] + + if truncation < 1: + style_t = [] + + for style in styles: + style_t.append( + truncation_latent + truncation * (style - truncation_latent) + ) + + styles = style_t + + if len(styles) < 2: + inject_index = self.n_latent + + if styles[0].ndim < 3: + latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) + + else: + latent = styles[0] + + else: + if inject_index is None: + inject_index = random.randint(1, self.n_latent - 1) + + if styles[0].ndim < 3: + latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) + latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1) + + latent = torch.cat([latent, latent2], 1) + else: + latent = torch.cat([styles[0][:,0:inject_index], styles[1][:,inject_index:]], 1) + + out = self.input(latent) + out = self.conv1(out, latent[:, 0], noise=noise[0]) + + skip = self.to_rgb1(out, latent[:, 1]) + + i = 1 + for conv1, conv2, noise1, noise2, to_rgb in zip( + self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs + ): + out = conv1(out, latent[:, i], noise=noise1) + out = conv2(out, latent[:, i + 1], noise=noise2) + skip = to_rgb(out, latent[:, i + 2], skip) + + i += 2 + if i > return_feature_ind: + return out, skip + + image = skip + + if return_latents: + return image, latent + + else: + return image, None + + +class ConvLayer(nn.Sequential): + def __init__( + self, + in_channel, + out_channel, + kernel_size, + downsample=False, + blur_kernel=[1, 3, 3, 1], + bias=True, + activate=True, + dilation=1, ## modified + ): + layers = [] + + if downsample: + factor = 2 + p = (len(blur_kernel) - factor) + (kernel_size - 1) + pad0 = (p + 1) // 2 + pad1 = p // 2 + + layers.append(Blur(blur_kernel, pad=(pad0, pad1))) + + stride = 2 + self.padding = 0 + + else: + stride = 1 + self.padding = kernel_size // 2 + dilation-1 ## modified + + layers.append( + 
EqualConv2d( + in_channel, + out_channel, + kernel_size, + padding=self.padding, + stride=stride, + bias=bias and not activate, + dilation=dilation, ## modified + ) + ) + + if activate: + layers.append(FusedLeakyReLU(out_channel, bias=bias)) + + super().__init__(*layers) + + +class ResBlock(nn.Module): + def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]): + super().__init__() + + self.conv1 = ConvLayer(in_channel, in_channel, 3) + self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True) + + self.skip = ConvLayer( + in_channel, out_channel, 1, downsample=True, activate=False, bias=False + ) + + def forward(self, input): + out = self.conv1(input) + out = self.conv2(out) + + skip = self.skip(input) + out = (out + skip) / math.sqrt(2) + + return out + + +class Discriminator(nn.Module): + def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]): + super().__init__() + + channels = { + 4: 512, + 8: 512, + 16: 512, + 32: 512, + 64: 256 * channel_multiplier, + 128: 128 * channel_multiplier, + 256: 64 * channel_multiplier, + 512: 32 * channel_multiplier, + 1024: 16 * channel_multiplier, + } + + convs = [ConvLayer(3, channels[size], 1)] + + log_size = int(math.log(size, 2)) + + in_channel = channels[size] + + for i in range(log_size, 2, -1): + out_channel = channels[2 ** (i - 1)] + + convs.append(ResBlock(in_channel, out_channel, blur_kernel)) + + in_channel = out_channel + + self.convs = nn.Sequential(*convs) + + self.stddev_group = 4 + self.stddev_feat = 1 + + self.final_conv = ConvLayer(in_channel + 1, channels[4], 3) + self.final_linear = nn.Sequential( + EqualLinear(channels[4] * 4 * 4, channels[4], activation="fused_lrelu"), + EqualLinear(channels[4], 1), + ) + + def forward(self, input): + out = self.convs(input) + + batch, channel, height, width = out.shape + group = min(batch, self.stddev_group) + stddev = out.view( + group, -1, self.stddev_feat, channel // self.stddev_feat, height, width + ) + stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) + stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) + stddev = stddev.repeat(group, 1, height, width) + out = torch.cat([out, stddev], 1) + + out = self.final_conv(out) + + out = out.view(batch, -1) + out = self.final_linear(out) + + return out \ No newline at end of file diff --git a/vtoonify/model/stylegan/non_leaking.py b/vtoonify/model/stylegan/non_leaking.py new file mode 100644 index 0000000000000000000000000000000000000000..0cd9239dfe73fdfde6b8c009b67bec6d2b2de625 --- /dev/null +++ b/vtoonify/model/stylegan/non_leaking.py @@ -0,0 +1,469 @@ +import math + +import torch +from torch import autograd +from torch.nn import functional as F +import numpy as np + +from model.stylegan.distributed import reduce_sum +from model.stylegan.op import upfirdn2d + + +class AdaptiveAugment: + def __init__(self, ada_aug_target, ada_aug_len, update_every, device): + self.ada_aug_target = ada_aug_target + self.ada_aug_len = ada_aug_len + self.update_every = update_every + + self.ada_update = 0 + self.ada_aug_buf = torch.tensor([0.0, 0.0], device=device) + self.r_t_stat = 0 + self.ada_aug_p = 0 + + @torch.no_grad() + def tune(self, real_pred): + self.ada_aug_buf += torch.tensor( + (torch.sign(real_pred).sum().item(), real_pred.shape[0]), + device=real_pred.device, + ) + self.ada_update += 1 + + if self.ada_update % self.update_every == 0: + self.ada_aug_buf = reduce_sum(self.ada_aug_buf) + pred_signs, n_pred = self.ada_aug_buf.tolist() + + self.r_t_stat = pred_signs / n_pred + + if self.r_t_stat > 
self.ada_aug_target: + sign = 1 + + else: + sign = -1 + + self.ada_aug_p += sign * n_pred / self.ada_aug_len + self.ada_aug_p = min(1, max(0, self.ada_aug_p)) + self.ada_aug_buf.mul_(0) + self.ada_update = 0 + + return self.ada_aug_p + + +SYM6 = ( + 0.015404109327027373, + 0.0034907120842174702, + -0.11799011114819057, + -0.048311742585633, + 0.4910559419267466, + 0.787641141030194, + 0.3379294217276218, + -0.07263752278646252, + -0.021060292512300564, + 0.04472490177066578, + 0.0017677118642428036, + -0.007800708325034148, +) + + +def translate_mat(t_x, t_y, device="cpu"): + batch = t_x.shape[0] + + mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1) + translate = torch.stack((t_x, t_y), 1) + mat[:, :2, 2] = translate + + return mat + + +def rotate_mat(theta, device="cpu"): + batch = theta.shape[0] + + mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1) + sin_t = torch.sin(theta) + cos_t = torch.cos(theta) + rot = torch.stack((cos_t, -sin_t, sin_t, cos_t), 1).view(batch, 2, 2) + mat[:, :2, :2] = rot + + return mat + + +def scale_mat(s_x, s_y, device="cpu"): + batch = s_x.shape[0] + + mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1) + mat[:, 0, 0] = s_x + mat[:, 1, 1] = s_y + + return mat + + +def translate3d_mat(t_x, t_y, t_z): + batch = t_x.shape[0] + + mat = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) + translate = torch.stack((t_x, t_y, t_z), 1) + mat[:, :3, 3] = translate + + return mat + + +def rotate3d_mat(axis, theta): + batch = theta.shape[0] + + u_x, u_y, u_z = axis + + eye = torch.eye(3).unsqueeze(0) + cross = torch.tensor([(0, -u_z, u_y), (u_z, 0, -u_x), (-u_y, u_x, 0)]).unsqueeze(0) + outer = torch.tensor(axis) + outer = (outer.unsqueeze(1) * outer).unsqueeze(0) + + sin_t = torch.sin(theta).view(-1, 1, 1) + cos_t = torch.cos(theta).view(-1, 1, 1) + + rot = cos_t * eye + sin_t * cross + (1 - cos_t) * outer + + eye_4 = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) + eye_4[:, :3, :3] = rot + + return eye_4 + + +def scale3d_mat(s_x, s_y, s_z): + batch = s_x.shape[0] + + mat = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) + mat[:, 0, 0] = s_x + mat[:, 1, 1] = s_y + mat[:, 2, 2] = s_z + + return mat + + +def luma_flip_mat(axis, i): + batch = i.shape[0] + + eye = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) + axis = torch.tensor(axis + (0,)) + flip = 2 * torch.ger(axis, axis) * i.view(-1, 1, 1) + + return eye - flip + + +def saturation_mat(axis, i): + batch = i.shape[0] + + eye = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) + axis = torch.tensor(axis + (0,)) + axis = torch.ger(axis, axis) + saturate = axis + (eye - axis) * i.view(-1, 1, 1) + + return saturate + + +def lognormal_sample(size, mean=0, std=1, device="cpu"): + return torch.empty(size, device=device).log_normal_(mean=mean, std=std) + + +def category_sample(size, categories, device="cpu"): + category = torch.tensor(categories, device=device) + sample = torch.randint(high=len(categories), size=(size,), device=device) + + return category[sample] + + +def uniform_sample(size, low, high, device="cpu"): + return torch.empty(size, device=device).uniform_(low, high) + + +def normal_sample(size, mean=0, std=1, device="cpu"): + return torch.empty(size, device=device).normal_(mean, std) + + +def bernoulli_sample(size, p, device="cpu"): + return torch.empty(size, device=device).bernoulli_(p) + + +def random_mat_apply(p, transform, prev, eye, device="cpu"): + size = transform.shape[0] + select = bernoulli_sample(size, p, device=device).view(size, 1, 1) + select_transform = select * 
transform + (1 - select) * eye + + return select_transform @ prev + + +def sample_affine(p, size, height, width, device="cpu"): + G = torch.eye(3, device=device).unsqueeze(0).repeat(size, 1, 1) + eye = G + + # flip + param = category_sample(size, (0, 1)) + Gc = scale_mat(1 - 2.0 * param, torch.ones(size), device=device) + G = random_mat_apply(p, Gc, G, eye, device=device) + # print('flip', G, scale_mat(1 - 2.0 * param, torch.ones(size)), sep='\n') + + # 90 rotate + #param = category_sample(size, (0, 3)) + #Gc = rotate_mat(-math.pi / 2 * param, device=device) + #G = random_mat_apply(p, Gc, G, eye, device=device) + # print('90 rotate', G, rotate_mat(-math.pi / 2 * param), sep='\n') + + # integer translate + param = uniform_sample(size, -0.125, 0.125) + param_height = torch.round(param * height) / height + param_width = torch.round(param * width) / width + Gc = translate_mat(param_width, param_height, device=device) + G = random_mat_apply(p, Gc, G, eye, device=device) + # print('integer translate', G, translate_mat(param_width, param_height), sep='\n') + + # isotropic scale + param = lognormal_sample(size, std=0.2 * math.log(2)) + Gc = scale_mat(param, param, device=device) + G = random_mat_apply(p, Gc, G, eye, device=device) + # print('isotropic scale', G, scale_mat(param, param), sep='\n') + + p_rot = 1 - math.sqrt(1 - p) + + # pre-rotate + param = uniform_sample(size, -math.pi, math.pi) + Gc = rotate_mat(-param, device=device) + G = random_mat_apply(p_rot, Gc, G, eye, device=device) + # print('pre-rotate', G, rotate_mat(-param), sep='\n') + + # anisotropic scale + param = lognormal_sample(size, std=0.2 * math.log(2)) + Gc = scale_mat(param, 1 / param, device=device) + G = random_mat_apply(p, Gc, G, eye, device=device) + # print('anisotropic scale', G, scale_mat(param, 1 / param), sep='\n') + + # post-rotate + param = uniform_sample(size, -math.pi, math.pi) + Gc = rotate_mat(-param, device=device) + G = random_mat_apply(p_rot, Gc, G, eye, device=device) + # print('post-rotate', G, rotate_mat(-param), sep='\n') + + # fractional translate + param = normal_sample(size, std=0.125) + Gc = translate_mat(param, param, device=device) + G = random_mat_apply(p, Gc, G, eye, device=device) + # print('fractional translate', G, translate_mat(param, param), sep='\n') + + return G + + +def sample_color(p, size): + C = torch.eye(4).unsqueeze(0).repeat(size, 1, 1) + eye = C + axis_val = 1 / math.sqrt(3) + axis = (axis_val, axis_val, axis_val) + + # brightness + param = normal_sample(size, std=0.2) + Cc = translate3d_mat(param, param, param) + C = random_mat_apply(p, Cc, C, eye) + + # contrast + param = lognormal_sample(size, std=0.5 * math.log(2)) + Cc = scale3d_mat(param, param, param) + C = random_mat_apply(p, Cc, C, eye) + + # luma flip + param = category_sample(size, (0, 1)) + Cc = luma_flip_mat(axis, param) + C = random_mat_apply(p, Cc, C, eye) + + # hue rotation + param = uniform_sample(size, -math.pi, math.pi) + Cc = rotate3d_mat(axis, param) + C = random_mat_apply(p, Cc, C, eye) + + # saturation + param = lognormal_sample(size, std=1 * math.log(2)) + Cc = saturation_mat(axis, param) + C = random_mat_apply(p, Cc, C, eye) + + return C + + +def make_grid(shape, x0, x1, y0, y1, device): + n, c, h, w = shape + grid = torch.empty(n, h, w, 3, device=device) + grid[:, :, :, 0] = torch.linspace(x0, x1, w, device=device) + grid[:, :, :, 1] = torch.linspace(y0, y1, h, device=device).unsqueeze(-1) + grid[:, :, :, 2] = 1 + + return grid + + +def affine_grid(grid, mat): + n, h, w, _ = grid.shape + return 
(grid.view(n, h * w, 3) @ mat.transpose(1, 2)).view(n, h, w, 2) + + +def get_padding(G, height, width, kernel_size): + device = G.device + + cx = (width - 1) / 2 + cy = (height - 1) / 2 + cp = torch.tensor( + [(-cx, -cy, 1), (cx, -cy, 1), (cx, cy, 1), (-cx, cy, 1)], device=device + ) + cp = G @ cp.T + + pad_k = kernel_size // 4 + + pad = cp[:, :2, :].permute(1, 0, 2).flatten(1) + pad = torch.cat((-pad, pad)).max(1).values + pad = pad + torch.tensor([pad_k * 2 - cx, pad_k * 2 - cy] * 2, device=device) + pad = pad.max(torch.tensor([0, 0] * 2, device=device)) + pad = pad.min(torch.tensor([width - 1, height - 1] * 2, device=device)) + + pad_x1, pad_y1, pad_x2, pad_y2 = pad.ceil().to(torch.int32) + + return pad_x1, pad_x2, pad_y1, pad_y2 + + +def try_sample_affine_and_pad(img, p, kernel_size, G=None): + batch, _, height, width = img.shape + + G_try = G + + if G is None: + G_try = torch.inverse(sample_affine(p, batch, height, width)) + + pad_x1, pad_x2, pad_y1, pad_y2 = get_padding(G_try, height, width, kernel_size) + + img_pad = F.pad(img, (pad_x1, pad_x2, pad_y1, pad_y2), mode="reflect") + + return img_pad, G_try, (pad_x1, pad_x2, pad_y1, pad_y2) + + +class GridSampleForward(autograd.Function): + @staticmethod + def forward(ctx, input, grid): + out = F.grid_sample( + input, grid, mode="bilinear", padding_mode="zeros", align_corners=False + ) + ctx.save_for_backward(input, grid) + + return out + + @staticmethod + def backward(ctx, grad_output): + input, grid = ctx.saved_tensors + grad_input, grad_grid = GridSampleBackward.apply(grad_output, input, grid) + + return grad_input, grad_grid + + +class GridSampleBackward(autograd.Function): + @staticmethod + def forward(ctx, grad_output, input, grid): + op = torch._C._jit_get_operation("aten::grid_sampler_2d_backward") + grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) + ctx.save_for_backward(grid) + + return grad_input, grad_grid + + @staticmethod + def backward(ctx, grad_grad_input, grad_grad_grid): + grid, = ctx.saved_tensors + grad_grad_output = None + + if ctx.needs_input_grad[0]: + grad_grad_output = GridSampleForward.apply(grad_grad_input, grid) + + return grad_grad_output, None, None + + +grid_sample = GridSampleForward.apply + + +def scale_mat_single(s_x, s_y): + return torch.tensor(((s_x, 0, 0), (0, s_y, 0), (0, 0, 1)), dtype=torch.float32) + + +def translate_mat_single(t_x, t_y): + return torch.tensor(((1, 0, t_x), (0, 1, t_y), (0, 0, 1)), dtype=torch.float32) + + +def random_apply_affine(img, p, G=None, antialiasing_kernel=SYM6): + kernel = antialiasing_kernel + len_k = len(kernel) + + kernel = torch.as_tensor(kernel).to(img) + # kernel = torch.ger(kernel, kernel).to(img) + kernel_flip = torch.flip(kernel, (0,)) + + img_pad, G, (pad_x1, pad_x2, pad_y1, pad_y2) = try_sample_affine_and_pad( + img, p, len_k, G + ) + + G_inv = ( + translate_mat_single((pad_x1 - pad_x2).item() / 2, (pad_y1 - pad_y2).item() / 2) + @ G + ) + up_pad = ( + (len_k + 2 - 1) // 2, + (len_k - 2) // 2, + (len_k + 2 - 1) // 2, + (len_k - 2) // 2, + ) + img_2x = upfirdn2d(img_pad, kernel.unsqueeze(0), up=(2, 1), pad=(*up_pad[:2], 0, 0)) + img_2x = upfirdn2d(img_2x, kernel.unsqueeze(1), up=(1, 2), pad=(0, 0, *up_pad[2:])) + G_inv = scale_mat_single(2, 2) @ G_inv @ scale_mat_single(1 / 2, 1 / 2) + G_inv = translate_mat_single(-0.5, -0.5) @ G_inv @ translate_mat_single(0.5, 0.5) + batch_size, channel, height, width = img.shape + pad_k = len_k // 4 + shape = (batch_size, channel, (height + pad_k * 2) * 2, (width + pad_k * 2) * 2) + G_inv = ( + 
scale_mat_single(2 / img_2x.shape[3], 2 / img_2x.shape[2]) + @ G_inv + @ scale_mat_single(1 / (2 / shape[3]), 1 / (2 / shape[2])) + ) + grid = F.affine_grid(G_inv[:, :2, :].to(img_2x), shape, align_corners=False) + img_affine = grid_sample(img_2x, grid) + d_p = -pad_k * 2 + down_pad = ( + d_p + (len_k - 2 + 1) // 2, + d_p + (len_k - 2) // 2, + d_p + (len_k - 2 + 1) // 2, + d_p + (len_k - 2) // 2, + ) + img_down = upfirdn2d( + img_affine, kernel_flip.unsqueeze(0), down=(2, 1), pad=(*down_pad[:2], 0, 0) + ) + img_down = upfirdn2d( + img_down, kernel_flip.unsqueeze(1), down=(1, 2), pad=(0, 0, *down_pad[2:]) + ) + + return img_down, G + + +def apply_color(img, mat): + batch = img.shape[0] + img = img.permute(0, 2, 3, 1) + mat_mul = mat[:, :3, :3].transpose(1, 2).view(batch, 1, 3, 3) + mat_add = mat[:, :3, 3].view(batch, 1, 1, 3) + img = img @ mat_mul + mat_add + img = img.permute(0, 3, 1, 2) + + return img + + +def random_apply_color(img, p, C=None): + if C is None: + C = sample_color(p, img.shape[0]) + + img = apply_color(img, C.to(img)) + + return img, C + + +def augment(img, p, transform_matrix=(None, None)): + img, G = random_apply_affine(img, p, transform_matrix[0]) + if img.shape[1] == 3: + img, C = random_apply_color(img, p, transform_matrix[1]) + else: + tmp, C = random_apply_color(img[:,0:3], p, transform_matrix[1]) + img = torch.cat((tmp, img[:,3:]), dim=1) + + return img, (G, C) diff --git a/vtoonify/model/stylegan/op/__init__.py b/vtoonify/model/stylegan/op/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d0918d92285955855be89f00096b888ee5597ce3 --- /dev/null +++ b/vtoonify/model/stylegan/op/__init__.py @@ -0,0 +1,2 @@ +from .fused_act import FusedLeakyReLU, fused_leaky_relu +from .upfirdn2d import upfirdn2d diff --git a/vtoonify/model/stylegan/op/conv2d_gradfix.py b/vtoonify/model/stylegan/op/conv2d_gradfix.py new file mode 100644 index 0000000000000000000000000000000000000000..947e07030eaeb40f55ce8345d751ecf2480dbd5c --- /dev/null +++ b/vtoonify/model/stylegan/op/conv2d_gradfix.py @@ -0,0 +1,227 @@ +import contextlib +import warnings + +import torch +from torch import autograd +from torch.nn import functional as F + +enabled = True +weight_gradients_disabled = False + + +@contextlib.contextmanager +def no_weight_gradients(): + global weight_gradients_disabled + + old = weight_gradients_disabled + weight_gradients_disabled = True + yield + weight_gradients_disabled = old + + +def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): + if could_use_op(input): + return conv2d_gradfix( + transpose=False, + weight_shape=weight.shape, + stride=stride, + padding=padding, + output_padding=0, + dilation=dilation, + groups=groups, + ).apply(input, weight, bias) + + return F.conv2d( + input=input, + weight=weight, + bias=bias, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + + +def conv_transpose2d( + input, + weight, + bias=None, + stride=1, + padding=0, + output_padding=0, + groups=1, + dilation=1, +): + if could_use_op(input): + return conv2d_gradfix( + transpose=True, + weight_shape=weight.shape, + stride=stride, + padding=padding, + output_padding=output_padding, + groups=groups, + dilation=dilation, + ).apply(input, weight, bias) + + return F.conv_transpose2d( + input=input, + weight=weight, + bias=bias, + stride=stride, + padding=padding, + output_padding=output_padding, + dilation=dilation, + groups=groups, + ) + + +def could_use_op(input): + if (not enabled) or (not torch.backends.cudnn.enabled): 
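+        # The custom gradient op is globally disabled or cuDNN is unavailable,
+        # so fall back to the plain torch.nn.functional implementation.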
+ return False + + if input.device.type != "cuda": + return False + + if any(torch.__version__.startswith(x) for x in ["1.7.", "1.8."]): + return True + + warnings.warn( + f"conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d()." + ) + + return False + + +def ensure_tuple(xs, ndim): + xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim + + return xs + + +conv2d_gradfix_cache = dict() + + +def conv2d_gradfix( + transpose, weight_shape, stride, padding, output_padding, dilation, groups +): + ndim = 2 + weight_shape = tuple(weight_shape) + stride = ensure_tuple(stride, ndim) + padding = ensure_tuple(padding, ndim) + output_padding = ensure_tuple(output_padding, ndim) + dilation = ensure_tuple(dilation, ndim) + + key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) + if key in conv2d_gradfix_cache: + return conv2d_gradfix_cache[key] + + common_kwargs = dict( + stride=stride, padding=padding, dilation=dilation, groups=groups + ) + + def calc_output_padding(input_shape, output_shape): + if transpose: + return [0, 0] + + return [ + input_shape[i + 2] + - (output_shape[i + 2] - 1) * stride[i] + - (1 - 2 * padding[i]) + - dilation[i] * (weight_shape[i + 2] - 1) + for i in range(ndim) + ] + + class Conv2d(autograd.Function): + @staticmethod + def forward(ctx, input, weight, bias): + if not transpose: + out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) + + else: + out = F.conv_transpose2d( + input=input, + weight=weight, + bias=bias, + output_padding=output_padding, + **common_kwargs, + ) + + ctx.save_for_backward(input, weight) + + return out + + @staticmethod + def backward(ctx, grad_output): + input, weight = ctx.saved_tensors + grad_input, grad_weight, grad_bias = None, None, None + + if ctx.needs_input_grad[0]: + p = calc_output_padding( + input_shape=input.shape, output_shape=grad_output.shape + ) + grad_input = conv2d_gradfix( + transpose=(not transpose), + weight_shape=weight_shape, + output_padding=p, + **common_kwargs, + ).apply(grad_output, weight, None) + + if ctx.needs_input_grad[1] and not weight_gradients_disabled: + grad_weight = Conv2dGradWeight.apply(grad_output, input) + + if ctx.needs_input_grad[2]: + grad_bias = grad_output.sum((0, 2, 3)) + + return grad_input, grad_weight, grad_bias + + class Conv2dGradWeight(autograd.Function): + @staticmethod + def forward(ctx, grad_output, input): + op = torch._C._jit_get_operation( + "aten::cudnn_convolution_backward_weight" + if not transpose + else "aten::cudnn_convolution_transpose_backward_weight" + ) + flags = [ + torch.backends.cudnn.benchmark, + torch.backends.cudnn.deterministic, + torch.backends.cudnn.allow_tf32, + ] + grad_weight = op( + weight_shape, + grad_output, + input, + padding, + stride, + dilation, + groups, + *flags, + ) + ctx.save_for_backward(grad_output, input) + + return grad_weight + + @staticmethod + def backward(ctx, grad_grad_weight): + grad_output, input = ctx.saved_tensors + grad_grad_output, grad_grad_input = None, None + + if ctx.needs_input_grad[0]: + grad_grad_output = Conv2d.apply(input, grad_grad_weight, None) + + if ctx.needs_input_grad[1]: + p = calc_output_padding( + input_shape=input.shape, output_shape=grad_output.shape + ) + grad_grad_input = conv2d_gradfix( + transpose=(not transpose), + weight_shape=weight_shape, + output_padding=p, + **common_kwargs, + ).apply(grad_output, grad_grad_weight, None) + + return grad_grad_output, grad_grad_input + + conv2d_gradfix_cache[key] = Conv2d + + 
return Conv2d diff --git a/vtoonify/model/stylegan/op/fused_act.py b/vtoonify/model/stylegan/op/fused_act.py new file mode 100644 index 0000000000000000000000000000000000000000..c90a1d9198e5b0751a918fa58cefca72533793cc --- /dev/null +++ b/vtoonify/model/stylegan/op/fused_act.py @@ -0,0 +1,34 @@ +import torch +from torch import nn +from torch.nn import functional as F + + +class FusedLeakyReLU(nn.Module): + def __init__(self, channel, bias=True, negative_slope=0.2, scale=2 ** 0.5): + super().__init__() + + if bias: + self.bias = nn.Parameter(torch.zeros(channel)) + + else: + self.bias = None + + self.negative_slope = negative_slope + self.scale = scale + + def forward(self, inputs): + return fused_leaky_relu(inputs, self.bias, self.negative_slope, self.scale) + + +def fused_leaky_relu(inputs, bias=None, negative_slope=0.2, scale=2 ** 0.5): + if bias is not None: + rest_dim = [1] * (inputs.ndim - bias.ndim - 1) + return ( + F.leaky_relu( + inputs + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope + ) + * scale + ) + + else: + return F.leaky_relu(inputs, negative_slope=negative_slope) * scale \ No newline at end of file diff --git a/vtoonify/model/stylegan/op/readme.md b/vtoonify/model/stylegan/op/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..7cffcfc72069ff9a098d292f9e37035031e19081 --- /dev/null +++ b/vtoonify/model/stylegan/op/readme.md @@ -0,0 +1,12 @@ +Code from [rosinality-stylegan2-pytorch-cp](https://github.com/senior-sigan/rosinality-stylegan2-pytorch-cpu) + +Scripts to convert rosinality/stylegan2-pytorch to the CPU compatible format + +If you would like to use CPU for testing or have a problem regarding the cpp extention (fused and upfirdn2d), please make the following changes: + +Change `model.stylegan.op` to `model.stylegan.op_cpu` +https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/util.py#L14 + +https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/model/simple_augment.py#L12 + +https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/model/stylegan/model.py#L11 diff --git a/vtoonify/model/stylegan/op/upfirdn2d.py b/vtoonify/model/stylegan/op/upfirdn2d.py new file mode 100644 index 0000000000000000000000000000000000000000..45f4f7f12cb4706ab6bea620d14008c462508322 --- /dev/null +++ b/vtoonify/model/stylegan/op/upfirdn2d.py @@ -0,0 +1,61 @@ +from collections import abc + +import torch +from torch.nn import functional as F + + +def upfirdn2d(inputs, kernel, up=1, down=1, pad=(0, 0)): + if not isinstance(up, abc.Iterable): + up = (up, up) + + if not isinstance(down, abc.Iterable): + down = (down, down) + + if len(pad) == 2: + pad = (pad[0], pad[1], pad[0], pad[1]) + + return upfirdn2d_native(inputs, kernel, *up, *down, *pad) + + +def upfirdn2d_native( + inputs, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 +): + _, channel, in_h, in_w = inputs.shape + inputs = inputs.reshape(-1, in_h, in_w, 1) + + _, in_h, in_w, minor = inputs.shape + kernel_h, kernel_w = kernel.shape + + out = inputs.view(-1, in_h, 1, in_w, 1, minor) + out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) + out = out.view(-1, in_h * up_y, in_w * up_x, minor) + + out = F.pad( + out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] + ) + out = out[ + :, + max(-pad_y0, 0): out.shape[1] - max(-pad_y1, 0), + max(-pad_x0, 0): out.shape[2] - max(-pad_x1, 0), + :, + ] + + out = out.permute(0, 3, 1, 2) + out = 
out.reshape( + [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] + ) + w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) + out = F.conv2d(out, w) + out = out.reshape( + -1, + minor, + in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, + in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, + ) + out = out.permute(0, 2, 3, 1) + out = out[:, ::down_y, ::down_x, :] + + out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // down_y + out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w + down_x) // down_x + + return out.view(-1, channel, out_h, out_w) \ No newline at end of file diff --git a/vtoonify/model/stylegan/op_gpu/__init__.py b/vtoonify/model/stylegan/op_gpu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d0918d92285955855be89f00096b888ee5597ce3 --- /dev/null +++ b/vtoonify/model/stylegan/op_gpu/__init__.py @@ -0,0 +1,2 @@ +from .fused_act import FusedLeakyReLU, fused_leaky_relu +from .upfirdn2d import upfirdn2d diff --git a/vtoonify/model/stylegan/op_gpu/conv2d_gradfix.py b/vtoonify/model/stylegan/op_gpu/conv2d_gradfix.py new file mode 100644 index 0000000000000000000000000000000000000000..64229c5a7fd04292140abac1f490619963009328 --- /dev/null +++ b/vtoonify/model/stylegan/op_gpu/conv2d_gradfix.py @@ -0,0 +1,227 @@ +import contextlib +import warnings + +import torch +from torch import autograd +from torch.nn import functional as F + +enabled = True +weight_gradients_disabled = False + + +@contextlib.contextmanager +def no_weight_gradients(): + global weight_gradients_disabled + + old = weight_gradients_disabled + weight_gradients_disabled = True + yield + weight_gradients_disabled = old + + +def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): + if could_use_op(input): + return conv2d_gradfix( + transpose=False, + weight_shape=weight.shape, + stride=stride, + padding=padding, + output_padding=0, + dilation=dilation, + groups=groups, + ).apply(input, weight, bias) + + return F.conv2d( + input=input, + weight=weight, + bias=bias, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + + +def conv_transpose2d( + input, + weight, + bias=None, + stride=1, + padding=0, + output_padding=0, + groups=1, + dilation=1, +): + if could_use_op(input): + return conv2d_gradfix( + transpose=True, + weight_shape=weight.shape, + stride=stride, + padding=padding, + output_padding=output_padding, + groups=groups, + dilation=dilation, + ).apply(input, weight, bias) + + return F.conv_transpose2d( + input=input, + weight=weight, + bias=bias, + stride=stride, + padding=padding, + output_padding=output_padding, + dilation=dilation, + groups=groups, + ) + + +def could_use_op(input): + if (not enabled) or (not torch.backends.cudnn.enabled): + return False + + if input.device.type != "cuda": + return False + + if any(torch.__version__.startswith(x) for x in ["1.7.", "1.8."]): + return True + + warnings.warn( + f"conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d()." 
+ ) + + return False + + +def ensure_tuple(xs, ndim): + xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim + + return xs + + +conv2d_gradfix_cache = dict() + + +def conv2d_gradfix( + transpose, weight_shape, stride, padding, output_padding, dilation, groups +): + ndim = 2 + weight_shape = tuple(weight_shape) + stride = ensure_tuple(stride, ndim) + padding = ensure_tuple(padding, ndim) + output_padding = ensure_tuple(output_padding, ndim) + dilation = ensure_tuple(dilation, ndim) + + key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) + if key in conv2d_gradfix_cache: + return conv2d_gradfix_cache[key] + + common_kwargs = dict( + stride=stride, padding=padding, dilation=dilation, groups=groups + ) + + def calc_output_padding(input_shape, output_shape): + if transpose: + return [0, 0] + + return [ + input_shape[i + 2] + - (output_shape[i + 2] - 1) * stride[i] + - (1 - 2 * padding[i]) + - dilation[i] * (weight_shape[i + 2] - 1) + for i in range(ndim) + ] + + class Conv2d(autograd.Function): + @staticmethod + def forward(ctx, input, weight, bias): + if not transpose: + out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) + + else: + out = F.conv_transpose2d( + input=input, + weight=weight, + bias=bias, + output_padding=output_padding, + **common_kwargs, + ) + + ctx.save_for_backward(input, weight) + + return out + + @staticmethod + def backward(ctx, grad_output): + input, weight = ctx.saved_tensors + grad_input, grad_weight, grad_bias = None, None, None + + if ctx.needs_input_grad[0]: + p = calc_output_padding( + input_shape=input.shape, output_shape=grad_output.shape + ) + grad_input = conv2d_gradfix( + transpose=(not transpose), + weight_shape=weight_shape, + output_padding=p, + **common_kwargs, + ).apply(grad_output, weight, None) + + if ctx.needs_input_grad[1] and not weight_gradients_disabled: + grad_weight = Conv2dGradWeight.apply(grad_output, input) + + if ctx.needs_input_grad[2]: + grad_bias = grad_output.sum((0, 2, 3)) + + return grad_input, grad_weight, grad_bias + + class Conv2dGradWeight(autograd.Function): + @staticmethod + def forward(ctx, grad_output, input): + op = torch._C._jit_get_operation( + "aten::cudnn_convolution_backward_weight" + if not transpose + else "aten::cudnn_convolution_transpose_backward_weight" + ) + flags = [ + torch.backends.cudnn.benchmark, + torch.backends.cudnn.deterministic, + torch.backends.cudnn.allow_tf32, + ] + grad_weight = op( + weight_shape, + grad_output, + input, + padding, + stride, + dilation, + groups, + *flags, + ) + ctx.save_for_backward(grad_output, input) + + return grad_weight + + @staticmethod + def backward(ctx, grad_grad_weight): + grad_output, input = ctx.saved_tensors + grad_grad_output, grad_grad_input = None, None + + if ctx.needs_input_grad[0]: + grad_grad_output = Conv2d.apply(input, grad_grad_weight, None) + + if ctx.needs_input_grad[1]: + p = calc_output_padding( + input_shape=input.shape, output_shape=grad_output.shape + ) + grad_grad_input = conv2d_gradfix( + transpose=(not transpose), + weight_shape=weight_shape, + output_padding=p, + **common_kwargs, + ).apply(grad_output, grad_grad_weight, None) + + return grad_grad_output, grad_grad_input + + conv2d_gradfix_cache[key] = Conv2d + + return Conv2d diff --git a/vtoonify/model/stylegan/op_gpu/fused_act.py b/vtoonify/model/stylegan/op_gpu/fused_act.py new file mode 100644 index 0000000000000000000000000000000000000000..c3b735c09de86f1b97dac147be219a4d62c46263 --- /dev/null +++ 
b/vtoonify/model/stylegan/op_gpu/fused_act.py @@ -0,0 +1,119 @@ +import os + +import torch +from torch import nn +from torch.nn import functional as F +from torch.autograd import Function +from torch.utils.cpp_extension import load + + +module_path = os.path.dirname(__file__) +fused = load( + "fused", + sources=[ + os.path.join(module_path, "fused_bias_act.cpp"), + os.path.join(module_path, "fused_bias_act_kernel.cu"), + ], +) + + +class FusedLeakyReLUFunctionBackward(Function): + @staticmethod + def forward(ctx, grad_output, out, bias, negative_slope, scale): + ctx.save_for_backward(out) + ctx.negative_slope = negative_slope + ctx.scale = scale + + empty = grad_output.new_empty(0) + + grad_input = fused.fused_bias_act( + grad_output.contiguous(), empty, out, 3, 1, negative_slope, scale + ) + + dim = [0] + + if grad_input.ndim > 2: + dim += list(range(2, grad_input.ndim)) + + if bias: + grad_bias = grad_input.sum(dim).detach() + + else: + grad_bias = empty + + return grad_input, grad_bias + + @staticmethod + def backward(ctx, gradgrad_input, gradgrad_bias): + out, = ctx.saved_tensors + gradgrad_out = fused.fused_bias_act( + gradgrad_input.contiguous(), gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale + ) + + return gradgrad_out, None, None, None, None + + +class FusedLeakyReLUFunction(Function): + @staticmethod + def forward(ctx, input, bias, negative_slope, scale): + empty = input.new_empty(0) + + ctx.bias = bias is not None + + if bias is None: + bias = empty + + out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) + ctx.save_for_backward(out) + ctx.negative_slope = negative_slope + ctx.scale = scale + + return out + + @staticmethod + def backward(ctx, grad_output): + out, = ctx.saved_tensors + + grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( + grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale + ) + + if not ctx.bias: + grad_bias = None + + return grad_input, grad_bias, None, None + + +class FusedLeakyReLU(nn.Module): + def __init__(self, channel, bias=True, negative_slope=0.2, scale=2 ** 0.5): + super().__init__() + + if bias: + self.bias = nn.Parameter(torch.zeros(channel)) + + else: + self.bias = None + + self.negative_slope = negative_slope + self.scale = scale + + def forward(self, input): + return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale) + + +def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5): + if input.device.type == "cpu": + if bias is not None: + rest_dim = [1] * (input.ndim - bias.ndim - 1) + return ( + F.leaky_relu( + input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2 + ) + * scale + ) + + else: + return F.leaky_relu(input, negative_slope=0.2) * scale + + else: + return FusedLeakyReLUFunction.apply(input.contiguous(), bias, negative_slope, scale) diff --git a/vtoonify/model/stylegan/op_gpu/fused_bias_act.cpp b/vtoonify/model/stylegan/op_gpu/fused_bias_act.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e8466aa15b2a36d501eda423ccb31b7b9cbd03e5 --- /dev/null +++ b/vtoonify/model/stylegan/op_gpu/fused_bias_act.cpp @@ -0,0 +1,32 @@ + +#include +#include + +torch::Tensor fused_bias_act_op(const torch::Tensor &input, + const torch::Tensor &bias, + const torch::Tensor &refer, int act, int grad, + float alpha, float scale); + +#define CHECK_CUDA(x) \ + TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) \ + 
CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +torch::Tensor fused_bias_act(const torch::Tensor &input, + const torch::Tensor &bias, + const torch::Tensor &refer, int act, int grad, + float alpha, float scale) { + CHECK_INPUT(input); + CHECK_INPUT(bias); + + at::DeviceGuard guard(input.device()); + + return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); +} \ No newline at end of file diff --git a/vtoonify/model/stylegan/op_gpu/fused_bias_act_kernel.cu b/vtoonify/model/stylegan/op_gpu/fused_bias_act_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..65427d333b8a615d1cea5765de22318623706134 --- /dev/null +++ b/vtoonify/model/stylegan/op_gpu/fused_bias_act_kernel.cu @@ -0,0 +1,105 @@ +// Copyright (c) 2019, NVIDIA Corporation. All rights reserved. +// +// This work is made available under the Nvidia Source Code License-NC. +// To view a copy of this license, visit +// https://nvlabs.github.io/stylegan2/license.html + +#include + +#include +#include +#include +#include + + +#include +#include + +template +static __global__ void +fused_bias_act_kernel(scalar_t *out, const scalar_t *p_x, const scalar_t *p_b, + const scalar_t *p_ref, int act, int grad, scalar_t alpha, + scalar_t scale, int loop_x, int size_x, int step_b, + int size_b, int use_bias, int use_ref) { + int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x; + + scalar_t zero = 0.0; + + for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; + loop_idx++, xi += blockDim.x) { + scalar_t x = p_x[xi]; + + if (use_bias) { + x += p_b[(xi / step_b) % size_b]; + } + + scalar_t ref = use_ref ? p_ref[xi] : zero; + + scalar_t y; + + switch (act * 10 + grad) { + default: + case 10: + y = x; + break; + case 11: + y = x; + break; + case 12: + y = 0.0; + break; + + case 30: + y = (x > 0.0) ? x : x * alpha; + break; + case 31: + y = (ref > 0.0) ? x : x * alpha; + break; + case 32: + y = 0.0; + break; + } + + out[xi] = y * scale; + } +} + +torch::Tensor fused_bias_act_op(const torch::Tensor &input, + const torch::Tensor &bias, + const torch::Tensor &refer, int act, int grad, + float alpha, float scale) { + int curDevice = -1; + cudaGetDevice(&curDevice); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + auto x = input.contiguous(); + auto b = bias.contiguous(); + auto ref = refer.contiguous(); + + int use_bias = b.numel() ? 1 : 0; + int use_ref = ref.numel() ? 
1 : 0; + + int size_x = x.numel(); + int size_b = b.numel(); + int step_b = 1; + + for (int i = 1 + 1; i < x.dim(); i++) { + step_b *= x.size(i); + } + + int loop_x = 4; + int block_size = 4 * 32; + int grid_size = (size_x - 1) / (loop_x * block_size) + 1; + + auto y = torch::empty_like(x); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + x.scalar_type(), "fused_bias_act_kernel", [&] { + fused_bias_act_kernel<<>>( + y.data_ptr(), x.data_ptr(), + b.data_ptr(), ref.data_ptr(), act, grad, alpha, + scale, loop_x, size_x, step_b, size_b, use_bias, use_ref); + }); + + return y; +} \ No newline at end of file diff --git a/vtoonify/model/stylegan/op_gpu/upfirdn2d.cpp b/vtoonify/model/stylegan/op_gpu/upfirdn2d.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d667b2cfcb2eb129b6582dea58dc0dda31e4bc45 --- /dev/null +++ b/vtoonify/model/stylegan/op_gpu/upfirdn2d.cpp @@ -0,0 +1,31 @@ +#include +#include + +torch::Tensor upfirdn2d_op(const torch::Tensor &input, + const torch::Tensor &kernel, int up_x, int up_y, + int down_x, int down_y, int pad_x0, int pad_x1, + int pad_y0, int pad_y1); + +#define CHECK_CUDA(x) \ + TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +torch::Tensor upfirdn2d(const torch::Tensor &input, const torch::Tensor &kernel, + int up_x, int up_y, int down_x, int down_y, int pad_x0, + int pad_x1, int pad_y0, int pad_y1) { + CHECK_INPUT(input); + CHECK_INPUT(kernel); + + at::DeviceGuard guard(input.device()); + + return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, + pad_y0, pad_y1); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)"); +} \ No newline at end of file diff --git a/vtoonify/model/stylegan/op_gpu/upfirdn2d.py b/vtoonify/model/stylegan/op_gpu/upfirdn2d.py new file mode 100644 index 0000000000000000000000000000000000000000..67e0375efac0f1cadcda12e667b72c36369e9c88 --- /dev/null +++ b/vtoonify/model/stylegan/op_gpu/upfirdn2d.py @@ -0,0 +1,209 @@ +from collections import abc +import os + +import torch +from torch.nn import functional as F +from torch.autograd import Function +from torch.utils.cpp_extension import load + + +module_path = os.path.dirname(__file__) +upfirdn2d_op = load( + "upfirdn2d", + sources=[ + os.path.join(module_path, "upfirdn2d.cpp"), + os.path.join(module_path, "upfirdn2d_kernel.cu"), + ], +) + + +class UpFirDn2dBackward(Function): + @staticmethod + def forward( + ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size + ): + + up_x, up_y = up + down_x, down_y = down + g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad + + grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) + + grad_input = upfirdn2d_op.upfirdn2d( + grad_output, + grad_kernel, + down_x, + down_y, + up_x, + up_y, + g_pad_x0, + g_pad_x1, + g_pad_y0, + g_pad_y1, + ) + grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) + + ctx.save_for_backward(kernel) + + pad_x0, pad_x1, pad_y0, pad_y1 = pad + + ctx.up_x = up_x + ctx.up_y = up_y + ctx.down_x = down_x + ctx.down_y = down_y + ctx.pad_x0 = pad_x0 + ctx.pad_x1 = pad_x1 + ctx.pad_y0 = pad_y0 + ctx.pad_y1 = pad_y1 + ctx.in_size = in_size + ctx.out_size = out_size + + return grad_input + + @staticmethod + def backward(ctx, gradgrad_input): + kernel, = ctx.saved_tensors + + gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], 
ctx.in_size[3], 1) + + gradgrad_out = upfirdn2d_op.upfirdn2d( + gradgrad_input, + kernel, + ctx.up_x, + ctx.up_y, + ctx.down_x, + ctx.down_y, + ctx.pad_x0, + ctx.pad_x1, + ctx.pad_y0, + ctx.pad_y1, + ) + # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3]) + gradgrad_out = gradgrad_out.view( + ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1] + ) + + return gradgrad_out, None, None, None, None, None, None, None, None + + +class UpFirDn2d(Function): + @staticmethod + def forward(ctx, input, kernel, up, down, pad): + up_x, up_y = up + down_x, down_y = down + pad_x0, pad_x1, pad_y0, pad_y1 = pad + + kernel_h, kernel_w = kernel.shape + batch, channel, in_h, in_w = input.shape + ctx.in_size = input.shape + + input = input.reshape(-1, in_h, in_w, 1) + + ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) + + out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // down_y + out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w + down_x) // down_x + ctx.out_size = (out_h, out_w) + + ctx.up = (up_x, up_y) + ctx.down = (down_x, down_y) + ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) + + g_pad_x0 = kernel_w - pad_x0 - 1 + g_pad_y0 = kernel_h - pad_y0 - 1 + g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 + g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 + + ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) + + out = upfirdn2d_op.upfirdn2d( + input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 + ) + # out = out.view(major, out_h, out_w, minor) + out = out.view(-1, channel, out_h, out_w) + + return out + + @staticmethod + def backward(ctx, grad_output): + kernel, grad_kernel = ctx.saved_tensors + + grad_input = None + + if ctx.needs_input_grad[0]: + grad_input = UpFirDn2dBackward.apply( + grad_output, + kernel, + grad_kernel, + ctx.up, + ctx.down, + ctx.pad, + ctx.g_pad, + ctx.in_size, + ctx.out_size, + ) + + return grad_input, None, None, None, None + + +def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): + if not isinstance(up, abc.Iterable): + up = (up, up) + + if not isinstance(down, abc.Iterable): + down = (down, down) + + if len(pad) == 2: + pad = (pad[0], pad[1], pad[0], pad[1]) + + if input.device.type == "cpu": + out = upfirdn2d_native(input, kernel, *up, *down, *pad) + + else: + out = UpFirDn2d.apply(input, kernel, up, down, pad) + + return out + + +def upfirdn2d_native( + input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 +): + _, channel, in_h, in_w = input.shape + input = input.reshape(-1, in_h, in_w, 1) + + _, in_h, in_w, minor = input.shape + kernel_h, kernel_w = kernel.shape + + out = input.view(-1, in_h, 1, in_w, 1, minor) + out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) + out = out.view(-1, in_h * up_y, in_w * up_x, minor) + + out = F.pad( + out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] + ) + out = out[ + :, + max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), + max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), + :, + ] + + out = out.permute(0, 3, 1, 2) + out = out.reshape( + [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] + ) + w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) + out = F.conv2d(out, w) + out = out.reshape( + -1, + minor, + in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, + in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, + ) + out = out.permute(0, 2, 3, 1) + out = out[:, ::down_y, ::down_x, :] + + out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // 
down_y + out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w + down_x) // down_x + + return out.view(-1, channel, out_h, out_w) diff --git a/vtoonify/model/stylegan/op_gpu/upfirdn2d_kernel.cu b/vtoonify/model/stylegan/op_gpu/upfirdn2d_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..61d3dc413041738bb6885e33da7b3ed8fbfa2496 --- /dev/null +++ b/vtoonify/model/stylegan/op_gpu/upfirdn2d_kernel.cu @@ -0,0 +1,369 @@ +// Copyright (c) 2019, NVIDIA Corporation. All rights reserved. +// +// This work is made available under the Nvidia Source Code License-NC. +// To view a copy of this license, visit +// https://nvlabs.github.io/stylegan2/license.html + +#include + +#include +#include +#include +#include + +#include +#include + +static __host__ __device__ __forceinline__ int floor_div(int a, int b) { + int c = a / b; + + if (c * b > a) { + c--; + } + + return c; +} + +struct UpFirDn2DKernelParams { + int up_x; + int up_y; + int down_x; + int down_y; + int pad_x0; + int pad_x1; + int pad_y0; + int pad_y1; + + int major_dim; + int in_h; + int in_w; + int minor_dim; + int kernel_h; + int kernel_w; + int out_h; + int out_w; + int loop_major; + int loop_x; +}; + +template +__global__ void upfirdn2d_kernel_large(scalar_t *out, const scalar_t *input, + const scalar_t *kernel, + const UpFirDn2DKernelParams p) { + int minor_idx = blockIdx.x * blockDim.x + threadIdx.x; + int out_y = minor_idx / p.minor_dim; + minor_idx -= out_y * p.minor_dim; + int out_x_base = blockIdx.y * p.loop_x * blockDim.y + threadIdx.y; + int major_idx_base = blockIdx.z * p.loop_major; + + if (out_x_base >= p.out_w || out_y >= p.out_h || + major_idx_base >= p.major_dim) { + return; + } + + int mid_y = out_y * p.down_y + p.up_y - 1 - p.pad_y0; + int in_y = min(max(floor_div(mid_y, p.up_y), 0), p.in_h); + int h = min(max(floor_div(mid_y + p.kernel_h, p.up_y), 0), p.in_h) - in_y; + int kernel_y = mid_y + p.kernel_h - (in_y + 1) * p.up_y; + + for (int loop_major = 0, major_idx = major_idx_base; + loop_major < p.loop_major && major_idx < p.major_dim; + loop_major++, major_idx++) { + for (int loop_x = 0, out_x = out_x_base; + loop_x < p.loop_x && out_x < p.out_w; loop_x++, out_x += blockDim.y) { + int mid_x = out_x * p.down_x + p.up_x - 1 - p.pad_x0; + int in_x = min(max(floor_div(mid_x, p.up_x), 0), p.in_w); + int w = min(max(floor_div(mid_x + p.kernel_w, p.up_x), 0), p.in_w) - in_x; + int kernel_x = mid_x + p.kernel_w - (in_x + 1) * p.up_x; + + const scalar_t *x_p = + &input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + + minor_idx]; + const scalar_t *k_p = &kernel[kernel_y * p.kernel_w + kernel_x]; + int x_px = p.minor_dim; + int k_px = -p.up_x; + int x_py = p.in_w * p.minor_dim; + int k_py = -p.up_y * p.kernel_w; + + scalar_t v = 0.0f; + + for (int y = 0; y < h; y++) { + for (int x = 0; x < w; x++) { + v += static_cast(*x_p) * static_cast(*k_p); + x_p += x_px; + k_p += k_px; + } + + x_p += x_py - w * x_px; + k_p += k_py - w * k_px; + } + + out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + + minor_idx] = v; + } + } +} + +template +__global__ void upfirdn2d_kernel(scalar_t *out, const scalar_t *input, + const scalar_t *kernel, + const UpFirDn2DKernelParams p) { + const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1; + const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1; + + __shared__ volatile float sk[kernel_h][kernel_w]; + __shared__ volatile float sx[tile_in_h][tile_in_w]; + + int minor_idx = blockIdx.x; + int tile_out_y = 
minor_idx / p.minor_dim; + minor_idx -= tile_out_y * p.minor_dim; + tile_out_y *= tile_out_h; + int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w; + int major_idx_base = blockIdx.z * p.loop_major; + + if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h | + major_idx_base >= p.major_dim) { + return; + } + + for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w; + tap_idx += blockDim.x) { + int ky = tap_idx / kernel_w; + int kx = tap_idx - ky * kernel_w; + scalar_t v = 0.0; + + if (kx < p.kernel_w & ky < p.kernel_h) { + v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)]; + } + + sk[ky][kx] = v; + } + + for (int loop_major = 0, major_idx = major_idx_base; + loop_major < p.loop_major & major_idx < p.major_dim; + loop_major++, major_idx++) { + for (int loop_x = 0, tile_out_x = tile_out_x_base; + loop_x < p.loop_x & tile_out_x < p.out_w; + loop_x++, tile_out_x += tile_out_w) { + int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0; + int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0; + int tile_in_x = floor_div(tile_mid_x, up_x); + int tile_in_y = floor_div(tile_mid_y, up_y); + + __syncthreads(); + + for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; + in_idx += blockDim.x) { + int rel_in_y = in_idx / tile_in_w; + int rel_in_x = in_idx - rel_in_y * tile_in_w; + int in_x = rel_in_x + tile_in_x; + int in_y = rel_in_y + tile_in_y; + + scalar_t v = 0.0; + + if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) { + v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * + p.minor_dim + + minor_idx]; + } + + sx[rel_in_y][rel_in_x] = v; + } + + __syncthreads(); + for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; + out_idx += blockDim.x) { + int rel_out_y = out_idx / tile_out_w; + int rel_out_x = out_idx - rel_out_y * tile_out_w; + int out_x = rel_out_x + tile_out_x; + int out_y = rel_out_y + tile_out_y; + + int mid_x = tile_mid_x + rel_out_x * down_x; + int mid_y = tile_mid_y + rel_out_y * down_y; + int in_x = floor_div(mid_x, up_x); + int in_y = floor_div(mid_y, up_y); + int rel_in_x = in_x - tile_in_x; + int rel_in_y = in_y - tile_in_y; + int kernel_x = (in_x + 1) * up_x - mid_x - 1; + int kernel_y = (in_y + 1) * up_y - mid_y - 1; + + scalar_t v = 0.0; + +#pragma unroll + for (int y = 0; y < kernel_h / up_y; y++) +#pragma unroll + for (int x = 0; x < kernel_w / up_x; x++) + v += sx[rel_in_y + y][rel_in_x + x] * + sk[kernel_y + y * up_y][kernel_x + x * up_x]; + + if (out_x < p.out_w & out_y < p.out_h) { + out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + + minor_idx] = v; + } + } + } + } +} + +torch::Tensor upfirdn2d_op(const torch::Tensor &input, + const torch::Tensor &kernel, int up_x, int up_y, + int down_x, int down_y, int pad_x0, int pad_x1, + int pad_y0, int pad_y1) { + int curDevice = -1; + cudaGetDevice(&curDevice); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + UpFirDn2DKernelParams p; + + auto x = input.contiguous(); + auto k = kernel.contiguous(); + + p.major_dim = x.size(0); + p.in_h = x.size(1); + p.in_w = x.size(2); + p.minor_dim = x.size(3); + p.kernel_h = k.size(0); + p.kernel_w = k.size(1); + p.up_x = up_x; + p.up_y = up_y; + p.down_x = down_x; + p.down_y = down_y; + p.pad_x0 = pad_x0; + p.pad_x1 = pad_x1; + p.pad_y0 = pad_y0; + p.pad_y1 = pad_y1; + + p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / + p.down_y; + p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / + p.down_x; + + auto out = + 
at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options()); + + int mode = -1; + + int tile_out_h = -1; + int tile_out_w = -1; + + if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && + p.kernel_h <= 4 && p.kernel_w <= 4) { + mode = 1; + tile_out_h = 16; + tile_out_w = 64; + } + + if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && + p.kernel_h <= 3 && p.kernel_w <= 3) { + mode = 2; + tile_out_h = 16; + tile_out_w = 64; + } + + if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && + p.kernel_h <= 4 && p.kernel_w <= 4) { + mode = 3; + tile_out_h = 16; + tile_out_w = 64; + } + + if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && + p.kernel_h <= 2 && p.kernel_w <= 2) { + mode = 4; + tile_out_h = 16; + tile_out_w = 64; + } + + if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && + p.kernel_h <= 4 && p.kernel_w <= 4) { + mode = 5; + tile_out_h = 8; + tile_out_w = 32; + } + + if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && + p.kernel_h <= 2 && p.kernel_w <= 2) { + mode = 6; + tile_out_h = 8; + tile_out_w = 32; + } + + dim3 block_size; + dim3 grid_size; + + if (tile_out_h > 0 && tile_out_w > 0) { + p.loop_major = (p.major_dim - 1) / 16384 + 1; + p.loop_x = 1; + block_size = dim3(32 * 8, 1, 1); + grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim, + (p.out_w - 1) / (p.loop_x * tile_out_w) + 1, + (p.major_dim - 1) / p.loop_major + 1); + } else { + p.loop_major = (p.major_dim - 1) / 16384 + 1; + p.loop_x = 4; + block_size = dim3(4, 32, 1); + grid_size = dim3((p.out_h * p.minor_dim - 1) / block_size.x + 1, + (p.out_w - 1) / (p.loop_x * block_size.y) + 1, + (p.major_dim - 1) / p.loop_major + 1); + } + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] { + switch (mode) { + case 1: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + case 2: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + case 3: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + case 4: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + case 5: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + case 6: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + default: + upfirdn2d_kernel_large<<>>( + out.data_ptr(), x.data_ptr(), + k.data_ptr(), p); + } + }); + + return out; +} \ No newline at end of file diff --git a/vtoonify/model/stylegan/prepare_data.py b/vtoonify/model/stylegan/prepare_data.py new file mode 100644 index 0000000000000000000000000000000000000000..aa385d0ac13550e1ae5513f7a20b35997a5c3ea6 --- /dev/null +++ b/vtoonify/model/stylegan/prepare_data.py @@ -0,0 +1,105 @@ +import argparse +from io import BytesIO +import multiprocessing +from functools import partial + +import os +from PIL import Image +import lmdb +from tqdm import tqdm +from torchvision import datasets +from torchvision.transforms import functional as trans_fn + + +def resize_and_convert(img, size, resample, quality=100): + img = trans_fn.resize(img, size, resample) + img = trans_fn.center_crop(img, size) + buffer = BytesIO() + img.save(buffer, format="jpeg", quality=quality) + val = buffer.getvalue() + + return val + + +def resize_multiple( + img, sizes=(128, 256, 512, 1024), resample=Image.LANCZOS, quality=100 +): + imgs = [] + + for size in sizes: + 
imgs.append(resize_and_convert(img, size, resample, quality)) + + return imgs + + +def resize_worker(img_file, sizes, resample): + i, file = img_file + img = Image.open(file) + img = img.convert("RGB") + out = resize_multiple(img, sizes=sizes, resample=resample) + + return i, out + + +def prepare( + env, dataset, n_worker, sizes=(128, 256, 512, 1024), resample=Image.LANCZOS +): + resize_fn = partial(resize_worker, sizes=sizes, resample=resample) + + files = sorted(dataset.imgs, key=lambda x: x[0]) + files = [(i, file) for i, (file, label) in enumerate(files)] + total = 0 + + with multiprocessing.Pool(n_worker) as pool: + for i, imgs in tqdm(pool.imap_unordered(resize_fn, files)): + for size, img in zip(sizes, imgs): + key = f"{size}-{str(i).zfill(5)}".encode("utf-8") + + with env.begin(write=True) as txn: + txn.put(key, img) + + total += 1 + + with env.begin(write=True) as txn: + txn.put("length".encode("utf-8"), str(total).encode("utf-8")) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Preprocess images for model training") + parser.add_argument("--out", type=str, help="filename of the result lmdb dataset") + parser.add_argument( + "--size", + type=str, + default="128,256,512,1024", + help="resolutions of images for the dataset", + ) + parser.add_argument( + "--n_worker", + type=int, + default=8, + help="number of workers for preparing dataset", + ) + parser.add_argument( + "--resample", + type=str, + default="lanczos", + help="resampling methods for resizing images", + ) + parser.add_argument("path", type=str, help="path to the image dataset") + + args = parser.parse_args() + + if not os.path.exists(args.out): + os.makedirs(args.out) + + resample_map = {"lanczos": Image.LANCZOS, "bilinear": Image.BILINEAR} + resample = resample_map[args.resample] + + sizes = [int(s.strip()) for s in args.size.split(",")] + + print(f"Make dataset of image sizes:", ", ".join(str(s) for s in sizes)) + + imgset = datasets.ImageFolder(args.path) + + with lmdb.open(args.out, map_size=1024 ** 4, readahead=False) as env: + prepare(env, imgset, args.n_worker, sizes=sizes, resample=resample) diff --git a/vtoonify/model/stylegan/readme.md b/vtoonify/model/stylegan/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..c0f2bce780fe2d7a9239c944b165eee7bcdeb9cb --- /dev/null +++ b/vtoonify/model/stylegan/readme.md @@ -0,0 +1,7 @@ +# StyleGAN 2 in PyTorch + +Implementation of Analyzing and Improving the Image Quality of StyleGAN (https://arxiv.org/abs/1912.04958) in PyTorch + +Fork from [https://github.com/rosinality/stylegan2-pytorch](https://github.com/rosinality/stylegan2-pytorch) + +In VToonify, we modify it to accept z+ latent codes. 
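The z+ code mentioned above is a per-layer stack of z latents. VToonify maps it into the generator's W+ space through the shared mapping network (see `VToonify.zplus2wplus` in `model/vtoonify.py` below). A minimal sketch of that mapping, assuming `g` is a loaded StyleGAN2 `Generator` from `model/stylegan/model.py` whose `style` attribute is the mapping network:

```python
import torch

def zplus2wplus(g, zplus: torch.Tensor) -> torch.Tensor:
    # zplus has shape (batch, n_latent, 512): one z code per generator layer.
    # Each code is pushed through the shared mapping network g.style
    # independently, then the layer dimension is restored.
    b, n, d = zplus.shape
    return g.style(zplus.reshape(b * n, d)).reshape(b, n, d)
```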
diff --git a/vtoonify/model/vgg.py b/vtoonify/model/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..a1043d5bd8bdd0d1484d2270ae0d33c29495856c --- /dev/null +++ b/vtoonify/model/vgg.py @@ -0,0 +1,60 @@ +import torch +import torch.nn as nn +import torchvision + +# VGG architecter, used for the perceptual loss using a pretrained VGG network +class VGG19(torch.nn.Module): + def __init__(self, requires_grad=False): + super().__init__() + vgg_pretrained_features = torchvision.models.vgg19(pretrained=True).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.slice6 = torch.nn.Sequential() + for x in range(2): + self.slice1.add_module(str(x), vgg_pretrained_features[x]) + for x in range(2, 7): + self.slice2.add_module(str(x), vgg_pretrained_features[x]) + for x in range(7, 12): + self.slice3.add_module(str(x), vgg_pretrained_features[x]) + for x in range(12, 21): + self.slice4.add_module(str(x), vgg_pretrained_features[x]) + for x in range(21, 32): + self.slice5.add_module(str(x), vgg_pretrained_features[x]) + for x in range(32, 36): + self.slice6.add_module(str(x), vgg_pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + self.pool = nn.AdaptiveAvgPool2d(output_size=1) + + self.mean = torch.tensor([0.485, 0.456, 0.406]).view(1,-1, 1, 1).cuda() * 2 - 1 + self.std = torch.tensor([0.229, 0.224, 0.225]).view(1,-1, 1, 1).cuda() * 2 + + def forward(self, X): # relui_1 + X = (X-self.mean)/self.std + h_relu1 = self.slice1(X) + h_relu2 = self.slice2(h_relu1) + h_relu3 = self.slice3(h_relu2) + h_relu4 = self.slice4(h_relu3) + h_relu5 = self.slice5[:-2](h_relu4) + out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5] + return out + +# Perceptual loss that uses a pretrained VGG network +class VGGLoss(nn.Module): + def __init__(self): + super(VGGLoss, self).__init__() + self.vgg = VGG19().cuda() + self.criterion = nn.L1Loss() + self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0] + + def forward(self, x, y): + x_vgg, y_vgg = self.vgg(x), self.vgg(y) + loss = 0 + for i in range(len(x_vgg)): + loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()) + return loss \ No newline at end of file diff --git a/vtoonify/model/vtoonify.py b/vtoonify/model/vtoonify.py new file mode 100644 index 0000000000000000000000000000000000000000..6556a0a6c734be5f413f4683eb63c44f449c6af8 --- /dev/null +++ b/vtoonify/model/vtoonify.py @@ -0,0 +1,286 @@ +import torch +import numpy as np +import math +from torch import nn +from model.stylegan.model import ConvLayer, EqualLinear, Generator, ResBlock +from model.dualstylegan import AdaptiveInstanceNorm, AdaResBlock, DualStyleGAN +import torch.nn.functional as F + +# IC-GAN: stylegan discriminator +class ConditionalDiscriminator(nn.Module): + def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], use_condition=False, style_num=None): + super().__init__() + + channels = { + 4: 512, + 8: 512, + 16: 512, + 32: 512, + 64: 256 * channel_multiplier, + 128: 128 * channel_multiplier, + 256: 64 * channel_multiplier, + 512: 32 * channel_multiplier, + 1024: 16 * channel_multiplier, + } + + convs = [ConvLayer(3, channels[size], 1)] + + log_size = int(math.log(size, 2)) + + in_channel = channels[size] + + for i in range(log_size, 2, -1): + out_channel = channels[2 ** (i - 1)] + + convs.append(ResBlock(in_channel, out_channel, 
blur_kernel)) + + in_channel = out_channel + + self.convs = nn.Sequential(*convs) + + self.stddev_group = 4 + self.stddev_feat = 1 + self.use_condition = use_condition + + if self.use_condition: + self.condition_dim = 128 + # map style degree to 64-dimensional vector + self.label_mapper = nn.Sequential( + nn.Linear(1, 64), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Linear(64, 64), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Linear(64, self.condition_dim//2), + ) + # map style code index to 64-dimensional vector + self.style_mapper = nn.Embedding(style_num, self.condition_dim-self.condition_dim//2) + else: + self.condition_dim = 1 + + self.final_conv = ConvLayer(in_channel + 1, channels[4], 3) + self.final_linear = nn.Sequential( + EqualLinear(channels[4] * 4 * 4, channels[4], activation="fused_lrelu"), + EqualLinear(channels[4], self.condition_dim), + ) + + def forward(self, input, degree_label=None, style_ind=None): + out = self.convs(input) + + batch, channel, height, width = out.shape + group = min(batch, self.stddev_group) + stddev = out.view( + group, -1, self.stddev_feat, channel // self.stddev_feat, height, width + ) + stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) + stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) + stddev = stddev.repeat(group, 1, height, width) + out = torch.cat([out, stddev], 1) + + out = self.final_conv(out) + out = out.view(batch, -1) + + if self.use_condition: + h = self.final_linear(out) + condition = torch.cat((self.label_mapper(degree_label), self.style_mapper(style_ind)), dim=1) + out = (h * condition).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.condition_dim)) + else: + out = self.final_linear(out) + + return out + + +class VToonifyResBlock(nn.Module): + def __init__(self, fin): + super().__init__() + + self.conv = nn.Conv2d(fin, fin, 3, 1, 1) + self.conv2 = nn.Conv2d(fin, fin, 3, 1, 1) + self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + + def forward(self, x): + out = self.lrelu(self.conv(x)) + out = self.lrelu(self.conv2(out)) + out = (out + x) / math.sqrt(2) + return out + +class Fusion(nn.Module): + def __init__(self, in_channels, skip_channels, out_channels): + super().__init__() + + # create conv layers + self.conv = nn.Conv2d(in_channels + skip_channels, out_channels, 3, 1, 1, bias=True) + self.norm = AdaptiveInstanceNorm(in_channels + skip_channels, 128) + self.conv2 = nn.Conv2d(in_channels + skip_channels, 1, 3, 1, 1, bias=True) + #''' + self.linear = nn.Sequential( + nn.Linear(1, 64), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Linear(64, 128), + nn.LeakyReLU(negative_slope=0.2, inplace=True) + ) + + def forward(self, f_G, f_E, d_s=1): + # label of style degree + label = self.linear(torch.zeros(f_G.size(0),1).to(f_G.device) + d_s) + out = torch.cat([f_G, abs(f_G-f_E)], dim=1) + m_E = (F.relu(self.conv2(self.norm(out, label)))).tanh() + f_out = self.conv(torch.cat([f_G, f_E * m_E], dim=1)) + return f_out, m_E + +class VToonify(nn.Module): + def __init__(self, + in_size=256, + out_size=1024, + img_channels=3, + style_channels=512, + num_mlps=8, + channel_multiplier=2, + num_res_layers=6, + backbone = 'dualstylegan', + ): + + super().__init__() + + self.backbone = backbone + if self.backbone == 'dualstylegan': + # DualStyleGAN, with weights being fixed + self.generator = DualStyleGAN(out_size, style_channels, num_mlps, channel_multiplier) + else: + # StyleGANv2, with weights being fixed + self.generator = Generator(out_size, style_channels, num_mlps, channel_multiplier) + + 
self.in_size = in_size + self.style_channels = style_channels + channels = self.generator.channels + + # encoder + num_styles = int(np.log2(out_size)) * 2 - 2 + encoder_res = [2**i for i in range(int(np.log2(in_size)), 4, -1)] + self.encoder = nn.ModuleList() + self.encoder.append( + nn.Sequential( + nn.Conv2d(img_channels+19, 32, 3, 1, 1, bias=True), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Conv2d(32, channels[in_size], 3, 1, 1, bias=True), + nn.LeakyReLU(negative_slope=0.2, inplace=True))) + + for res in encoder_res: + in_channels = channels[res] + if res > 32: + out_channels = channels[res // 2] + block = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, 2, 1, bias=True), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=True), + nn.LeakyReLU(negative_slope=0.2, inplace=True)) + self.encoder.append(block) + else: + layers = [] + for _ in range(num_res_layers): + layers.append(VToonifyResBlock(in_channels)) + self.encoder.append(nn.Sequential(*layers)) + block = nn.Conv2d(in_channels, img_channels, 1, 1, 0, bias=True) + self.encoder.append(block) + + # trainable fusion module + self.fusion_out = nn.ModuleList() + self.fusion_skip = nn.ModuleList() + for res in encoder_res[::-1]: + num_channels = channels[res] + if self.backbone == 'dualstylegan': + self.fusion_out.append( + Fusion(num_channels, num_channels, num_channels)) + else: + self.fusion_out.append( + nn.Conv2d(num_channels * 2, num_channels, 3, 1, 1, bias=True)) + + self.fusion_skip.append( + nn.Conv2d(num_channels + 3, 3, 3, 1, 1, bias=True)) + + # Modified ModRes blocks in DualStyleGAN, with weights being fixed + if self.backbone == 'dualstylegan': + self.res = nn.ModuleList() + self.res.append(AdaResBlock(self.generator.channels[2 ** 2])) # for conv1, no use in this model + for i in range(3, 6): + out_channel = self.generator.channels[2 ** i] + self.res.append(AdaResBlock(out_channel, dilation=2**(5-i))) + self.res.append(AdaResBlock(out_channel, dilation=2**(5-i))) + + + def forward(self, x, style, d_s=None, return_mask=False, return_feat=False): + # map style to W+ space + if style is not None and style.ndim < 3: + if self.backbone == 'dualstylegan': + resstyles = self.generator.style(style).unsqueeze(1).repeat(1, self.generator.n_latent, 1) + adastyles = style.unsqueeze(1).repeat(1, self.generator.n_latent, 1) + elif style is not None: + nB, nL, nD = style.shape + if self.backbone == 'dualstylegan': + resstyles = self.generator.style(style.reshape(nB*nL, nD)).reshape(nB, nL, nD) + adastyles = style + if self.backbone == 'dualstylegan': + adastyles = adastyles.clone() + for i in range(7, self.generator.n_latent): + adastyles[:, i] = self.generator.res[i](adastyles[:, i]) + + # obtain multi-scale content features + feat = x + encoder_features = [] + # downsampling conv parts of E + for block in self.encoder[:-2]: + feat = block(feat) + encoder_features.append(feat) + encoder_features = encoder_features[::-1] + # Resblocks in E + for ii, block in enumerate(self.encoder[-2]): + feat = block(feat) + # adjust Resblocks with ModRes blocks + if self.backbone == 'dualstylegan': + feat = self.res[ii+1](feat, resstyles[:, ii+1], d_s) + # the last-layer feature of E (inputs of backbone) + out = feat + skip = self.encoder[-1](feat) + if return_feat: + return out, skip + + # 32x32 ---> higher res + _index = 1 + m_Es = [] + for conv1, conv2, to_rgb in zip( + self.stylegan().convs[6::2], self.stylegan().convs[7::2], self.stylegan().to_rgbs[3:]): + + # pass the 
mid-layer features of E to the corresponding resolution layers of G + if 2 ** (5+((_index-1)//2)) <= self.in_size: + fusion_index = (_index - 1) // 2 + f_E = encoder_features[fusion_index] + + if self.backbone == 'dualstylegan': + out, m_E = self.fusion_out[fusion_index](out, f_E, d_s) + skip = self.fusion_skip[fusion_index](torch.cat([skip, f_E*m_E], dim=1)) + m_Es += [m_E] + else: + out = self.fusion_out[fusion_index](torch.cat([out, f_E], dim=1)) + skip = self.fusion_skip[fusion_index](torch.cat([skip, f_E], dim=1)) + + # remove the noise input + batch, _, height, width = out.shape + noise = x.new_empty(batch, 1, height * 2, width * 2).normal_().detach() * 0.0 + + out = conv1(out, adastyles[:, _index+6], noise=noise) + out = conv2(out, adastyles[:, _index+7], noise=noise) + skip = to_rgb(out, adastyles[:, _index+8], skip) + _index += 2 + + image = skip + if return_mask and self.backbone == 'dualstylegan': + return image, m_Es + return image + + def stylegan(self): + if self.backbone == 'dualstylegan': + return self.generator.generator + else: + return self.generator + + def zplus2wplus(self, zplus): + return self.stylegan().style(zplus.reshape(zplus.shape[0]*zplus.shape[1], zplus.shape[2])).reshape(zplus.shape) \ No newline at end of file diff --git a/vtoonify/smooth_parsing_map.py b/vtoonify/smooth_parsing_map.py new file mode 100644 index 0000000000000000000000000000000000000000..7720d0c7786925db38d3e793d6a3a8f68f6e663e --- /dev/null +++ b/vtoonify/smooth_parsing_map.py @@ -0,0 +1,172 @@ +import os +#os.environ['CUDA_VISIBLE_DEVICES'] = "0" +import numpy as np +import cv2 +import math +import argparse +from tqdm import tqdm +import torch +from torch import nn +from torchvision import transforms +import torch.nn.functional as F +from model.raft.core.raft import RAFT +from model.raft.core.utils.utils import InputPadder +from model.bisenet.model import BiSeNet +from model.stylegan.model import Downsample + +class Options(): + def __init__(self): + + self.parser = argparse.ArgumentParser(description="Smooth Parsing Maps") + self.parser.add_argument("--window_size", type=int, default=5, help="temporal window size") + + self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model") + self.parser.add_argument("--raft_path", type=str, default='./checkpoint/raft-things.pth', help="path of the RAFT model") + + self.parser.add_argument("--video_path", type=str, help="path of the target video") + self.parser.add_argument("--output_path", type=str, default='./output/', help="path of the output parsing maps") + + def parse(self): + self.opt = self.parser.parse_args() + args = vars(self.opt) + print('Load options') + for name, value in sorted(args.items()): + print('%s: %s' % (str(name), str(value))) + return self.opt + +# from RAFT +def warp(x, flo): + """ + warp an image/tensor (im2) back to im1, according to the optical flow + x: [B, C, H, W] (im2) + flo: [B, 2, H, W] flow + """ + B, C, H, W = x.size() + # mesh grid + xx = torch.arange(0, W).view(1,-1).repeat(H,1) + yy = torch.arange(0, H).view(-1,1).repeat(1,W) + xx = xx.view(1,1,H,W).repeat(B,1,1,1) + yy = yy.view(1,1,H,W).repeat(B,1,1,1) + grid = torch.cat((xx,yy),1).float() + + + #x = x.cuda() + grid = grid.cuda() + vgrid = grid + flo # B,2,H,W + + # scale grid to [-1,1] + ##2019 code + vgrid[:,0,:,:] = 2.0*vgrid[:,0,:,:].clone()/max(W-1,1)-1.0 + vgrid[:,1,:,:] = 2.0*vgrid[:,1,:,:].clone()/max(H-1,1)-1.0 + + vgrid = vgrid.permute(0,2,3,1) + output = 
nn.functional.grid_sample(x, vgrid,align_corners=True) + mask = torch.autograd.Variable(torch.ones(x.size())).cuda() + mask = nn.functional.grid_sample(mask, vgrid,align_corners=True) + + ##2019 author + mask[mask<0.9999] = 0 + mask[mask>0] = 1 + + ##2019 code + # mask = torch.floor(torch.clamp(mask, 0 ,1)) + + return output*mask, mask + + +if __name__ == "__main__": + + parser = Options() + args = parser.parse() + print('*'*98) + + + device = "cuda" + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]), + ]) + + parser = argparse.ArgumentParser() + parser.add_argument('--model', help="restore checkpoint") + parser.add_argument('--small', action='store_true', help='use small model') + parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision') + parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation') + + raft_model = torch.nn.DataParallel(RAFT(parser.parse_args(['--model', args.raft_path]))) + raft_model.load_state_dict(torch.load(args.raft_path)) + + raft_model = raft_model.module + raft_model.to(device) + raft_model.eval() + + parsingpredictor = BiSeNet(n_classes=19) + parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage)) + parsingpredictor.to(device).eval() + + down = Downsample(kernel=[1, 3, 3, 1], factor=2).to(device).eval() + + print('Load models successfully!') + + window = args.window_size + + video_cap = cv2.VideoCapture(args.video_path) + num = int(video_cap.get(7)) + + Is = [] + for i in range(num): + success, frame = video_cap.read() + if success == False: + break + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + with torch.no_grad(): + Is += [transform(frame).unsqueeze(dim=0).cpu()] + video_cap.release() + + # enlarge frames for more accurate parsing maps and optical flows + Is = F.upsample(torch.cat(Is, dim=0), scale_factor=2, mode='bilinear') + Is_ = torch.cat((Is[0:window], Is, Is[-window:]), dim=0) + + print('Load video with %d frames successfully!'%(len(Is))) + + Ps = [] + for i in tqdm(range(len(Is))): + with torch.no_grad(): + Ps += [parsingpredictor(2*Is[i:i+1].to(device))[0].detach().cpu()] + Ps = torch.cat(Ps, dim=0) + Ps_ = torch.cat((Ps[0:window], Ps, Ps[-window:]), dim=0) + + print('Predict parsing maps successfully!') + + + # temporal weights of the (2*args.window_size+1) frames + wt = torch.exp(-(torch.arange(2*window+1).float()-window)**2/(2*((window+0.5)**2))).reshape(2*window+1,1,1,1).to(device) + + parse = [] + for ii in tqdm(range(len(Is))): + i = ii + window + image2 = Is_[i-window:i+window+1].to(device) + image1 = Is_[i].repeat(2*window+1,1,1,1).to(device) + padder = InputPadder(image1.shape) + image1, image2 = padder.pad(image1, image2) + with torch.no_grad(): + flow_low, flow_up = raft_model((image1+1)*255.0/2, (image2+1)*255.0/2, iters=20, test_mode=True) + output, mask = warp(torch.cat((image2, Ps_[i-window:i+window+1].to(device)), dim=1), flow_up) + aligned_Is = output[:,0:3].detach() + aligned_Ps = output[:,3:].detach() + # the spatial weight + ws = torch.exp(-((aligned_Is-image1)**2).mean(dim=1, keepdims=True)/(2*(0.2**2))) * mask[:,0:1] + aligned_Ps[window] = Ps_[i].to(device) + # the weight between i and i shoud be 1.0 + ws[window,:,:,:] = 1.0 + weights = ws*wt + weights = weights / weights.sum(dim=(0), keepdims=True) + fused_Ps = (aligned_Ps * weights).sum(dim=0, keepdims=True) + parse += [down(fused_Ps).detach().cpu()] + parse = 
torch.cat(parse, dim=0) + + basename = os.path.basename(args.video_path).split('.')[0] + np.save(os.path.join(args.output_path, basename+'_parsingmap.npy'), parse.numpy()) + + print('Done!') \ No newline at end of file diff --git a/vtoonify/style_transfer.py b/vtoonify/style_transfer.py new file mode 100644 index 0000000000000000000000000000000000000000..3e6ba13ca84dc595dfa9eb9ef85a638889d8cdd3 --- /dev/null +++ b/vtoonify/style_transfer.py @@ -0,0 +1,232 @@ +import os +#os.environ['CUDA_VISIBLE_DEVICES'] = "0" +import argparse +import numpy as np +import cv2 +import dlib +import torch +from torchvision import transforms +import torch.nn.functional as F +from tqdm import tqdm +from model.vtoonify import VToonify +from model.bisenet.model import BiSeNet +from model.encoder.align_all_parallel import align_face +from util import save_image, load_image, visualize, load_psp_standalone, get_video_crop_parameter, tensor2cv2 + + +class TestOptions(): + def __init__(self): + + self.parser = argparse.ArgumentParser(description="Style Transfer") + self.parser.add_argument("--content", type=str, default='./data/077436.jpg', help="path of the content image/video") + self.parser.add_argument("--style_id", type=int, default=26, help="the id of the style image") + self.parser.add_argument("--style_degree", type=float, default=0.5, help="style degree for VToonify-D") + self.parser.add_argument("--color_transfer", action="store_true", help="transfer the color of the style") + self.parser.add_argument("--ckpt", type=str, default='./checkpoint/vtoonify_d_cartoon/vtoonify_s_d.pt', help="path of the saved model") + self.parser.add_argument("--output_path", type=str, default='./output/', help="path of the output images") + self.parser.add_argument("--scale_image", action="store_true", help="resize and crop the image to best fit the model") + self.parser.add_argument("--style_encoder_path", type=str, default='./checkpoint/encoder.pt', help="path of the style encoder") + self.parser.add_argument("--exstyle_path", type=str, default=None, help="path of the extrinsic style code") + self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model") + self.parser.add_argument("--video", action="store_true", help="if true, video stylization; if false, image stylization") + self.parser.add_argument("--cpu", action="store_true", help="if true, only use cpu") + self.parser.add_argument("--backbone", type=str, default='dualstylegan', help="dualstylegan | toonify") + self.parser.add_argument("--padding", type=int, nargs=4, default=[200,200,200,200], help="left, right, top, bottom paddings to the face center") + self.parser.add_argument("--batch_size", type=int, default=4, help="batch size of frames when processing video") + self.parser.add_argument("--parsing_map_path", type=str, default=None, help="path of the refined parsing map of the target video") + + def parse(self): + self.opt = self.parser.parse_args() + if self.opt.exstyle_path is None: + self.opt.exstyle_path = os.path.join(os.path.dirname(self.opt.ckpt), 'exstyle_code.npy') + args = vars(self.opt) + print('Load options') + for name, value in sorted(args.items()): + print('%s: %s' % (str(name), str(value))) + return self.opt + +if __name__ == "__main__": + + parser = TestOptions() + args = parser.parse() + print('*'*98) + + + device = "cpu" if args.cpu else "cuda" + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]), + ]) + + 
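+    # load the trained VToonify generator from the EMA weights ('g_ema') stored in the checkpoint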
vtoonify = VToonify(backbone = args.backbone) + vtoonify.load_state_dict(torch.load(args.ckpt, map_location=lambda storage, loc: storage)['g_ema']) + vtoonify.to(device) + + parsingpredictor = BiSeNet(n_classes=19) + parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage)) + parsingpredictor.to(device).eval() + + modelname = './checkpoint/shape_predictor_68_face_landmarks.dat' + if not os.path.exists(modelname): + import wget, bz2 + wget.download('http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2', modelname+'.bz2') + zipfile = bz2.BZ2File(modelname+'.bz2') + data = zipfile.read() + open(modelname, 'wb').write(data) + landmarkpredictor = dlib.shape_predictor(modelname) + + pspencoder = load_psp_standalone(args.style_encoder_path, device) + + if args.backbone == 'dualstylegan': + exstyles = np.load(args.exstyle_path, allow_pickle='TRUE').item() + stylename = list(exstyles.keys())[args.style_id] + exstyle = torch.tensor(exstyles[stylename]).to(device) + with torch.no_grad(): + exstyle = vtoonify.zplus2wplus(exstyle) + + if args.video and args.parsing_map_path is not None: + x_p_hat = torch.tensor(np.load(args.parsing_map_path)) + + print('Load models successfully!') + + + filename = args.content + basename = os.path.basename(filename).split('.')[0] + scale = 1 + kernel_1d = np.array([[0.125],[0.375],[0.375],[0.125]]) + print('Processing ' + os.path.basename(filename) + ' with vtoonify_' + args.backbone[0]) + if args.video: + cropname = os.path.join(args.output_path, basename + '_input.mp4') + savename = os.path.join(args.output_path, basename + '_vtoonify_' + args.backbone[0] + '.mp4') + + video_cap = cv2.VideoCapture(filename) + num = int(video_cap.get(7)) + + first_valid_frame = True + batch_frames = [] + for i in tqdm(range(num)): + success, frame = video_cap.read() + if success == False: + assert('load video frames error') + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + # We proprocess the video by detecting the face in the first frame, + # and resizing the frame so that the eye distance is 64 pixels. + # Centered on the eyes, we crop the first frame to almost 400x400 (based on args.padding). + # All other frames use the same resizing and cropping parameters as the first frame. + if first_valid_frame: + if args.scale_image: + paras = get_video_crop_parameter(frame, landmarkpredictor, args.padding) + if paras is None: + continue + h,w,top,bottom,left,right,scale = paras + H, W = int(bottom-top), int(right-left) + # for HR video, we apply gaussian blur to the frames to avoid flickers caused by bilinear downsampling + # this can also prevent over-sharp stylization results. + if scale <= 0.75: + frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) + if scale <= 0.375: + frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) + frame = cv2.resize(frame, (w, h))[top:bottom, left:right] + else: + H, W = frame.shape[0], frame.shape[1] + + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + videoWriter = cv2.VideoWriter(cropname, fourcc, video_cap.get(5), (W, H)) + videoWriter2 = cv2.VideoWriter(savename, fourcc, video_cap.get(5), (4*W, 4*H)) + + # For each video, we detect and align the face in the first frame for pSp to obtain the style code. + # This style code is used for all other frames. 
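+            # pSp returns a z+ latent; zplus2wplus maps it to W+ so that its first 7 (structure) layers can be replaced by, or fully swapped for, the extrinsic style code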
+ with torch.no_grad(): + I = align_face(frame, landmarkpredictor) + I = transform(I).unsqueeze(dim=0).to(device) + s_w = pspencoder(I) + s_w = vtoonify.zplus2wplus(s_w) + if vtoonify.backbone == 'dualstylegan': + if args.color_transfer: + s_w = exstyle + else: + s_w[:,:7] = exstyle[:,:7] + first_valid_frame = False + elif args.scale_image: + if scale <= 0.75: + frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) + if scale <= 0.375: + frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) + frame = cv2.resize(frame, (w, h))[top:bottom, left:right] + + videoWriter.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)) + + batch_frames += [transform(frame).unsqueeze(dim=0).to(device)] + + if len(batch_frames) == args.batch_size or (i+1) == num: + x = torch.cat(batch_frames, dim=0) + batch_frames = [] + with torch.no_grad(): + # parsing network works best on 512x512 images, so we predict parsing maps on upsmapled frames + # followed by downsampling the parsing maps + if args.video and args.parsing_map_path is not None: + x_p = x_p_hat[i+1-x.size(0):i+1].to(device) + else: + x_p = F.interpolate(parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0], + scale_factor=0.5, recompute_scale_factor=False).detach() + # we give parsing maps lower weight (1/16) + inputs = torch.cat((x, x_p/16.), dim=1) + # d_s has no effect when backbone is toonify + y_tilde = vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), d_s = args.style_degree) + y_tilde = torch.clamp(y_tilde, -1, 1) + for k in range(y_tilde.size(0)): + videoWriter2.write(tensor2cv2(y_tilde[k].cpu())) + + videoWriter.release() + videoWriter2.release() + video_cap.release() + + + else: + cropname = os.path.join(args.output_path, basename + '_input.jpg') + savename = os.path.join(args.output_path, basename + '_vtoonify_' + args.backbone[0] + '.jpg') + + frame = cv2.imread(filename) + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + + # We detect the face in the image, and resize the image so that the eye distance is 64 pixels. + # Centered on the eyes, we crop the image to almost 400x400 (based on args.padding). 
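+    # same preprocessing as the video branch: optional gaussian blur for high-resolution inputs, then resize and crop around the detected face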
+ if args.scale_image: + paras = get_video_crop_parameter(frame, landmarkpredictor, args.padding) + if paras is not None: + h,w,top,bottom,left,right,scale = paras + H, W = int(bottom-top), int(right-left) + # for HR image, we apply gaussian blur to it to avoid over-sharp stylization results + if scale <= 0.75: + frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) + if scale <= 0.375: + frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) + frame = cv2.resize(frame, (w, h))[top:bottom, left:right] + + with torch.no_grad(): + I = align_face(frame, landmarkpredictor) + I = transform(I).unsqueeze(dim=0).to(device) + s_w = pspencoder(I) + s_w = vtoonify.zplus2wplus(s_w) + if vtoonify.backbone == 'dualstylegan': + if args.color_transfer: + s_w = exstyle + else: + s_w[:,:7] = exstyle[:,:7] + + x = transform(frame).unsqueeze(dim=0).to(device) + # parsing network works best on 512x512 images, so we predict parsing maps on upsmapled frames + # followed by downsampling the parsing maps + x_p = F.interpolate(parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0], + scale_factor=0.5, recompute_scale_factor=False).detach() + # we give parsing maps lower weight (1/16) + inputs = torch.cat((x, x_p/16.), dim=1) + # d_s has no effect when backbone is toonify + y_tilde = vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), d_s = args.style_degree) + y_tilde = torch.clamp(y_tilde, -1, 1) + + cv2.imwrite(cropname, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)) + save_image(y_tilde[0].cpu(), savename) + + print('Transfer style successfully!') \ No newline at end of file diff --git a/vtoonify/train_vtoonify_d.py b/vtoonify/train_vtoonify_d.py new file mode 100644 index 0000000000000000000000000000000000000000..0c83e02d46097dad72b5e9f8ed239299d9da320a --- /dev/null +++ b/vtoonify/train_vtoonify_d.py @@ -0,0 +1,515 @@ +import os +#os.environ['CUDA_VISIBLE_DEVICES'] = "0" +import argparse +import math +import random + +import numpy as np +import torch +from torch import nn, optim +from torch.nn import functional as F +from torch.utils import data +import torch.distributed as dist +from torchvision import transforms, utils +from tqdm import tqdm +from PIL import Image +from util import * + +from model.stylegan import lpips +from model.stylegan.model import Generator, Downsample +from model.vtoonify import VToonify, ConditionalDiscriminator +from model.bisenet.model import BiSeNet +from model.simple_augment import random_apply_affine +from model.stylegan.distributed import ( + get_rank, + synchronize, + reduce_loss_dict, + reduce_sum, + get_world_size, +) + +class TrainOptions(): + def __init__(self): + + self.parser = argparse.ArgumentParser(description="Train VToonify-D") + self.parser.add_argument("--iter", type=int, default=2000, help="total training iterations") + self.parser.add_argument("--batch", type=int, default=8, help="batch sizes for each gpus") + self.parser.add_argument("--lr", type=float, default=0.0001, help="learning rate") + self.parser.add_argument("--local_rank", type=int, default=0, help="local rank for distributed training") + self.parser.add_argument("--start_iter", type=int, default=0, help="start iteration") + self.parser.add_argument("--save_every", type=int, default=30000, help="interval of saving a checkpoint") + self.parser.add_argument("--save_begin", type=int, default=30000, help="when to start saving a checkpoint") + self.parser.add_argument("--log_every", type=int, default=200, help="interval of saving a checkpoint") + + 
self.parser.add_argument("--adv_loss", type=float, default=0.01, help="the weight of adv loss") + self.parser.add_argument("--grec_loss", type=float, default=0.1, help="the weight of mse recontruction loss") + self.parser.add_argument("--perc_loss", type=float, default=0.01, help="the weight of perceptual loss") + self.parser.add_argument("--tmp_loss", type=float, default=1.0, help="the weight of temporal consistency loss") + self.parser.add_argument("--msk_loss", type=float, default=0.0005, help="the weight of attention mask loss") + + self.parser.add_argument("--fix_degree", action="store_true", help="use a fixed style degree") + self.parser.add_argument("--fix_style", action="store_true", help="use a fixed style image") + self.parser.add_argument("--fix_color", action="store_true", help="use the original color (no color transfer)") + self.parser.add_argument("--exstyle_path", type=str, default='./checkpoint/cartoon/refined_exstyle_code.npy', help="path of the extrinsic style code") + self.parser.add_argument("--style_id", type=int, default=26, help="the id of the style image") + self.parser.add_argument("--style_degree", type=float, default=0.5, help="style degree for VToonify-D") + + self.parser.add_argument("--encoder_path", type=str, default=None, help="path to the pretrained encoder model") + self.parser.add_argument("--direction_path", type=str, default='./checkpoint/directions.npy', help="path to the editing direction latents") + self.parser.add_argument("--stylegan_path", type=str, default='./checkpoint/cartoon/generator.pt', help="path to the stylegan model") + self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model") + self.parser.add_argument("--style_encoder_path", type=str, default='./checkpoint/encoder.pt', help="path of the style encoder") + + self.parser.add_argument("--name", type=str, default='vtoonify_d_cartoon', help="saved model name") + self.parser.add_argument("--pretrain", action="store_true", help="if true, only pretrain the encoder") + + def parse(self): + self.opt = self.parser.parse_args() + if self.opt.encoder_path is None: + self.opt.encoder_path = os.path.join('./checkpoint/', self.opt.name, 'pretrain.pt') + args = vars(self.opt) + if self.opt.local_rank == 0: + print('Load options') + for name, value in sorted(args.items()): + print('%s: %s' % (str(name), str(value))) + return self.opt + + +# pretrain E of vtoonify. +# We train E so that its the last-layer feature matches the original 8-th-layer input feature of G1 +# See Model initialization in Sec. 4.2.2 for the detail +def pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, styles, device): + pbar = range(args.iter) + + if get_rank() == 0: + pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01) + + recon_loss = torch.tensor(0.0, device=device) + loss_dict = {} + + if args.distributed: + g_module = generator.module + else: + g_module = generator + + accum = 0.5 ** (32 / (10 * 1000)) + + requires_grad(g_module.encoder, True) + + for idx in pbar: + i = idx + args.start_iter + + if i > args.iter: + print("Done!") + break + + # during pretraining, the last 11 layers of DualStyleGAN (for color transfer) is not used. + # so args.fix_color is not used. the last 11 elements in weight are not used. 
+ if args.fix_degree: + d_s = args.style_degree + else: + d_s = 0 if i <= args.iter / 4.0 else np.random.rand(1)[0] + weight = [d_s] * 18 + + # sample pre-saved w''=E_s(s) + if args.fix_style: + style = styles[args.style_id:args.style_id+1].repeat(args.batch,1,1) + else: + style = styles[torch.randint(0, styles.size(0), (args.batch,))] + + with torch.no_grad(): + # during pretraining, no geometric transformations are applied. + noise_sample = torch.randn(args.batch, 512).cuda() + ws_ = g_ema.stylegan().style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w + ws_[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w'=w+n + img_gen, _ = g_ema.stylegan()([ws_], input_is_latent=True, truncation=0.5, truncation_latent=0) + img_gen = torch.clamp(img_gen, -1, 1).detach() # x'' + img_gen512 = down(img_gen.detach()) + img_gen256 = down(img_gen512.detach()) # image part of x''_down + mask512 = parsingpredictor(2*torch.clamp(img_gen512, -1, 1))[0] + real_input = torch.cat((img_gen256, down(mask512)/16.0), dim=1) # x''_down + # f_G1^(8)(w', w'', d_s) + real_feat, real_skip = g_ema.generator([ws_], style, input_is_latent=True, return_feat=True, + truncation=0.5, truncation_latent=0, use_res=True, interp_weights=weight) + + real_input = real_input.detach() + real_feat = real_feat.detach() + real_skip = real_skip.detach() + + # f_E^(last)(x''_down, w'', d_s) + fake_feat, fake_skip = generator(real_input, style, d_s, return_feat=True) + + # L_E in Eq.(8) + recon_loss = F.mse_loss(fake_feat, real_feat) + F.mse_loss(fake_skip, real_skip) + + loss_dict["emse"] = recon_loss + + generator.zero_grad() + recon_loss.backward() + g_optim.step() + + accumulate(g_ema.encoder, g_module.encoder, accum) + + loss_reduced = reduce_loss_dict(loss_dict) + + emse_loss_val = loss_reduced["emse"].mean().item() + + if get_rank() == 0: + pbar.set_description( + ( + f"iter: {i:d}; emse: {emse_loss_val:.3f}" + ) + ) + + if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter: + if (i+1) == args.iter: + savename = f"checkpoint/%s/pretrain.pt"%(args.name) + else: + savename = f"checkpoint/%s/pretrain-%05d.pt"%(args.name, i+1) + torch.save( + { + #"g": g_module.encoder.state_dict(), + "g_ema": g_ema.encoder.state_dict(), + }, + savename, + ) + + +# generate paired data and train vtoonify, see Sec. 
4.2.2 for the detail +def train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, styles, device): + pbar = range(args.iter) + + if get_rank() == 0: + pbar = tqdm(pbar, initial=args.start_iter, smoothing=0.01, ncols=130, dynamic_ncols=False) + + d_loss = torch.tensor(0.0, device=device) + g_loss = torch.tensor(0.0, device=device) + grec_loss = torch.tensor(0.0, device=device) + gfeat_loss = torch.tensor(0.0, device=device) + temporal_loss = torch.tensor(0.0, device=device) + gmask_loss = torch.tensor(0.0, device=device) + loss_dict = {} + + surffix = '_s' + if args.fix_style: + surffix += '%03d'%(args.style_id) + surffix += '_d' + if args.fix_degree: + surffix += '%1.1f'%(args.style_degree) + if not args.fix_color: + surffix += '_c' + + if args.distributed: + g_module = generator.module + d_module = discriminator.module + + else: + g_module = generator + d_module = discriminator + + accum = 0.5 ** (32 / (10 * 1000)) + + for idx in pbar: + i = idx + args.start_iter + + if i > args.iter: + print("Done!") + break + + # sample style degree + if args.fix_degree or idx == 0 or i == 0: + d_s = args.style_degree + else: + d_s = np.random.randint(0,6) / 5.0 + if args.fix_color: + weight = [d_s] * 7 + [0] * 11 + else: + weight = [d_s] * 7 + [1] * 11 + # style degree condition for discriminator + degree_label = torch.zeros(args.batch, 1).to(device) + d_s + + # style index condition for discriminator + style_ind = torch.randint(0, styles.size(0), (args.batch,)) + if args.fix_style or idx == 0 or i == 0: + style_ind = style_ind * 0 + args.style_id + # sample pre-saved E_s(s) + style = styles[style_ind] + + with torch.no_grad(): + noise_sample = torch.randn(args.batch, 512).cuda() + wc = g_ema.stylegan().style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w + wc[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w'=w+n + wc = wc.detach() + xc, _ = g_ema.stylegan()([wc], input_is_latent=True, truncation=0.5, truncation_latent=0) + xc = torch.clamp(xc, -1, 1).detach() # x'' + if not args.fix_color and args.fix_style: # only transfer this fixed style's color + xl = style.clone() + else: + xl = pspencoder(F.adaptive_avg_pool2d(xc, 256)) + xl = g_ema.zplus2wplus(xl) # E_s(x''_down) + xl = torch.cat((style[:,0:7], xl[:,7:18]), dim=1).detach() # w'' = concatenate E_s(s) and E_s(x''_down) + xs, _ = g_ema.generator([wc], xl, input_is_latent=True, + truncation=0.5, truncation_latent=0, use_res=True, interp_weights=weight) + xs = torch.clamp(xs, -1, 1).detach() # y'=G1(w', w'', d_s, d_c) + # apply color jitter to w'. we fuse w' of the current iteration with w' of the last iteration + if idx > 0 and i >= (args.iter/2.0) and (not args.fix_color and not args.fix_style): + wcfuse = wc.clone() + wcfuse[:,7:] = wc_[:,7:] * (i/(args.iter/2.0)-1) + wcfuse[:,7:] * (2-i/(args.iter/2.0)) + xc, _ = g_ema.stylegan()([wcfuse], input_is_latent=True, truncation=0.5, truncation_latent=0) + xc = torch.clamp(xc, -1, 1).detach() # x' + wc_ = wc.clone() # wc_ is the w' in the last iteration + # during training, random geometric transformations are applied. 
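+        # x' and y' are concatenated along the channel dimension so that the same random affine transform is applied to the input and its stylized target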
+ imgs, _ = random_apply_affine(torch.cat((xc.detach(),xs), dim=1), 0.2, None) + real_input1024 = imgs[:,0:3].detach() # image part of x + real_input512 = down(real_input1024).detach() + real_input256 = down(real_input512).detach() + mask512 = parsingpredictor(2*real_input512)[0] + mask256 = down(mask512).detach() + mask = F.adaptive_avg_pool2d(mask512, 1024).detach() # parsing part of x + real_output = imgs[:,3:].detach() # y + real_input = torch.cat((real_input256, mask256/16.0), dim=1) # x_down + # for log, sample a fixed input-output pair (x_down, y, w'', d_s) + if idx == 0 or i == 0: + samplein = real_input.clone().detach() + sampleout = real_output.clone().detach() + samplexl = xl.clone().detach() + sampleds = d_s + + ###### This part is for training discriminator + + requires_grad(g_module.encoder, False) + requires_grad(g_module.fusion_out, False) + requires_grad(g_module.fusion_skip, False) + requires_grad(discriminator, True) + + fake_output = generator(real_input, xl, d_s) + fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256), degree_label, style_ind) + real_pred = discriminator(F.adaptive_avg_pool2d(real_output, 256), degree_label, style_ind) + + # L_adv in Eq.(3) + d_loss = d_logistic_loss(real_pred, fake_pred) * args.adv_loss + loss_dict["d"] = d_loss + + discriminator.zero_grad() + d_loss.backward() + d_optim.step() + + ###### This part is for training generator (encoder and fusion modules) + + requires_grad(g_module.encoder, True) + requires_grad(g_module.fusion_out, True) + requires_grad(g_module.fusion_skip, True) + requires_grad(discriminator, False) + + fake_output, m_Es = generator(real_input, xl, d_s, return_mask=True) + fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256), degree_label, style_ind) + + # L_adv in Eq.(3) + g_loss = g_nonsaturating_loss(fake_pred) * args.adv_loss + # L_rec in Eq.(2) + grec_loss = F.mse_loss(fake_output, real_output) * args.grec_loss + gfeat_loss = percept(F.adaptive_avg_pool2d(fake_output, 512), # 1024 will out of memory + F.adaptive_avg_pool2d(real_output, 512)).sum() * args.perc_loss # 256 will get blurry output + + # L_msk in Eq.(9) + gmask_loss = torch.tensor(0.0, device=device) + if not args.fix_degree or args.msk_loss > 0: + for jj, m_E in enumerate(m_Es): + gd_s = (1 - d_s) ** 2 * 0.9 + 0.1 + gmask_loss += F.relu(torch.mean(m_E)-gd_s) * args.msk_loss + + loss_dict["g"] = g_loss + loss_dict["gr"] = grec_loss + loss_dict["gf"] = gfeat_loss + loss_dict["msk"] = gmask_loss + + w = random.randint(0,1024-896) + h = random.randint(0,1024-896) + crop_input = torch.cat((real_input1024[:,:,w:w+896,h:h+896], mask[:,:,w:w+896,h:h+896]/16.0), dim=1).detach() + crop_input = down(down(crop_input)) + crop_fake_output = fake_output[:,:,w:w+896,h:h+896] + fake_crop_output = generator(crop_input, xl, d_s) + # L_tmp in Eq.(4), gradually increase the weight of L_tmp + temporal_loss = ((fake_crop_output-crop_fake_output)**2).mean() * max(idx/(args.iter/2.0)-1, 0) * args.tmp_loss + loss_dict["tp"] = temporal_loss + + generator.zero_grad() + (g_loss + grec_loss + gfeat_loss + temporal_loss + gmask_loss).backward() + g_optim.step() + + accumulate(g_ema.encoder, g_module.encoder, accum) + accumulate(g_ema.fusion_out, g_module.fusion_out, accum) + accumulate(g_ema.fusion_skip, g_module.fusion_skip, accum) + + loss_reduced = reduce_loss_dict(loss_dict) + + d_loss_val = loss_reduced["d"].mean().item() + g_loss_val = loss_reduced["g"].mean().item() + gr_loss_val = loss_reduced["gr"].mean().item() + gf_loss_val = 
loss_reduced["gf"].mean().item() + tmp_loss_val = loss_reduced["tp"].mean().item() + msk_loss_val = loss_reduced["msk"].mean().item() + + if get_rank() == 0: + pbar.set_description( + ( + f"iter: {i:d}; advd: {d_loss_val:.3f}; advg: {g_loss_val:.3f}; mse: {gr_loss_val:.3f}; " + f"perc: {gf_loss_val:.3f}; tmp: {tmp_loss_val:.3f}; msk: {msk_loss_val:.3f}" + ) + ) + + if i == 0 or (i+1) % args.log_every == 0 or (i+1) == args.iter: + with torch.no_grad(): + g_ema.eval() + sample1 = g_ema(samplein, samplexl, sampleds) + if args.fix_degree: + sample = F.interpolate(torch.cat((sampleout, sample1), dim=0), 256) + else: + sample2 = g_ema(samplein, samplexl, d_s) + sample = F.interpolate(torch.cat((sampleout, sample1, sample2), dim=0), 256) + utils.save_image( + sample, + f"log/%s/%05d.jpg"%(args.name, (i+1)), + nrow=int(args.batch), + normalize=True, + range=(-1, 1), + ) + + if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter: + if (i+1) == args.iter: + savename = f"checkpoint/%s/vtoonify%s.pt"%(args.name, surffix) + else: + savename = f"checkpoint/%s/vtoonify%s_%05d.pt"%(args.name, surffix, i+1) + torch.save( + { + #"g": g_module.state_dict(), + #"d": d_module.state_dict(), + "g_ema": g_ema.state_dict(), + }, + savename, + ) + + + +if __name__ == "__main__": + + device = "cuda" + parser = TrainOptions() + args = parser.parse() + if args.local_rank == 0: + print('*'*98) + if not os.path.exists("log/%s/"%(args.name)): + os.makedirs("log/%s/"%(args.name)) + if not os.path.exists("checkpoint/%s/"%(args.name)): + os.makedirs("checkpoint/%s/"%(args.name)) + + n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1 + args.distributed = n_gpu > 1 + + if args.distributed: + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend="nccl", init_method="env://") + synchronize() + + generator = VToonify(backbone = 'dualstylegan').to(device) + generator.apply(weights_init) + g_ema = VToonify(backbone = 'dualstylegan').to(device) + g_ema.eval() + + ckpt = torch.load(args.stylegan_path, map_location=lambda storage, loc: storage) + generator.generator.load_state_dict(ckpt["g_ema"], strict=False) + # load ModRes blocks of DualStyleGAN into the modified ModRes blocks (with dilation) + generator.res.load_state_dict(generator.generator.res.state_dict(), strict=False) + g_ema.generator.load_state_dict(ckpt["g_ema"], strict=False) + g_ema.res.load_state_dict(g_ema.generator.res.state_dict(), strict=False) + requires_grad(generator.generator, False) + requires_grad(generator.res, False) + requires_grad(g_ema.generator, False) + requires_grad(g_ema.res, False) + + if not args.pretrain: + generator.encoder.load_state_dict(torch.load(args.encoder_path, map_location=lambda storage, loc: storage)["g_ema"]) + # we initialize the fusion modules to map f_G \otimes f_E to f_G. 
+ for k in generator.fusion_out: + k.conv.weight.data *= 0.01 + k.conv.weight[:,0:k.conv.weight.shape[0],1,1].data += torch.eye(k.conv.weight.shape[0]).cuda() + for k in generator.fusion_skip: + k.weight.data *= 0.01 + k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda() + + accumulate(g_ema.encoder, generator.encoder, 0) + accumulate(g_ema.fusion_out, generator.fusion_out, 0) + accumulate(g_ema.fusion_skip, generator.fusion_skip, 0) + + g_parameters = list(generator.encoder.parameters()) + if not args.pretrain: + g_parameters = g_parameters + list(generator.fusion_out.parameters()) + list(generator.fusion_skip.parameters()) + + g_optim = optim.Adam( + g_parameters, + lr=args.lr, + betas=(0.9, 0.99), + ) + + if args.distributed: + generator = nn.parallel.DistributedDataParallel( + generator, + device_ids=[args.local_rank], + output_device=args.local_rank, + broadcast_buffers=False, + find_unused_parameters=True, + ) + + parsingpredictor = BiSeNet(n_classes=19) + parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage)) + parsingpredictor.to(device).eval() + requires_grad(parsingpredictor, False) + + # we apply gaussian blur to the images to avoid flickers caused during downsampling + down = Downsample(kernel=[1, 3, 3, 1], factor=2).to(device) + requires_grad(down, False) + + directions = torch.tensor(np.load(args.direction_path)).to(device) + + # load style codes of DualStyleGAN + exstyles = np.load(args.exstyle_path, allow_pickle='TRUE').item() + if args.local_rank == 0 and not os.path.exists('checkpoint/%s/exstyle_code.npy'%(args.name)): + np.save('checkpoint/%s/exstyle_code.npy'%(args.name), exstyles, allow_pickle=True) + styles = [] + with torch.no_grad(): + for stylename in exstyles.keys(): + exstyle = torch.tensor(exstyles[stylename]).to(device) + exstyle = g_ema.zplus2wplus(exstyle) + styles += [exstyle] + styles = torch.cat(styles, dim=0) + + if not args.pretrain: + discriminator = ConditionalDiscriminator(256, use_condition=True, style_num = styles.size(0)).to(device) + + d_optim = optim.Adam( + discriminator.parameters(), + lr=args.lr, + betas=(0.9, 0.99), + ) + + if args.distributed: + discriminator = nn.parallel.DistributedDataParallel( + discriminator, + device_ids=[args.local_rank], + output_device=args.local_rank, + broadcast_buffers=False, + find_unused_parameters=True, + ) + + percept = lpips.PerceptualLoss(model="net-lin", net="vgg", use_gpu=device.startswith("cuda"), gpu_ids=[args.local_rank]) + requires_grad(percept.model.net, False) + + pspencoder = load_psp_standalone(args.style_encoder_path, device) + + if args.local_rank == 0: + print('Load models and data successfully loaded!') + + if args.pretrain: + pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, styles, device) + else: + train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, styles, device) diff --git a/vtoonify/train_vtoonify_t.py b/vtoonify/train_vtoonify_t.py new file mode 100644 index 0000000000000000000000000000000000000000..147d5f38a5b25822ab05f089173cd96c6aa22c12 --- /dev/null +++ b/vtoonify/train_vtoonify_t.py @@ -0,0 +1,432 @@ +import os +#os.environ['CUDA_VISIBLE_DEVICES'] = "0" +import argparse +import math +import random + +import numpy as np +import torch +from torch import nn, optim +from torch.nn import functional as F +from torch.utils import data +import torch.distributed as dist +from torchvision import transforms, utils 
+from tqdm import tqdm +from PIL import Image +from util import * +from model.stylegan import lpips +from model.stylegan.model import Generator, Downsample +from model.vtoonify import VToonify, ConditionalDiscriminator +from model.bisenet.model import BiSeNet +from model.simple_augment import random_apply_affine +from model.stylegan.distributed import ( + get_rank, + synchronize, + reduce_loss_dict, + reduce_sum, + get_world_size, +) + +# In the paper, --weight for each style is set as follows, +# cartoon: default +# caricature: default +# pixar: 1 1 1 1 1 1 1 1 1 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 +# comic: 0.5 0.5 0.5 0.5 0.5 0.5 0.5 1 1 1 1 1 1 1 1 1 1 1 +# arcane: 0.5 0.5 0.5 0.5 0.5 0.5 0.5 1 1 1 1 1 1 1 1 1 1 1 + +class TrainOptions(): + def __init__(self): + + self.parser = argparse.ArgumentParser(description="Train VToonify-T") + self.parser.add_argument("--iter", type=int, default=2000, help="total training iterations") + self.parser.add_argument("--batch", type=int, default=8, help="batch sizes for each gpus") + self.parser.add_argument("--lr", type=float, default=0.0001, help="learning rate") + self.parser.add_argument("--local_rank", type=int, default=0, help="local rank for distributed training") + self.parser.add_argument("--start_iter", type=int, default=0, help="start iteration") + self.parser.add_argument("--save_every", type=int, default=30000, help="interval of saving a checkpoint") + self.parser.add_argument("--save_begin", type=int, default=30000, help="when to start saving a checkpoint") + self.parser.add_argument("--log_every", type=int, default=200, help="interval of saving an intermediate image result") + + self.parser.add_argument("--adv_loss", type=float, default=0.01, help="the weight of adv loss") + self.parser.add_argument("--grec_loss", type=float, default=0.1, help="the weight of mse recontruction loss") + self.parser.add_argument("--perc_loss", type=float, default=0.01, help="the weight of perceptual loss") + self.parser.add_argument("--tmp_loss", type=float, default=1.0, help="the weight of temporal consistency loss") + + self.parser.add_argument("--encoder_path", type=str, default=None, help="path to the pretrained encoder model") + self.parser.add_argument("--direction_path", type=str, default='./checkpoint/directions.npy', help="path to the editing direction latents") + self.parser.add_argument("--stylegan_path", type=str, default='./checkpoint/stylegan2-ffhq-config-f.pt', help="path to the stylegan model") + self.parser.add_argument("--finetunegan_path", type=str, default='./checkpoint/cartoon/finetune-000600.pt', help="path to the finetuned stylegan model") + self.parser.add_argument("--weight", type=float, nargs=18, default=[1]*9+[0]*9, help="the weight for blending two models") + self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model") + self.parser.add_argument("--style_encoder_path", type=str, default='./checkpoint/encoder.pt', help="path of the style encoder") + + self.parser.add_argument("--name", type=str, default='vtoonify_t_cartoon', help="saved model name") + self.parser.add_argument("--pretrain", action="store_true", help="if true, only pretrain the encoder") + + def parse(self): + self.opt = self.parser.parse_args() + if self.opt.encoder_path is None: + self.opt.encoder_path = os.path.join('./checkpoint/', self.opt.name, 'pretrain.pt') + args = vars(self.opt) + if self.opt.local_rank == 0: + print('Load options') + for name, value in sorted(args.items()): + 
print('%s: %s' % (str(name), str(value))) + return self.opt + + +# pretrain E of vtoonify. +# We train E so that its the last-layer feature matches the original 8-th-layer input feature of G1 +# See Model initialization in Sec. 4.1.2 for the detail +def pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, basemodel, device): + pbar = range(args.iter) + + if get_rank() == 0: + pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01) + + recon_loss = torch.tensor(0.0, device=device) + loss_dict = {} + + if args.distributed: + g_module = generator.module + else: + g_module = generator + + accum = 0.5 ** (32 / (10 * 1000)) + + requires_grad(g_module.encoder, True) + + for idx in pbar: + i = idx + args.start_iter + + if i > args.iter: + print("Done!") + break + + with torch.no_grad(): + # during pretraining, no geometric transformations are applied. + noise_sample = torch.randn(args.batch, 512).cuda() + ws_ = basemodel.style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w + ws_[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w''=w'=w+n + img_gen, _ = basemodel([ws_], input_is_latent=True, truncation=0.5, truncation_latent=0) # image part of x' + img_gen = torch.clamp(img_gen, -1, 1).detach() + img_gen512 = down(img_gen.detach()) + img_gen256 = down(img_gen512.detach()) # image part of x'_down + mask512 = parsingpredictor(2*torch.clamp(img_gen512, -1, 1))[0] + real_input = torch.cat((img_gen256, down(mask512)/16.0), dim=1).detach() # x'_down + # f_G1^(8)(w'') + real_feat, real_skip = g_ema.generator([ws_], input_is_latent=True, return_feature_ind = 6, truncation=0.5, truncation_latent=0) + real_feat = real_feat.detach() + real_skip = real_skip.detach() + + # f_E^(last)(x'_down) + fake_feat, fake_skip = generator(real_input, style=None, return_feat=True) + + # L_E in Eq.(1) + recon_loss = F.mse_loss(fake_feat, real_feat) + F.mse_loss(fake_skip, real_skip) + + loss_dict["emse"] = recon_loss + + generator.zero_grad() + recon_loss.backward() + g_optim.step() + + accumulate(g_ema.encoder, g_module.encoder, accum) + + loss_reduced = reduce_loss_dict(loss_dict) + + emse_loss_val = loss_reduced["emse"].mean().item() + + if get_rank() == 0: + pbar.set_description( + ( + f"iter: {i:d}; emse: {emse_loss_val:.3f}" + ) + ) + + if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter: + if (i+1) == args.iter: + savename = f"checkpoint/%s/pretrain.pt"%(args.name) + else: + savename = f"checkpoint/%s/pretrain-%05d.pt"%(args.name, i+1) + torch.save( + { + #"g": g_module.encoder.state_dict(), + "g_ema": g_ema.encoder.state_dict(), + }, + savename, + ) + + +# generate paired data and train vtoonify, see Sec. 
4.1.2 for the detail +def train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, basemodel, device): + pbar = range(args.iter) + + if get_rank() == 0: + pbar = tqdm(pbar, initial=args.start_iter, smoothing=0.01, ncols=120, dynamic_ncols=False) + + d_loss = torch.tensor(0.0, device=device) + g_loss = torch.tensor(0.0, device=device) + grec_loss = torch.tensor(0.0, device=device) + gfeat_loss = torch.tensor(0.0, device=device) + temporal_loss = torch.tensor(0.0, device=device) + loss_dict = {} + + if args.distributed: + g_module = generator.module + d_module = discriminator.module + + else: + g_module = generator + d_module = discriminator + + accum = 0.5 ** (32 / (10 * 1000)) + + for idx in pbar: + i = idx + args.start_iter + + if i > args.iter: + print("Done!") + break + + ###### This part is for data generation. Generate pair (x, y, w'') as in Fig. 5 of the paper + with torch.no_grad(): + noise_sample = torch.randn(args.batch, 512).cuda() + wc = basemodel.style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w + wc[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w'=w+n + wc = wc.detach() + xc, _ = basemodel([wc], input_is_latent=True, truncation=0.5, truncation_latent=0) + xc = torch.clamp(xc, -1, 1).detach() # x' + xl = pspencoder(F.adaptive_avg_pool2d(xc, 256)) + xl = basemodel.style(xl.reshape(xl.shape[0]*xl.shape[1], xl.shape[2])).reshape(xl.shape) # E_s(x'_down) + xl = torch.cat((wc[:,0:7]*0.5, xl[:,7:18]), dim=1).detach() # w'' = concatenate w' and E_s(x'_down) + xs, _ = g_ema.generator([xl], input_is_latent=True) + xs = torch.clamp(xs, -1, 1).detach() # y' + # during training, random geometric transformations are applied. + imgs, _ = random_apply_affine(torch.cat((xc.detach(),xs), dim=1), 0.2, None) + real_input1024 = imgs[:,0:3].detach() # image part of x + real_input512 = down(real_input1024).detach() + real_input256 = down(real_input512).detach() + mask512 = parsingpredictor(2*real_input512)[0] + mask256 = down(mask512).detach() + mask = F.adaptive_avg_pool2d(mask512, 1024).detach() # parsing part of x + real_output = imgs[:,3:].detach() # y + real_input = torch.cat((real_input256, mask256/16.0), dim=1) # x_down + # for log, sample a fixed input-output pair (x_down, y, w'') + if idx == 0 or i == 0: + samplein = real_input.clone().detach() + sampleout = real_output.clone().detach() + samplexl = xl.clone().detach() + + ###### This part is for training discriminator + + requires_grad(g_module.encoder, False) + requires_grad(g_module.fusion_out, False) + requires_grad(g_module.fusion_skip, False) + requires_grad(discriminator, True) + + fake_output = generator(real_input, xl) + fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256)) + real_pred = discriminator(F.adaptive_avg_pool2d(real_output, 256)) + + # L_adv in Eq.(3) + d_loss = d_logistic_loss(real_pred, fake_pred) * args.adv_loss + loss_dict["d"] = d_loss + + discriminator.zero_grad() + d_loss.backward() + d_optim.step() + + ###### This part is for training generator (encoder and fusion modules) + + requires_grad(g_module.encoder, True) + requires_grad(g_module.fusion_out, True) + requires_grad(g_module.fusion_skip, True) + requires_grad(discriminator, False) + + fake_output = generator(real_input, xl) + fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256)) + # L_adv in Eq.(3) + g_loss = g_nonsaturating_loss(fake_pred) * args.adv_loss + # L_rec in Eq.(2) + grec_loss = F.mse_loss(fake_output, 
real_output) * args.grec_loss + gfeat_loss = percept(F.adaptive_avg_pool2d(fake_output, 512), # 1024 will out of memory + F.adaptive_avg_pool2d(real_output, 512)).sum() * args.perc_loss # 256 will get blurry output + + loss_dict["g"] = g_loss + loss_dict["gr"] = grec_loss + loss_dict["gf"] = gfeat_loss + + w = random.randint(0,1024-896) + h = random.randint(0,1024-896) + crop_input = torch.cat((real_input1024[:,:,w:w+896,h:h+896], mask[:,:,w:w+896,h:h+896]/16.0), dim=1).detach() + crop_input = down(down(crop_input)) + crop_fake_output = fake_output[:,:,w:w+896,h:h+896] + fake_crop_output = generator(crop_input, xl) + # L_tmp in Eq.(4), gradually increase the weight of L_tmp + temporal_loss = ((fake_crop_output-crop_fake_output)**2).mean() * max(idx/(args.iter/2.0)-1, 0) * args.tmp_loss + loss_dict["tp"] = temporal_loss + + generator.zero_grad() + (g_loss + grec_loss + gfeat_loss + temporal_loss).backward() + g_optim.step() + + accumulate(g_ema.encoder, g_module.encoder, accum) + accumulate(g_ema.fusion_out, g_module.fusion_out, accum) + accumulate(g_ema.fusion_skip, g_module.fusion_skip, accum) + + loss_reduced = reduce_loss_dict(loss_dict) + + d_loss_val = loss_reduced["d"].mean().item() + g_loss_val = loss_reduced["g"].mean().item() + gr_loss_val = loss_reduced["gr"].mean().item() + gf_loss_val = loss_reduced["gf"].mean().item() + tmp_loss_val = loss_reduced["tp"].mean().item() + + if get_rank() == 0: + pbar.set_description( + ( + f"iter: {i:d}; advd: {d_loss_val:.3f}; advg: {g_loss_val:.3f}; mse: {gr_loss_val:.3f}; " + f"perc: {gf_loss_val:.3f}; tmp: {tmp_loss_val:.3f}" + ) + ) + + if i % args.log_every == 0 or (i+1) == args.iter: + with torch.no_grad(): + g_ema.eval() + sample = g_ema(samplein, samplexl) + sample = F.interpolate(torch.cat((sampleout, sample), dim=0), 256) + utils.save_image( + sample, + f"log/%s/%05d.jpg"%(args.name, i), + nrow=int(args.batch), + normalize=True, + range=(-1, 1), + ) + + if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter: + if (i+1) == args.iter: + savename = f"checkpoint/%s/vtoonify.pt"%(args.name) + else: + savename = f"checkpoint/%s/vtoonify_%05d.pt"%(args.name, i+1) + torch.save( + { + #"g": g_module.state_dict(), + #"d": d_module.state_dict(), + "g_ema": g_ema.state_dict(), + }, + savename, + ) + + + +if __name__ == "__main__": + + device = "cuda" + parser = TrainOptions() + args = parser.parse() + if args.local_rank == 0: + print('*'*98) + if not os.path.exists("log/%s/"%(args.name)): + os.makedirs("log/%s/"%(args.name)) + if not os.path.exists("checkpoint/%s/"%(args.name)): + os.makedirs("checkpoint/%s/"%(args.name)) + + n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1 + args.distributed = n_gpu > 1 + + if args.distributed: + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend="nccl", init_method="env://") + synchronize() + + generator = VToonify(backbone = 'toonify').to(device) + generator.apply(weights_init) + g_ema = VToonify(backbone = 'toonify').to(device) + g_ema.eval() + + basemodel = Generator(1024, 512, 8, 2).to(device) # G0 + finetunemodel = Generator(1024, 512, 8, 2).to(device) + basemodel.load_state_dict(torch.load(args.stylegan_path, map_location=lambda storage, loc: storage)['g_ema']) + finetunemodel.load_state_dict(torch.load(args.finetunegan_path, map_location=lambda storage, loc: storage)['g_ema']) + fused_state_dict = blend_models(finetunemodel, basemodel, args.weight) # G1 + generator.generator.load_state_dict(fused_state_dict) # load 
G1 + g_ema.generator.load_state_dict(fused_state_dict) + requires_grad(basemodel, False) + requires_grad(generator.generator, False) + requires_grad(g_ema.generator, False) + + if not args.pretrain: + generator.encoder.load_state_dict(torch.load(args.encoder_path, map_location=lambda storage, loc: storage)["g_ema"]) + # we initialize the fusion modules to map f_G \otimes f_E to f_G. + for k in generator.fusion_out: + k.weight.data *= 0.01 + k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda() + for k in generator.fusion_skip: + k.weight.data *= 0.01 + k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda() + + accumulate(g_ema.encoder, generator.encoder, 0) + accumulate(g_ema.fusion_out, generator.fusion_out, 0) + accumulate(g_ema.fusion_skip, generator.fusion_skip, 0) + + g_parameters = list(generator.encoder.parameters()) + if not args.pretrain: + g_parameters = g_parameters + list(generator.fusion_out.parameters()) + list(generator.fusion_skip.parameters()) + + g_optim = optim.Adam( + g_parameters, + lr=args.lr, + betas=(0.9, 0.99), + ) + + if args.distributed: + generator = nn.parallel.DistributedDataParallel( + generator, + device_ids=[args.local_rank], + output_device=args.local_rank, + broadcast_buffers=False, + find_unused_parameters=True, + ) + + parsingpredictor = BiSeNet(n_classes=19) + parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage)) + parsingpredictor.to(device).eval() + requires_grad(parsingpredictor, False) + + # we apply gaussian blur to the images to avoid flickers caused during downsampling + down = Downsample(kernel=[1, 3, 3, 1], factor=2).to(device) + requires_grad(down, False) + + directions = torch.tensor(np.load(args.direction_path)).to(device) + + if not args.pretrain: + discriminator = ConditionalDiscriminator(256).to(device) + + d_optim = optim.Adam( + discriminator.parameters(), + lr=args.lr, + betas=(0.9, 0.99), + ) + + if args.distributed: + discriminator = nn.parallel.DistributedDataParallel( + discriminator, + device_ids=[args.local_rank], + output_device=args.local_rank, + broadcast_buffers=False, + find_unused_parameters=True, + ) + + percept = lpips.PerceptualLoss(model="net-lin", net="vgg", use_gpu=device.startswith("cuda"), gpu_ids=[args.local_rank]) + requires_grad(percept.model.net, False) + + pspencoder = load_psp_standalone(args.style_encoder_path, device) + + if args.local_rank == 0: + print('Load models and data successfully loaded!') + + if args.pretrain: + pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, basemodel, device) + else: + train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, basemodel, device) diff --git a/vtoonify/util.py b/vtoonify/util.py new file mode 100644 index 0000000000000000000000000000000000000000..01ad2930c55d07866dee02e019d359bb78f65fc7 --- /dev/null +++ b/vtoonify/util.py @@ -0,0 +1,229 @@ +import numpy as np +import matplotlib.pyplot as plt +from PIL import Image +import cv2 +import random +import math +import argparse +import torch +from torch.utils import data +from torch.nn import functional as F +from torch import autograd +from torch.nn import init +import torchvision.transforms as transforms +from model.stylegan.op import conv2d_gradfix +from model.encoder.encoders.psp_encoders import GradualStyleEncoder +from model.encoder.align_all_parallel import get_landmark + +def visualize(img_arr, dpi): + 
plt.figure(figsize=(10,10),dpi=dpi) + plt.imshow(((img_arr.detach().cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)) + plt.axis('off') + plt.show() + +def save_image(img, filename): + tmp = ((img.detach().cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8) + cv2.imwrite(filename, cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR)) + +def load_image(filename): + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]), + ]) + + img = Image.open(filename) + img = transform(img) + return img.unsqueeze(dim=0) + +def data_sampler(dataset, shuffle, distributed): + if distributed: + return data.distributed.DistributedSampler(dataset, shuffle=shuffle) + + if shuffle: + return data.RandomSampler(dataset) + + else: + return data.SequentialSampler(dataset) + + +def requires_grad(model, flag=True): + for p in model.parameters(): + p.requires_grad = flag + + +def accumulate(model1, model2, decay=0.999): + par1 = dict(model1.named_parameters()) + par2 = dict(model2.named_parameters()) + + for k in par1.keys(): + par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay) + + +def sample_data(loader): + while True: + for batch in loader: + yield batch + + +def d_logistic_loss(real_pred, fake_pred): + real_loss = F.softplus(-real_pred) + fake_loss = F.softplus(fake_pred) + + return real_loss.mean() + fake_loss.mean() + + +def d_r1_loss(real_pred, real_img): + with conv2d_gradfix.no_weight_gradients(): + grad_real, = autograd.grad( + outputs=real_pred.sum(), inputs=real_img, create_graph=True + ) + grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean() + + return grad_penalty + + +def g_nonsaturating_loss(fake_pred): + loss = F.softplus(-fake_pred).mean() + + return loss + + +def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01): + noise = torch.randn_like(fake_img) / math.sqrt( + fake_img.shape[2] * fake_img.shape[3] + ) + grad, = autograd.grad( + outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True + ) + path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1)) + + path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length) + + path_penalty = (path_lengths - path_mean).pow(2).mean() + + return path_penalty, path_mean.detach(), path_lengths + + +def make_noise(batch, latent_dim, n_noise, device): + if n_noise == 1: + return torch.randn(batch, latent_dim, device=device) + + noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0) + + return noises + + +def mixing_noise(batch, latent_dim, prob, device): + if prob > 0 and random.random() < prob: + return make_noise(batch, latent_dim, 2, device) + + else: + return [make_noise(batch, latent_dim, 1, device)] + + +def set_grad_none(model, targets): + for n, p in model.named_parameters(): + if n in targets: + p.grad = None + + +def weights_init(m): + classname = m.__class__.__name__ + if classname.find('BatchNorm2d') != -1: + if hasattr(m, 'weight') and m.weight is not None: + init.normal_(m.weight.data, 1.0, 0.02) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + + +def load_psp_standalone(checkpoint_path, device='cuda'): + ckpt = torch.load(checkpoint_path, map_location='cpu') + opts = ckpt['opts'] + if 'output_size' not in 
opts: + opts['output_size'] = 1024 + opts['n_styles'] = int(math.log(opts['output_size'], 2)) * 2 - 2 + opts = argparse.Namespace(**opts) + psp = GradualStyleEncoder(50, 'ir_se', opts) + psp_dict = {k.replace('encoder.', ''): v for k, v in ckpt['state_dict'].items() if k.startswith('encoder.')} + psp.load_state_dict(psp_dict) + psp.eval() + psp = psp.to(device) + latent_avg = ckpt['latent_avg'].to(device) + + def add_latent_avg(model, inputs, outputs): + return outputs + latent_avg.repeat(outputs.shape[0], 1, 1) + + psp.register_forward_hook(add_latent_avg) + return psp + +def get_video_crop_parameter(filepath, predictor, padding=[200,200,200,200]): + if type(filepath) == str: + img = dlib.load_rgb_image(filepath) + else: + img = filepath + lm = get_landmark(img, predictor) + if lm is None: + return None + lm_chin = lm[0 : 17] # left-right + lm_eyebrow_left = lm[17 : 22] # left-right + lm_eyebrow_right = lm[22 : 27] # left-right + lm_nose = lm[27 : 31] # top-down + lm_nostrils = lm[31 : 36] # top-down + lm_eye_left = lm[36 : 42] # left-clockwise + lm_eye_right = lm[42 : 48] # left-clockwise + lm_mouth_outer = lm[48 : 60] # left-clockwise + lm_mouth_inner = lm[60 : 68] # left-clockwise + + scale = 64. / (np.mean(lm_eye_right[:,0])-np.mean(lm_eye_left[:,0])) + center = ((np.mean(lm_eye_right, axis=0)+np.mean(lm_eye_left, axis=0)) / 2) * scale + h, w = round(img.shape[0] * scale), round(img.shape[1] * scale) + left = max(round(center[0] - padding[0]), 0) // 8 * 8 + right = min(round(center[0] + padding[1]), w) // 8 * 8 + top = max(round(center[1] - padding[2]), 0) // 8 * 8 + bottom = min(round(center[1] + padding[3]), h) // 8 * 8 + return h,w,top,bottom,left,right,scale + +def tensor2cv2(img): + tmp = ((img.cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8) + return cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR) + +# get parameters from the stylegan and mark them with their layers +def gather_params(G): + params = dict( + [(res, {}) for res in range(18)] + [("others", {})] + ) + for n, p in sorted(list(G.named_buffers()) + list(G.named_parameters())): + if n.startswith("convs"): + layer = int(n.split(".")[1]) + 1 + params[layer][n] = p + elif n.startswith("to_rgbs"): + layer = int(n.split(".")[1]) * 2 + 3 + params[layer][n] = p + elif n.startswith("conv1"): + params[0][n] = p + elif n.startswith("to_rgb1"): + params[1][n] = p + else: + params["others"][n] = p + return params + +# blend the ffhq stylegan model and the finetuned model for toonify +# see ``Resolution Dependent GAN Interpolation for Controllable Image Synthesis Between Domains'' +def blend_models(G_low, G_high, weight=[1]*7+[0]*11): + params_low = gather_params(G_low) + params_high = gather_params(G_high) + + for res in range(18): + for n, p in params_high[res].items(): + params_high[res][n] = params_high[res][n] * (1-weight[res]) + params_low[res][n] * weight[res] + + state_dict = {} + for _, p in params_high.items(): + state_dict.update(p) + + return state_dict + diff --git a/vtoonify_model.py b/vtoonify_model.py new file mode 100644 index 0000000000000000000000000000000000000000..a19aeb71b2023721d11ec37b408d0ceea781bb4c --- /dev/null +++ b/vtoonify_model.py @@ -0,0 +1,235 @@ +from __future__ import annotations +import gradio as gr +import pathlib +import sys +sys.path.insert(0, 'vtoonify') + +from util import load_psp_standalone, get_video_crop_parameter, tensor2cv2 +import torch +import torch.nn as nn +import numpy as np +import dlib +import cv2 +from model.vtoonify import VToonify +from model.bisenet.model import 
BiSeNet +import torch.nn.functional as F +from torchvision import transforms +from model.encoder.align_all_parallel import align_face +import gc + +MODEL_REPO = 'PKUWilliamYang/VToonify' + +class Model(): + def __init__(self, device): + super().__init__() + + self.device = device + self.style_types = { + 'cartoon1': ['vtoonify_d_cartoon/vtoonify_s026_d0.5.pt', 26], + 'cartoon1-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 26], + 'cartoon2-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 64], + 'cartoon3-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 153], + 'cartoon4': ['vtoonify_d_cartoon/vtoonify_s299_d0.5.pt', 299], + 'cartoon4-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 299], + 'cartoon5-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 8], + 'comic1-d': ['vtoonify_d_comic/vtoonify_s_d.pt', 28], + 'comic2-d': ['vtoonify_d_comic/vtoonify_s_d.pt', 18], + 'comic3-d': ['vtoonify_d_illustration/vtoonify_s054_d_c.pt', 54], + 'arcane1': ['vtoonify_d_arcane/vtoonify_s000_d0.5.pt', 0], + 'arcane1-d': ['vtoonify_d_arcane/vtoonify_s_d.pt', 0], + 'arcane2': ['vtoonify_d_arcane/vtoonify_s077_d0.5.pt', 77], + 'arcane2-d': ['vtoonify_d_arcane/vtoonify_s_d.pt', 77], + 'caricature1': ['vtoonify_d_caricature/vtoonify_s039_d0.5.pt', 39], + 'caricature2': ['vtoonify_d_caricature/vtoonify_s068_d0.5.pt', 68], + 'pixar': ['vtoonify_d_pixar/vtoonify_s052_d0.5.pt', 52], + 'pixar-d': ['vtoonify_d_pixar/vtoonify_s_d.pt', 52], + } + + self.landmarkpredictor = self._create_dlib_landmark_model() + self.parsingpredictor = self._create_parsing_model() + self.pspencoder = self._load_encoder() + self.transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]), + ]) + + self.vtoonify, self.exstyle = self._load_default_model() + self.color_transfer = False + + @staticmethod + def _create_dlib_landmark_model(): + return dlib.shape_predictor(huggingface_hub.hf_hub_download(MODEL_REPO, + 'models/shape_predictor_68_face_landmarks.dat')) + + def _create_parsing_model(self): + parsingpredictor = BiSeNet(n_classes=19) + parsingpredictor.load_state_dict(torch.load(huggingface_hub.hf_hub_download(MODEL_REPO, 'models/faceparsing.pth'), + map_location=lambda storage, loc: storage)) + parsingpredictor.to(self.device).eval() + return parsingpredictor + + def _load_encoder(self) -> nn.Module: + style_encoder_path = huggingface_hub.hf_hub_download(MODEL_REPO,'models/encoder.pt') + return load_psp_standalone(style_encoder_path, self.device) + + def _load_default_model(self) -> tuple[torch.Tensor, str]: + vtoonify = VToonify(backbone = 'dualstylegan') + vtoonify.load_state_dict(torch.load(huggingface_hub.hf_hub_download(MODEL_REPO, + 'models/vtoonify_d_cartoon/vtoonify_s026_d0.5.pt'), + map_location=lambda storage, loc: storage)['g_ema']) + vtoonify.to(self.device) + tmp = np.load(huggingface_hub.hf_hub_download(MODEL_REPO,'models/vtoonify_d_cartoon/exstyle_code.npy'), allow_pickle=True).item() + exstyle = torch.tensor(tmp[list(tmp.keys())[26]]).to(self.device) + with torch.no_grad(): + exstyle = vtoonify.zplus2wplus(exstyle) + return vtoonify, exstyle + + def load_model(self, style_type: str) -> tuple[torch.Tensor, str]: + if style_type == 'comic3-d': + self.color_transfer = True + else: + self.color_transfer = False + model_path, ind = self.style_types[style_type] + style_path = os.path.join('models',os.path.dirname(model_path),'exstyle_code.npy') + self.vtoonify.load_state_dict(torch.load(huggingface_hub.hf_hub_download(MODEL_REPO,'models/'+model_path), + map_location=lambda storage, loc: 
storage)['g_ema']) + tmp = np.load(huggingface_hub.hf_hub_download(MODEL_REPO, style_path), allow_pickle=True).item() + exstyle = torch.tensor(tmp[list(tmp.keys())[ind]]).to(self.device) + with torch.no_grad(): + exstyle = self.vtoonify.zplus2wplus(exstyle) + return exstyle, 'Model of %s loaded.'%(style_type) + + def detect_and_align(self, frame, top, bottom, left, right, return_para=False): + message = 'Error: no face detected!' + paras = get_video_crop_parameter(frame, self.landmarkpredictor, [left, right, top, bottom]) + instyle = torch.zeros(1,18,512).to(self.device) + h, w, scale = 0, 0, 0 + if paras is not None: + h,w,top,bottom,left,right,scale = paras + H, W = int(bottom-top), int(right-left) + # for HR image, we apply gaussian blur to it to avoid over-sharp stylization results + kernel_1d = np.array([[0.125],[0.375],[0.375],[0.125]]) + if scale <= 0.75: + frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) + if scale <= 0.375: + frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) + frame = cv2.resize(frame, (w, h))[top:bottom, left:right] + with torch.no_grad(): + I = align_face(frame, self.landmarkpredictor) + if I is not None: + I = self.transform(I).unsqueeze(dim=0).to(self.device) + instyle = self.pspencoder(I) + instyle = self.vtoonify.zplus2wplus(instyle) + message = 'Successfully rescale the frame to (%d, %d)'%(bottom-top, right-left) + else: + frame = np.zeros((256,256,3), np.uint8) + else: + frame = np.zeros((256,256,3), np.uint8) + if return_para: + return frame, instyle, message, w, h, top, bottom, left, right, scale + return frame, instyle, message + + #@torch.inference_mode() + def detect_and_align_image(self, image: str, top: int, bottom: int, left: int, right: int + ) -> tuple[np.ndarray, torch.Tensor, str]: + + frame = cv2.imread(image) + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + return self.detect_and_align(frame, top, bottom, left, right) + + def detect_and_align_video(self, video: str, top: int, bottom: int, left: int, right: int + ) -> tuple[np.ndarray, torch.Tensor, str]: + + video_cap = cv2.VideoCapture(video) + success, frame = video_cap.read() + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + video_cap.release() + return self.detect_and_align(frame, top, bottom, left, right) + + def detect_and_align_full_video(self, video: str, top: int, bottom: int, left: int, right: int) -> tuple[str, torch.Tensor, str]: + message = 'Error: no face detected!' 
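+ # Overall flow of this method: crop parameters and the pSp latent (instyle) are estimated
+ # once from the first frame via detect_and_align(); the same rescale, crop and optional
+ # blur are then replayed on every remaining frame, and the aligned clip is written to
+ # 'input.mp4' for the subsequent stylization pass.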
+ instyle = torch.zeros(1,18,512).to(self.device) + video_cap = cv2.VideoCapture(video) + num = int(video_cap.get(7)) + success, frame = video_cap.read() + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + frame, instyle, message, w, h, top, bottom, left, right, scale = self.detect_and_align(frame, top, bottom, left, right, True) + if instyle is None: + return 'default.mp4', instyle, message + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + videoWriter = cv2.VideoWriter('input.mp4', fourcc, video_cap.get(5), (int(right-left), int(bottom-top))) + kernel_1d = np.array([[0.125],[0.375],[0.375],[0.125]]) + for i in range(num-1): + success, frame = video_cap.read() + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + if scale <= 0.75: + frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) + if scale <= 0.375: + frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) + frame = cv2.resize(frame, (w, h))[top:bottom, left:right] + videoWriter.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)) + + videoWriter.release() + video_cap.release() + + return 'input.mp4', instyle, 'Successfully rescale the video to (%d, %d)'%(bottom-top, right-left) + + def image_toonify(self, aligned_face: np.ndarray, instyle: torch.Tensor, exstyle: torch.Tensor, style_degree: float) -> np.ndarray: + if exstyle is None: + exstyle = self.exstyle + with torch.no_grad(): + if self.color_transfer: + s_w = exstyle + else: + s_w = instyle.clone() + s_w[:,:7] = exstyle[:,:7] + + x = self.transform(aligned_face).unsqueeze(dim=0).to(self.device) + x_p = F.interpolate(self.parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0], + scale_factor=0.5, recompute_scale_factor=False).detach() + inputs = torch.cat((x, x_p/16.), dim=1) + y_tilde = self.vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), d_s = style_degree) + y_tilde = torch.clamp(y_tilde, -1, 1) + + return ((y_tilde[0].cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8) + + def video_tooniy(self, aligned_video: str, instyle: torch.Tensor, exstyle: torch.Tensor, style_degree: float) -> str: + if exstyle is None: + exstyle = self.exstyle + video_cap = cv2.VideoCapture(aligned_video) + num = int(video_cap.get(7)) + fourcc = cv2.VideoWriter_fourcc(*'MP4V') + videoWriter = cv2.VideoWriter('output.mp4', fourcc, + video_cap.get(5), (int(video_cap.get(3)*4), + int(video_cap.get(4)*4))) + + batch_frames = [] + batch_size = 4 + with torch.no_grad(): + if self.color_transfer: + s_w = exstyle + else: + s_w = instyle.clone() + s_w[:,:7] = exstyle[:,:7] + for i in range(num): + success, frame = video_cap.read() + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + batch_frames += [self.transform(frame).unsqueeze(dim=0).to(self.device)] + if len(batch_frames) == batch_size or (i+1) == num: + x = torch.cat(batch_frames, dim=0) + batch_frames = [] + with torch.no_grad(): + x_p = F.interpolate(self.parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0], + scale_factor=0.5, recompute_scale_factor=False).detach() + inputs = torch.cat((x, x_p/16.), dim=1) + y_tilde = self.vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), style_degree) + y_tilde = torch.clamp(y_tilde, -1, 1) + for k in range(y_tilde.size(0)): + videoWriter.write(tensor2cv2(y_tilde[k].cpu())) + gc.collect() + + videoWriter.release() + video_cap.release() + return 'output.mp4' + +
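+ # Minimal usage sketch of the Model class above (illustrative only: 'photo.jpg' is a
+ # hypothetical input path and a CUDA device is assumed; this mirrors how app.py is
+ # expected to drive these methods):
+ #
+ #   model = Model(device='cuda')
+ #   exstyle, info = model.load_model('cartoon1-d')
+ #   aligned, instyle, info = model.detect_and_align_image('photo.jpg', 200, 200, 200, 200)
+ #   result = model.image_toonify(aligned, instyle, exstyle, style_degree=0.5)
+ #   cv2.imwrite('result.jpg', cv2.cvtColor(result, cv2.COLOR_RGB2BGR))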