MarkusEssl committed
Commit 5f1e4dc
1 Parent(s): a9ac25d
.gitattributes CHANGED
@@ -6,5 +6,5 @@
 *.css filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.sh filter=lfs diff=lfs merge=lfs -text
-/SOURCE/yolo_files/utils/google_app_engine/Dockerfile filter=lfs diff=lfs merge=lfs -text
+/utils/google_app_engine/Dockerfile filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
pages/3_Signify.py CHANGED
@@ -10,7 +10,7 @@ import torchvision.transforms as transforms
 from PIL import Image
 from signify.siamese import SiameseNetwork
 
-#from SOURCE.yolo_files import detect
+#from import detect
 
 MEDIA_ROOT = 'results/media/signatures/'
 SIGNATURE_ROOT = 'results/media/UserSignaturesSquare/'
signify/gan/data/__init__.py CHANGED
@@ -24,7 +24,7 @@ def find_dataset_using_name(dataset_name):
     be instantiated. It has to be a subclass of BaseDataset,
     and it is case-insensitive.
     """
-    dataset_filename = "SOURCE.gan_files.data." + dataset_name + "_dataset"
+    dataset_filename = "data." + dataset_name + "_dataset"
     datasetlib = importlib.import_module(dataset_filename)
 
     dataset = None
signify/gan/data/aligned_dataset.py CHANGED
@@ -1,7 +1,13 @@
+import sys
+
+sys.path.append("signify/gan")
+
 import os
-from SOURCE.gan_files.data.base_dataset import BaseDataset, get_params, get_transform
-from SOURCE.gan_files.data.image_folder import make_dataset
+
 from PIL import Image
+from data.base_dataset import (BaseDataset, get_params,
+                               get_transform)
+from data.image_folder import make_dataset
 
 
 class AlignedDataset(BaseDataset):
signify/gan/data/colorization_dataset.py CHANGED
@@ -1,10 +1,15 @@
+import sys
+
+sys.path.append("signify/gan")
+
 import os
-from SOURCE.gan_files.data.base_dataset import BaseDataset, get_transform
-from SOURCE.gan_files.data.image_folder import make_dataset
-from skimage import color # require skimage
-from PIL import Image
+
 import numpy as np
 import torchvision.transforms as transforms
+from PIL import Image
+from skimage import color # require skimage
+from data.base_dataset import BaseDataset, get_transform
+from data.image_folder import make_dataset
 
 
 class ColorizationDataset(BaseDataset):
signify/gan/data/single_dataset.py CHANGED
@@ -1,6 +1,10 @@
-from SOURCE.gan_files.data.base_dataset import BaseDataset, get_transform
-from SOURCE.gan_files.data.image_folder import make_dataset
+import sys
+
+sys.path.append("signify/gan")
+
 from PIL import Image
+from data.base_dataset import BaseDataset, get_transform
+from data.image_folder import make_dataset
 
 
 class SingleDataset(BaseDataset):
signify/gan/data/template_dataset.py CHANGED
@@ -11,7 +11,12 @@ You need to implement the following functions:
     -- <__getitem__>: Return a data point and its metadata information.
     -- <__len__>: Return the number of images.
 """
-from SOURCE.gan_files.data.base_dataset import BaseDataset, get_transform
+import sys
+
+sys.path.append("signify/gan")
+
+from data.base_dataset import BaseDataset, get_transform
+
 # from data.image_folder import make_dataset
 # from PIL import Image
 
signify/gan/data/unaligned_dataset.py CHANGED
@@ -1,9 +1,14 @@
+import sys
+
+sys.path.append("signify/gan")
+
 import os
-from SOURCE.gan_files.data.base_dataset import BaseDataset, get_transform
-from SOURCE.gan_files.data.image_folder import make_dataset
-from PIL import Image
 import random
 
+from PIL import Image
+from data.base_dataset import BaseDataset, get_transform
+from data.image_folder import make_dataset
+
 
 class UnalignedDataset(BaseDataset):
     """
signify/gan/models/__init__.py CHANGED
@@ -30,7 +30,7 @@ def find_model_using_name(model_name):
     be instantiated. It has to be a subclass of BaseModel,
     and it is case-insensitive.
     """
-    model_filename = "SOURCE.gan_files.models." + model_name + "_model"
+    model_filename = "models." + model_name + "_model"
     modellib = importlib.import_module(model_filename)
     model = None
     target_model_name = model_name.replace('_', '') + 'model'
signify/gan/models/base_model.py CHANGED
@@ -1,8 +1,13 @@
+import sys
+
+sys.path.append("signify/gan")
+
 import os
-import torch
-from collections import OrderedDict
 from abc import ABC, abstractmethod
-from SOURCE.gan_files.models import networks
+from collections import OrderedDict
+
+import torch
+from models import networks
 
 
 class BaseModel(ABC):
signify/gan/models/colorization_model.py CHANGED
@@ -1,7 +1,11 @@
+import sys
+
+sys.path.append("signify/gan")
+
 import numpy as np
 import torch
 from skimage import color # used for lab2rgb
-from SOURCE.gan_files.models.pix2pix_model import Pix2PixModel
+from models.pix2pix_model import Pix2PixModel
 
 
 class ColorizationModel(Pix2PixModel):
signify/gan/models/cycle_gan_model.py CHANGED
@@ -1,8 +1,13 @@
-import torch
+import sys
+
+sys.path.append("signify/gan")
+
 import itertools
+
+import torch
+from models import networks
+from models.base_model import BaseModel
 from util.image_pool import ImagePool
-from SOURCE.gan_files.models.base_model import BaseModel
-from SOURCE.gan_files.models import networks
 
 
 class CycleGANModel(BaseModel):
signify/gan/models/pix2pix_model.py CHANGED
@@ -1,6 +1,10 @@
+import sys
+
+sys.path.append("signify/gan")
+
 import torch
-from SOURCE.gan_files.models.base_model import BaseModel
-from SOURCE.gan_files.models import networks
+from models import networks
+from models.base_model import BaseModel
 
 
 class Pix2PixModel(BaseModel):
signify/gan/models/template_model.py CHANGED
@@ -15,9 +15,13 @@ You need to implement the following functions:
     <forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
     <optimize_parameters>: Update network weights; it will be called in every training iteration.
 """
+import sys
+
+sys.path.append("signify/gan")
+
 import torch
-from SOURCE.gan_files.models.base_model import BaseModel
-from SOURCE.gan_files.models import networks
+from models import networks
+from models.base_model import BaseModel
 
 
 class TemplateModel(BaseModel):
signify/gan/models/test_model.py CHANGED
@@ -1,5 +1,9 @@
-from SOURCE.gan_files.models.base_model import BaseModel
-from SOURCE.gan_files.models import networks
+import sys
+
+sys.path.append("signify/gan")
+
+from models import networks
+from models.base_model import BaseModel
 
 
 class TestModel(BaseModel):
signify/gan/options/base_options.py CHANGED
@@ -1,9 +1,14 @@
+import sys
+
+sys.path.append("signify/gan")
+
 import argparse
 import os
-from SOURCE.gan_files.util import util
+
+import data as data
+import models as models
 import torch
-import SOURCE.gan_files.models as models
-import SOURCE.gan_files.data as data
+from util import util
 
 
 class BaseOptions():
@@ -24,7 +29,7 @@ class BaseOptions():
         # parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
         parser.add_argument('--name', type=str, default='gan_signdata_kaggle', help='name of the experiment. It decides where to store samples and models')
         parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
-        parser.add_argument('--checkpoints_dir', type=str, default='SOURCE/gan_files/checkpoints/', help='models are saved here')
+        parser.add_argument('--checkpoints_dir', type=str, default='checkpoints/', help='models are saved here')
         # model parameters
         parser.add_argument('--model', type=str, default='test', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
         parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
signify/gan/options/test_options.py CHANGED
@@ -1,5 +1,10 @@
 # from .base_options import BaseOptions
-from SOURCE.gan_files.options.base_options import BaseOptions
+import sys
+
+sys.path.append("signify/gan")
+
+from options.base_options import BaseOptions
+
 
 class TestOptions(BaseOptions):
     """This class includes test options.
signify/gan/options/train_options.py CHANGED
@@ -1,4 +1,8 @@
-from SOURCE.gan_files.options.base_options import BaseOptions
+import sys
+
+sys.path.append("signify/gan")
+
+from options.base_options import BaseOptions
 
 
 class TrainOptions(BaseOptions):
signify/gan/util/visualizer.py CHANGED
@@ -1,11 +1,15 @@
-import numpy as np
-import os
 import sys
+
+sys.path.append("signify/gan")
+
 import ntpath
+import os
+import sys
 import time
-from SOURCE.gan_files.util import util, html
-from subprocess import Popen, PIPE
+from subprocess import PIPE, Popen
 
+import numpy as np
+from util import html, util
 
 if sys.version_info[0] == 2:
     VisdomExceptionBase = Exception
signify/yolo/detect.py CHANGED
@@ -1,5 +1,4 @@
 import sys
-
 sys.path.append("signify/yolo")
 
 
@@ -24,7 +23,7 @@ from utils.torch_utils import load_classifier, select_device, time_synchronized
 
 def detect(image_path):
     opt = {
-        'weights': 'SOURCE/yolo_files/best.pt',
+        'weights': 'best.pt',
         'source': image_path,
         'img_size': 640,
        'conf_thres': 0.25,
signify/yolo/models/common.py CHANGED
@@ -12,10 +12,10 @@ import torch.nn as nn
 from PIL import Image
 from torch.cuda import amp
 
-from SOURCE.yolo_files.utils.datasets import letterbox
-from SOURCE.yolo_files.utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box
-from SOURCE.yolo_files.utils.plots import colors, plot_one_box
-from SOURCE.yolo_files.utils.torch_utils import time_synchronized
+from utils.datasets import letterbox
+from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box
+from utils.plots import colors, plot_one_box
+from utils.torch_utils import time_synchronized
 
 
 def autopad(k, p=None): # kernel, padding
signify/yolo/models/experimental.py CHANGED
@@ -1,11 +1,12 @@
 # YOLOv5 experimental modules
+import sys
+sys.path.append("signify/yolo")
 
 import numpy as np
 import torch
 import torch.nn as nn
-
-from SOURCE.yolo_files.models.common import Conv, DWConv
-from SOURCE.yolo_files.utils.google_utils import attempt_download
+from models.common import Conv, DWConv
+from utils.google_utils import attempt_download
 
 
 class CrossConv(nn.Module):
@@ -111,7 +112,7 @@ class Ensemble(nn.ModuleList):
 
 
 def attempt_load(weights, map_location=None, inplace=True):
-    from SOURCE.yolo_files.models.yolo import Detect, Model
+    from models.yolo import Detect, Model
 
     # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
     model = Ensemble()
signify/yolo/models/export.py CHANGED
@@ -3,6 +3,9 @@
 Usage:
     $ export PYTHONPATH="$PWD" && python models/export.py --weights yolov5s.pt --img 640 --batch 1
 """
+import sys
+
+sys.path.append("signify/yolo")
 
 import argparse
 import sys
@@ -11,16 +14,17 @@ from pathlib import Path
 
 sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories
 
+import models
 import torch
 import torch.nn as nn
+from models.experimental import attempt_load
+from utils.activations import Hardswish, SiLU
+from utils.general import (check_img_size,
+                           check_requirements, colorstr,
+                           file_size, set_logging)
+from utils.torch_utils import select_device
 from torch.utils.mobile_optimizer import optimize_for_mobile
 
-import SOURCE.yolo_files.models
-from SOURCE.yolo_files.models.experimental import attempt_load
-from SOURCE.yolo_files.utils.activations import Hardswish, SiLU
-from SOURCE.yolo_files.utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging
-from SOURCE.yolo_files.utils.torch_utils import select_device
-
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
signify/yolo/models/yolo.py CHANGED
@@ -9,11 +9,14 @@ from pathlib import Path
 sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories
 logger = logging.getLogger(__name__)
 
-from SOURCE.yolo_files.models.common import *
-from SOURCE.yolo_files.models.experimental import *
-from SOURCE.yolo_files.utils.autoanchor import check_anchor_order
-from SOURCE.yolo_files.utils.general import make_divisible, check_file, set_logging
-from SOURCE.yolo_files.utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
+import sys
+sys.path.append("signify/yolo")
+
+from models.common import *
+from models.experimental import *
+from utils.autoanchor import check_anchor_order
+from utils.general import make_divisible, check_file, set_logging
+from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
     select_device, copy_attr
 
 try:
signify/yolo/utils/autoanchor.py CHANGED
@@ -5,7 +5,7 @@ import torch
 import yaml
 from tqdm import tqdm
 
-from SOURCE.yolo_files.utils.general import colorstr
+from utils.general import colorstr
 
 
 def check_anchor_order(m):
signify/yolo/utils/general.py CHANGED
@@ -20,9 +20,9 @@ import torch
 import torchvision
 import yaml
 
-from SOURCE.yolo_files.utils.google_utils import gsutil_getsize
-from SOURCE.yolo_files.utils.metrics import fitness
-from SOURCE.yolo_files.utils.torch_utils import init_torch_seeds
+from utils.google_utils import gsutil_getsize
+from utils.metrics import fitness
+from utils.torch_utils import init_torch_seeds
 
 # Settings
 torch.set_printoptions(linewidth=320, precision=5, profile='long')
signify/yolo/utils/loss.py CHANGED
@@ -3,8 +3,8 @@
 import torch
 import torch.nn as nn
 
-from SOURCE.yolo_files.utils.general import bbox_iou
-from SOURCE.yolo_files.utils.torch_utils import is_parallel
+from utils.general import bbox_iou
+from utils.torch_utils import is_parallel
 
 
 def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
signify/yolo/utils/plots.py CHANGED
@@ -17,8 +17,8 @@ import torch
 import yaml
 from PIL import Image, ImageDraw, ImageFont
 
-from SOURCE.yolo_files.utils.general import xywh2xyxy, xyxy2xywh
-from SOURCE.yolo_files.utils.metrics import fitness
+from utils.general import xywh2xyxy, xyxy2xywh
+from utils.metrics import fitness
 
 # Settings
 matplotlib.rc('font', **{'size': 11})
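
Note on the recurring pattern above: every edited module now prepends its package root to sys.path and then imports by the in-package name instead of the old SOURCE.* paths. A minimal sketch of how those bare imports resolve, assuming the app is launched from the repository root; the file name below is hypothetical and not part of this commit:

    # resolve_sketch.py -- hypothetical illustration, not a file in this commit
    import sys

    # "signify/gan" only resolves if the current working directory is the
    # repository root (where the Streamlit app is started), matching the
    # relative path used by the edited modules.
    sys.path.append("signify/gan")

    # With signify/gan on sys.path, its sub-packages import under their bare
    # names, exactly as the rewritten imports above expect:
    from models import networks                  # signify/gan/models/networks.py
    from data.image_folder import make_dataset   # signify/gan/data/image_folder.py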