nathbns committed
Commit d8344b1 · verified · 1 Parent(s): 176327c

Upload 10 files

Files changed (5)
  1. llr.py +3 -35
  2. requirements.txt +13 -9
  3. rescale.py +0 -4
  4. slid.py +0 -24
  5. train.py +6 -0
llr.py CHANGED
@@ -1,7 +1,4 @@
-# Code taken from
-# https://github.com/maciejczyzewski/neural-chessboard/
-
-from deps.laps import laps_intersections, laps_cluster
+from laps import laps_intersections, laps_cluster
 from slid import slid_tendency
 import scipy
 import cv2
@@ -44,11 +41,10 @@ def llr_unique(a):
 
 
 def llr_polysort(pts):
-    """sort points clockwise"""
     mlat = sum(x[0] for x in pts) / len(pts)
     mlng = sum(x[1] for x in pts) / len(pts)
 
-    def __sort(x):  # main math --> found on MIT site
+    def __sort(x):
         return (math.atan2(x[0]-mlat, x[1]-mlng) +
                 2*math.pi) % (2*math.pi)
     pts.sort(key=__sort)
@@ -70,7 +66,7 @@ def llr_polyscore(cnt, pts, cen, alfa=5, beta=2):
 
     pco = pyclipper.PyclipperOffset()
     pco.AddPath(cnt, pyclipper.JT_MITER, pyclipper.ET_CLOSEDPOLYGON)
-    pcnt = matplotlib.path.Path(pco.Execute(gamma)[0])  # FIXME: alfa/1.5
+    pcnt = matplotlib.path.Path(pco.Execute(gamma)[0])
     wtfs = pcnt.contains_points(pts)
     pts_in = min(np.count_nonzero(wtfs), 49)
     t1 = pts_in < min(len(pts), 49) - 2 * beta - 1
@@ -105,15 +101,6 @@ def llr_polyscore(cnt, pts, cen, alfa=5, beta=2):
 
     G = np.linalg.norm(na(cen)-na(cen2))
 
-    """
-    cnt_in = __convex_approx(na(pcnt_in))
-    S = cv2.contourArea(na(cnt_in))
-    if S < B: E += abs(S - B)
-    cnt_in = __convex_approx(na(list(cnt_in)+list(cnt)))
-    S = cv2.contourArea(na(cnt_in))
-    if S > B: E += abs(S - B)
-    """
-
     a = [cnt[0], cnt[1]]
     b = [cnt[1], cnt[2]]
     c = [cnt[2], cnt[3]]
@@ -135,21 +122,14 @@ def llr_polyscore(cnt, pts, cen, alfa=5, beta=2):
     if B == 0 or A == 0:
        return 0
 
-    # See Eq.11 and Sec.3.4 in the paper
-
     C = 1+(E/A)**(1/3)
     D = 1+(G/A)**(1/5)
     R = (A**4)/((B**2) * C * D)
 
-    # print(R*(10**12), A, "|", B, C, D, "|", E, G)
-
     return R
 
 ################################################################################
 
-# LAPS, SLID
-
-
 def LLR(img, points, lines):
     old = points
 
@@ -207,8 +187,6 @@ def LLR(img, points, lines):
     centroid = (sum(x) / len(points),
                 sum(y) / len(points))
 
-    # print(alfa, beta, centroid)
-
     def __v(l):
         y_0, x_0 = l[0][0], l[0][1]
         y_1, x_1 = l[1][0], l[1][1]
@@ -269,8 +247,6 @@ def LLR(img, points, lines):
     pregroup[0] = llr_unique(pregroup[0])
     pregroup[1] = llr_unique(pregroup[1])
 
-    # print("---------------------")
-    # print(pregroup)
     for v in itertools.combinations(pregroup[0], 2):
         for h in itertools.combinations(pregroup[1], 2):
             poly = laps_intersections([v[0], v[1], h[0], h[1]])
@@ -280,20 +256,13 @@ def LLR(img, points, lines):
             poly = na(llr_polysort(llr_normalize(poly)))
             if not cv2.isContourConvex(poly):
                 continue
-            # print("Poly:", -llr_polyscore(poly, points, centroid,
-            #                               beta=beta, alfa=alfa/2))
             S[-llr_polyscore(poly, points, centroid,
                              beta=beta, alfa=alfa/2)] = poly
 
-    # print(bool(S))
     S = collections.OrderedDict(sorted(S.items()))
     K = next(iter(S))
-    # print("key --", K)
     four_points = llr_normalize(S[K])
 
-    # print("POINTS:", len(points))
-    # print("LINES:", len(lines))
-
     return four_points
 
 
@@ -303,5 +272,4 @@ def llr_pad(four_points, img):
 
     padded = pco.Execute(60)[0]
 
-    # 60,70/75 is best (with buffer/for debug purpose)
     return pco.Execute(60)[0]
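With the pointer to Eq. 11 of the paper stripped from the source, it may help to keep the quadrangle score returned by llr_polyscore written out. As read from the hunks above (A and B are quantities computed earlier in the function, outside the lines this diff shows; E is an accumulated error term and G is the distance between the two centroids):

$$C = 1 + \left(\frac{E}{A}\right)^{1/3}, \qquad D = 1 + \left(\frac{G}{A}\right)^{1/5}, \qquad R = \frac{A^{4}}{B^{2}\,C\,D}$$

Larger E or G (more accumulated error, a bigger centroid shift) inflates C and D and shrinks R; LLR keys each candidate by -llr_polyscore and takes the first entry after sorting, i.e. the quadrangle with the largest R.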
 
requirements.txt CHANGED
@@ -1,9 +1,13 @@
-gradio
-tensorflow
-opencv-python
-numpy
-pillow
-python-chess
-matplotlib
-scikit-learn
-pyclipper
+gradio>=4.0.0
+keras>=2.0.0
+matplotlib>=3.0.0
+numpy>=1.20.0
+opencv-contrib-python>=4.5.0
+scipy>=1.7.0
+tensorflow>=2.0.0
+pyclipper>=1.2.0
+scikit-learn>=1.0.0
+
+
+
+
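The new list swaps opencv-python for opencv-contrib-python, adds keras and scipy with version floors, and drops pillow and python-chess. Both OpenCV wheels provide the same cv2 module, so only one of them should be installed in a given environment; a quick probe (illustration only, not part of the commit) to confirm the resolved versions meet the floors:

```python
# Version probe for the pinned packages; not part of the committed code.
import cv2
import tensorflow as tf

print(cv2.__version__)  # expect >= 4.5.0 per requirements.txt
print(tf.__version__)   # expect >= 2.0.0
```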
rescale.py CHANGED
@@ -6,13 +6,11 @@ arr = np.array
 
 
 def image_scale(pts, scale):
-    """scale to original image size"""
     def __loop(x, y): return [x[0] * y, x[1] * y]
     return list(map(functools.partial(__loop, y=1/scale), pts))
 
 
 def image_resize(img, height=500):
-    """resize image to same normalized area (height**2)"""
     pixels = height * height
     shape = list(np.shape(img))
     scale = math.sqrt(float(pixels)/float(shape[0]*shape[1]))
@@ -24,7 +22,6 @@ def image_resize(img, height=500):
 
 
 def image_transform(img, points, square_length=150):
-    """crop original image using perspective warp"""
     board_length = square_length * 8
     def __dis(a, b): return np.linalg.norm(arr(a)-arr(b))
     def __shi(seq, n=0): return seq[-(n % len(seq)):] + seq[:-(n % len(seq))]
@@ -42,7 +39,6 @@ def image_transform(img, points, square_length=150):
 
 
 def crop(img, pts, scale):
-    """crop using 4 points transform"""
     pts_orig = image_scale(pts, scale)
     img_crop = image_transform(img, pts_orig)
     return img_crop
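Note that image_resize normalizes every input to a constant pixel area (height**2, i.e. 250,000 px at the default height=500) rather than to a fixed width or height. A worked check of the scale factor, with the 1000×750 input chosen purely for illustration:

```python
import math

# Equal-area scale factor as computed inside image_resize for a
# hypothetical 1000x750 input with the default height=500.
pixels = 500 * 500                      # target area (height**2)
shape = [750, 1000]                     # rows, cols of the example image
scale = math.sqrt(float(pixels) / float(shape[0] * shape[1]))
print(round(scale, 3))                  # 0.577 -> roughly a 577x433 result
```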
 
slid.py CHANGED
@@ -1,15 +1,8 @@
-# My implementation of the SLID module from
-# https://github.com/maciejczyzewski/neural-chessboard/
-
-from typing import Tuple
 import numpy as np
 import cv2
 
 
 arr = np.array
-# Four parameters are taken from the original code and
-# correspond to four possible cases that need correction:
-# low light, overexposure, underexposure, and blur
 CLAHE_PARAMS = [[3, (2, 6), 5],  # @1
                 [3, (6, 2), 5],  # @2
                 [5, (3, 3), 5],  # @3
@@ -17,7 +10,6 @@ CLAHE_PARAMS = [[3, (2, 6), 5],  # @1
 
 
 def slid_clahe(img, limit=2, grid=(3, 3), iters=5):
-    """repair using CLAHE algorithm (adaptive histogram equalization)"""
     img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     for i in range(iters):
         img = cv2.createCLAHE(clipLimit=limit,
@@ -29,7 +21,6 @@ def slid_clahe(img, limit=2, grid=(3, 3), iters=5):
 
 
 def slid_detector(img, alfa=150, beta=2):
-    """detect lines using Hough algorithm"""
     __lines, lines = [], cv2.HoughLinesP(img, rho=1, theta=np.pi/360*beta,
                                          threshold=40, minLineLength=50, maxLineGap=15)  # [40, 40, 10]
     if lines is None:
@@ -41,7 +32,6 @@ def slid_detector(img, alfa=150, beta=2):
 
 
 def slid_canny(img, sigma=0.25):
-    """apply Canny edge detector (automatic thresh)"""
     v = np.median(img)
     img = cv2.medianBlur(img, 5)
     img = cv2.GaussianBlur(img, (7, 7), 2)
@@ -51,7 +41,6 @@ def slid_canny(img, sigma=0.25):
 
 
 def pSLID(img, thresh=150):
-    """find all lines using different settings"""
     segments = []
     i = 0
     for key, arr in enumerate(CLAHE_PARAMS):
@@ -59,7 +48,6 @@ def pSLID(img, thresh=150):
         curr_segments = list(slid_detector(slid_canny(tmp), thresh))
         segments += curr_segments
         i += 1
-        # print("FILTER: {} {} : {}".format(i, arr, len(curr_segments)))
     return segments
 
 
@@ -97,12 +85,9 @@ def SLID(img, segments):
 
     def height(line, pt):
         v = np.cross(arr(line[1])-arr(line[0]), arr(pt)-arr(line[0]))
-        # Using dist() to speed up distance look-up since the 2-norm
-        # is used many times
         return np.linalg.norm(v)/dist(line[1], line[0])
 
     def are_similar(l1, l2):
-        '''See Sec.3.2.2 in Czyzewski et al.'''
         a = dist(l1[0], l1[1])
         b = dist(l2[0], l2[1])
 
@@ -114,10 +99,7 @@ def SLID(img, segments):
         if x1 < 1e-8 and x2 < 1e-8 and y1 < 1e-8 and y2 < 1e-8:
             return True
 
-        # print("l1: %s, l2: %s" % (str(l1), str(l2)))
-        # print("x1: %f, x2: %f, y1: %f, y2: %f" % (x1, x2, y1, y2))
         gamma = 0.25 * (x1+x2+y1+y2)
-        # print("gamma:", gamma)
 
         img_width = 500
         img_height = 282
@@ -126,7 +108,6 @@ def SLID(img, segments):
         w = np.pi/2 / np.sqrt(np.sqrt(A))
         t_delta = p*w
         t_delta = 0.0625
-        # t_delta = 0.05
 
         delta = (a+b) * t_delta
 
@@ -153,7 +134,6 @@ def SLID(img, segments):
 
     for l in segments:
         h = hash(str(l))
-        # Initialize the line
         hashmap[h] = l
         group[h] = set([h])
         parents[h] = h
@@ -161,8 +141,6 @@ def SLID(img, segments):
         wid = l[0][0] - l[1][0]
         hei = l[0][1] - l[1][1]
 
-        # Divide lines into more horizontal vs more vertical
-        # to speed up comparison later
         if abs(wid) < abs(hei):
             pregroup[0].append(l)
         else:
@@ -172,7 +150,6 @@ def SLID(img, segments):
     for i in range(len(lines)):
         l1 = lines[i]
         h1 = hash(str(l1))
-        # We're looking for the root line of each disjoint set
         if parents[h1] != h1:
             continue
         for j in range(i+1, len(lines)):
@@ -181,7 +158,6 @@ def SLID(img, segments):
             if parents[h2] != h2:
                 continue
             if are_similar(l1, l2):
-                # Merge lines into a single disjoint set
                 union(h1, h2)
 
     for h in group:
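Taken together, pSLID collects Hough segments under the four CLAHE presets and SLID merges near-collinear segments through the union/parents disjoint sets above. A hypothetical end-to-end call; the image path is made up and slid_tendency's role is inferred from the import in llr.py:

```python
import cv2
from slid import pSLID, SLID, slid_tendency

img = cv2.imread("board.jpg")        # hypothetical input photo
segments = pSLID(img)                # raw segments from 4 CLAHE passes
raw_lines = SLID(img, segments)      # merge near-collinear segments
lines = slid_tendency(raw_lines)     # final lines handed to LLR()
```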
 
 
 
 
 
train.py CHANGED
@@ -1,3 +1,9 @@
+# This module trains the CNN based on the labels provided in ./data/CNN
+# Note that data must be first split into train, validation, and test data
+# by running split_data.py.
+# Reference:
+# https://towardsdatascience.com/a-single-function-to-streamline-image-classification-with-keras-bd04f5cfe6df
+
 from matplotlib import pyplot as plt
 from tensorflow.keras.preprocessing.image import ImageDataGenerator
 from tensorflow.keras.models import Sequential
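For context, a minimal sketch of the flow the new header describes: generators reading the split directories, then a standard Keras fit loop. The directory names under ./data/CNN, the image size, and the model body are assumptions for illustration, not the committed code:

```python
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

# Assumed layout from split_data.py: data/CNN/{train,validation}/<label>/
train_gen = ImageDataGenerator(rescale=1./255).flow_from_directory(
    "data/CNN/train", target_size=(150, 150), class_mode="categorical")
val_gen = ImageDataGenerator(rescale=1./255).flow_from_directory(
    "data/CNN/validation", target_size=(150, 150), class_mode="categorical")

# Placeholder CNN body; the committed architecture is not shown in this diff.
model = Sequential([
    Conv2D(32, (3, 3), activation="relu", input_shape=(150, 150, 3)),
    MaxPooling2D(),
    Flatten(),
    Dense(train_gen.num_classes, activation="softmax"),
])
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])
model.fit(train_gen, validation_data=val_gen, epochs=10)
```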