phyloforfun committed on
Commit: a10b606
Parent(s): 6b88c8f

fix safety check

vouchervision/OCR_google_cloud_vision.py CHANGED
@@ -809,42 +809,42 @@ class SafetyCheck():
         return credentials
 
     def check_for_inappropriate_content(self, file_stream):
-        # try:
-        LEVEL = 2
-        # content = file_stream.read()
-        file_stream.seek(0) # Reset file stream position to the beginning
-        content = file_stream.read()
-        image = vision.Image(content=content)
-        response = self.client.safe_search_detection(image=image)
-        safe = response.safe_search_annotation
-
-        likelihood_name = (
-            "UNKNOWN",
-            "VERY_UNLIKELY",
-            "UNLIKELY",
-            "POSSIBLE",
-            "LIKELY",
-            "VERY_LIKELY",
-        )
-        print("Safe search:")
-
-        print(f" adult*: {likelihood_name[safe.adult]}")
-        print(f" medical*: {likelihood_name[safe.medical]}")
-        print(f" spoofed: {likelihood_name[safe.spoof]}")
-        print(f" violence*: {likelihood_name[safe.violence]}")
-        print(f" racy: {likelihood_name[safe.racy]}")
-
-        # Check the levels of adult, violence, racy, etc. content.
-        if (safe.adult > LEVEL or
-            safe.medical > LEVEL or
-            # safe.spoof > LEVEL or
-            safe.violence > LEVEL #or
-            # safe.racy > LEVEL
-            ):
-            print("Found violation")
-            return True # The image violates safe search guidelines.
-
-        print("Found NO violation")
-        return False # The image is considered safe.
-        # except:
-        #     return False # The image is considered safe. TEMPOROARY FIX TODO
+        try:
+            LEVEL = 2
+            # content = file_stream.read()
+            file_stream.seek(0) # Reset file stream position to the beginning
+            content = file_stream.read()
+            image = vision.Image(content=content)
+            response = self.client.safe_search_detection(image=image)
+            safe = response.safe_search_annotation
+
+            likelihood_name = (
+                "UNKNOWN",
+                "VERY_UNLIKELY",
+                "UNLIKELY",
+                "POSSIBLE",
+                "LIKELY",
+                "VERY_LIKELY",
+            )
+            print("Safe search:")
+
+            print(f" adult*: {likelihood_name[safe.adult]}")
+            print(f" medical*: {likelihood_name[safe.medical]}")
+            print(f" spoofed: {likelihood_name[safe.spoof]}")
+            print(f" violence*: {likelihood_name[safe.violence]}")
+            print(f" racy: {likelihood_name[safe.racy]}")
+
+            # Check the levels of adult, violence, racy, etc. content.
+            if (safe.adult > LEVEL or
+                safe.medical > LEVEL or
+                # safe.spoof > LEVEL or
+                safe.violence > LEVEL #or
+                # safe.racy > LEVEL
+                ):
+                print("Found violation")
+                return True # The image violates safe search guidelines.
+
+            print("Found NO violation")
+            return False # The image is considered safe.
+        except:
+            return False # The image is considered safe. TEMPOROARY FIX TODO
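For context, a minimal sketch of how the updated method might be called follows; the SafetyCheck constructor argument and the image path are assumptions, since only check_for_inappropriate_content appears in this diff. With the likelihood_name ordering above, LEVEL = 2 means an adult, medical, or violence rating of POSSIBLE or higher flags the image, and after this commit any exception raised during the check is caught and the image is treated as safe.

# Minimal usage sketch. Assumptions (not in the diff): the SafetyCheck
# constructor signature and the example file path are hypothetical; the
# module path is inferred from the file name shown above.
from io import BytesIO

from vouchervision.OCR_google_cloud_vision import SafetyCheck

checker = SafetyCheck(cfg)  # hypothetical constructor argument

with open("label_image.jpg", "rb") as f:  # hypothetical image path
    stream = BytesIO(f.read())

# LEVEL = 2 ("UNLIKELY"): any adult, medical, or violence rating above that
# index (i.e. POSSIBLE, LIKELY, VERY_LIKELY) returns True. After this commit,
# an exception during the Vision API call is caught and False is returned.
if checker.check_for_inappropriate_content(stream):
    print("Image rejected by the safe-search check")
else:
    print("Image passed the safe-search check")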