LanHarmony committed
Commit fa02329
1 Parent(s): 69af532

introduce control net from diffusers

Files changed (3)
  1. app.py +0 -44
  2. image/placeholder.txt +0 -0
  3. visual_foundation_models.py +2 -13
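For context on the change: the vendored ControlNet checkout, its symlink setup, and the OmegaConf/ldm create_model() path removed below are replaced by pretrained diffusers pipelines. A minimal sketch of that pattern follows; it is not the app's exact code, and the hub IDs ('lllyasviel/sd-controlnet-canny', 'runwayml/stable-diffusion-v1-5') plus the input path image/example.png are assumptions for illustration.

import torch
import cv2
import numpy as np
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# Build a 3-channel Canny edge map to condition the pipeline on.
img = np.array(Image.open('image/example.png').convert('RGB'))  # hypothetical input
edges = cv2.Canny(img, 100, 200)
control_image = Image.fromarray(np.stack([edges] * 3, axis=-1))

# Pretrained hub weights replace create_model() plus a local config file.
controlnet = ControlNetModel.from_pretrained(
    'lllyasviel/sd-controlnet-canny', torch_dtype=torch.float16)  # assumed model ID
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    'runwayml/stable-diffusion-v1-5', controlnet=controlnet,
    torch_dtype=torch.float16).to('cuda')

result = pipe('a photo of a cat', image=control_image,
              num_inference_steps=20).images[0]
result.save('image/controlnet_out.png')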
app.py CHANGED
@@ -42,24 +42,7 @@ Since Visual ChatGPT is a text language model, Visual ChatGPT must use tools to
  The thoughts and observations are only visible for Visual ChatGPT, Visual ChatGPT should remember to repeat important information in the final response for Human.
  Thought: Do I need to use a tool? {agent_scratchpad}"""

- import subprocess
-
- def execute_cmd(cmd):
-     output = subprocess.check_output(cmd, shell=True)
-     return output
-
- execute_cmd('ln -s ControlNet/ldm ./ldm')
- execute_cmd('ln -s ControlNet/cldm ./cldm')
- execute_cmd('ln -s ControlNet/annotator ./annotator')
- print(execute_cmd('nvidia-smi'))
- print(execute_cmd('nvcc -V'))
-
- from diffusers import StableDiffusionPipeline
- from diffusers import StableDiffusionInpaintPipeline
- from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
  from visual_foundation_models import *
- from omegaconf import OmegaConf
- from ldm.util import instantiate_from_config
  from langchain.agents.initialize import initialize_agent
  from langchain.agents.tools import Tool
  from langchain.chains.conversation.memory import ConversationBufferMemory
@@ -68,10 +51,6 @@ from langchain.vectorstores import Weaviate
  import re
  import gradio as gr

- try:
-     os.mkdir('./image')
- except OSError as error:
-     print(error)

  def cut_dialogue_history(history_memory, keep_last_n_words=500):
      tokens = history_memory.split()
@@ -87,29 +66,6 @@ def cut_dialogue_history(history_memory, keep_last_n_words=500):
      paragraphs = paragraphs[1:]
      return '\n' + '\n'.join(paragraphs)

- def get_new_image_name(org_img_name, func_name="update"):
-     head_tail = os.path.split(org_img_name)
-     head = head_tail[0]
-     tail = head_tail[1]
-     name_split = tail.split('.')[0].split('_')
-     this_new_uuid = str(uuid.uuid4())[0:4]
-     if len(name_split) == 1:
-         most_org_file_name = name_split[0]
-         recent_prev_file_name = name_split[0]
-         new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name)
-     else:
-         assert len(name_split) == 4
-         most_org_file_name = name_split[3]
-         recent_prev_file_name = name_split[0]
-         new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name)
-     return os.path.join(head, new_file_name)
-
- def create_model(config_path, device):
-     config = OmegaConf.load(config_path)
-     OmegaConf.update(config, "model.params.cond_stage_config.params.device", device)
-     model = instantiate_from_config(config.model).cpu()
-     print(f'Loaded model config from [{config_path}]')
-     return model

  class ConversationBot:
      def __init__(self):
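The get_new_image_name helper removed above encodes an edit chain in the filename as <uuid>_<func>_<previous>_<original>.png, so any derived file still names its original. Presumably it now reaches app.py through the visual_foundation_models wildcard import; that is an assumption. A worked sketch of the convention, with shortened example uuids:

get_new_image_name('image/cat.png', func_name='canny')
# -> 'image/1a2b_canny_cat_cat.png'      (first edit: previous == original)
get_new_image_name('image/1a2b_canny_cat_cat.png', func_name='pix2pix')
# -> 'image/3c4d_pix2pix_1a2b_cat.png'   (original 'cat' survives in the last slot)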
image/placeholder.txt ADDED
File without changes
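Presumably the checked-in placeholder keeps the image/ directory present in the repo, which is why the runtime try/except around os.mkdir('./image') could be dropped from app.py above.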
visual_foundation_models.py CHANGED
@@ -1,6 +1,3 @@
- import os
-
- import diffusers.utils
  from diffusers import StableDiffusionPipeline
  from diffusers import StableDiffusionInpaintPipeline
  from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
@@ -10,23 +7,15 @@ from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector
  from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation
  from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
  from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
- # from ldm.util import instantiate_from_config
- # from ControlNet.cldm.model import create_model, load_state_dict
- # from ControlNet.cldm.ddim_hacked import DDIMSampler
- # from ControlNet.annotator.canny import CannyDetector
- # from ControlNet.annotator.mlsd import MLSDdetector
- # from ControlNet.annotator.hed import HEDdetector, nms
- # from ControlNet.annotator.openpose import OpenposeDetector
- # from ControlNet.annotator.uniformer import UniformerDetector
- # from ControlNet.annotator.midas import MidasDetector
+
  from PIL import Image
  import torch
  import numpy as np
  import uuid
- import einops
  from pytorch_lightning import seed_everything
  import cv2
  import random
+ import os

  def ade_palette():
      return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
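Per the hunk header above, the commented-out ControlNet.annotator imports give way to controlnet_aux. A hedged sketch of how those detectors are typically driven (the 'lllyasviel/ControlNet' checkpoint name and the input path are assumptions, not this repo's verified values):

from PIL import Image
from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector

# Each detector pulls its annotator weights from the hub and maps a PIL
# image to the matching conditioning map (pose, line segments, soft edges).
openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
mlsd = MLSDdetector.from_pretrained('lllyasviel/ControlNet')
hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')

source = Image.open('image/example.png')  # hypothetical input
pose_map = openpose(source)
line_map = mlsd(source)
edge_map = hed(source)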