shaw committed
Commit 381c8b9
2 Parent(s): 0b18231 fb24eda

Merge pull request #4 from blindcrone/webend

Files changed (8)
  1. list.sh +6 -0
  2. main.py +1 -1
  3. nerf/renderer.py +4 -1
  4. obj2glb.py +11 -0
  5. server.cpp +166 -0
  6. threadpool.h +145 -0
  7. train.sh +3 -0
  8. upload.sh +6 -0
list.sh ADDED
@@ -0,0 +1,6 @@
+ #!/bin/bash
+ export AWS_SECRET_ACCESS_KEY=$(cat .env.local | grep AWS_SECRET | cut -d "\"" -f 2)
+ export AWS_ACCESS_KEY=$(cat .env.local | grep AWS_ACCESS | cut -d "\"" -f 2)
+ aws s3 ls models.webaverse.com | grep glb | cut -d " " -f 6 | cut -d "." -f 1
+
+
main.py CHANGED
@@ -157,4 +157,4 @@ if __name__ == '__main__':
  trainer.test(test_loader)
 
  if opt.save_mesh:
- trainer.save_mesh(resolution=256)
+ trainer.save_mesh(resolution=256)
nerf/renderer.py CHANGED
@@ -296,8 +296,11 @@ class NeRFRenderer(nn.Module):
  fp.write(f'Ns 0.000000 \n')
  fp.write(f'map_Kd {name}albedo.png \n')
 
+ os.system(f"blender -b -P obj2glb.py")
+
  _export(v, f)
 
+
  def run(self, rays_o, rays_d, num_steps=128, upsample_steps=128, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, **kwargs):
  # rays_o, rays_d: [B, N, 3], assumes B == 1
  # bg_color: [BN, 3] in range [0, 1]
@@ -642,4 +645,4 @@ class NeRFRenderer(nn.Module):
  else:
  results = _run(rays_o, rays_d, **kwargs)
 
- return results
+ return results
obj2glb.py ADDED
@@ -0,0 +1,11 @@
+ # using blender, open an input.obj file, and export it as output.glb
+ import bpy
+ # import input.obj file
+ bpy.ops.import_scene.obj(filepath='trial/mesh/mesh.obj')
+
+ # select all
+ bpy.ops.object.select_all(action='SELECT')
+
+ # export output.glb file
+ bpy.ops.export_scene.gltf(filepath="model.glb")
+ print('done')
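
Note: obj2glb.py is meant to be driven by Blender in background mode, via the same blender -b -P obj2glb.py call this commit adds to nerf/renderer.py. A minimal sketch of running the conversion by hand, assuming Blender is on PATH and a prior --save_mesh run has already written trial/mesh/mesh.obj:

# Hypothetical manual invocation; paths come from obj2glb.py itself.
blender -b -P obj2glb.py   # -b = run headless (no GUI), -P = execute the Python script
ls -lh model.glb           # obj2glb.py exports the imported scene to ./model.glb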
server.cpp ADDED
@@ -0,0 +1,166 @@
+ #include <cstdio>
+ #include <iostream>
+ #include <algorithm>
+ #include <memory>
+ #include <string>
+ #include <thread>
+ #include <array>
+ #include <queue>
+ #include <mutex>
+ #include <vector>
+ #include <filesystem>
+ #include <crow.h>
+ #include <ranges>
+ #include "threadpool.h"
+ #include <sstream>
+ #include <chrono>
+ #include <unordered_map>
+ #define CROW_MAIN
+
+ using std::string;
+ using std::mutex;
+ using std::lock_guard;
+ using std::make_shared;
+ using std::queue;
+ using std::vector;
+ namespace fs = std::filesystem;
+ namespace rv = std::ranges::views;
+
+ constexpr string to_st(const auto& i){ std::stringstream ss; ss << i; return ss.str(); }
+ template<typename T>
+ constexpr T st_to(const string& s){
+     T t(0);
+     std::stringstream ss(s);
+     ss >> t;
+     return t;
+ }
+
+ static inline string uid(const std::string& s){
+     std::stringstream ss;
+     std::hash<string> h;
+     const auto t0 = std::chrono::system_clock::now();
+     ss << s << '|' << std::chrono::duration_cast<std::chrono::nanoseconds>(t0.time_since_epoch()).count();
+     return to_st(h(ss.str()));
+ }
+
+ static inline string exec(const char* cmd) {
+     std::array<char, 128> buffer;
+     std::unique_ptr<FILE, decltype(&pclose)> pipe(popen(cmd, "r"), pclose);
+     string result;
+     if (!pipe)
+         return "Command failed";
+     else {
+         while (fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr)
+             result += buffer.data();
+     }
+     return result;
+ }
+
+ constexpr auto reify(std::ranges::range auto&& r){ return vector(r.begin(), r.end()); }
+
+ constexpr string strip(const string& s){ return s; }
+
+ constexpr string strip(string s, char ch){
+     s.erase(std::remove_if(s.begin(), s.end(), [=](char c){ return c == ch; }), s.end());
+     return s;
+ }
+
+ constexpr string strip(string s, char ch, auto... chs){
+     return strip(strip(s, ch), chs...);
+ }
+
+ constexpr vector<string> splitOn(const string& s, const string& delim){
+     vector<string> ret;
+     long long int start = 0;
+     for(size_t dp = s.find_first_of(delim, 0); start >= 0
+        ; start = (dp == std::string::npos) ? -1 : dp + 1, dp = s.find_first_of(delim, start))
+         if(auto n = s.substr(start, dp - start); !n.empty())
+             ret.emplace_back(n);
+     return ret;
+ }
+
+ template <typename T>
+ constexpr auto q_to_v(queue<T> qcopy){
+     vector<T> v;
+     v.reserve(qcopy.size());
+     while(!qcopy.empty())
+         v.push_back(qcopy.front()), qcopy.pop();
+     return v;
+ }
+
+ int main(){
+     crow::SimpleApp app;
+     typedef std::array<string, 2> guy;
+     auto commissions = make_shared<queue<guy>>();
+     auto queue_mutex = make_shared<mutex>()
+        , train_mutex = make_shared<mutex>();
+     auto pool = make_shared<threadpool<>>(avail_threads() / 2);
+
+     auto run = [=](const string& cmd){
+         CROW_LOG_INFO << "running \'" << cmd;
+         return exec(cmd.c_str());
+     };
+
+     auto poppe = [=](){
+         lock_guard<mutex> qlock(*queue_mutex);
+         commissions->pop();
+         CROW_LOG_INFO << commissions->size() << " left in queue";
+     };
+
+     auto training_loop = [=](){
+         lock_guard<mutex> lock(*train_mutex);
+         while(!commissions->empty()){
+             auto& [id, prompt] = commissions->front();
+             CROW_LOG_INFO << "Launched training for prompt: " + prompt;
+             run(string("sh train.sh \"") + prompt + "\"");
+             CROW_LOG_INFO << run(string("sh upload.sh ") + id);
+             CROW_LOG_INFO << "Finished training for prompt: " + prompt;
+             poppe();
+         }
+     };
+
+     auto enqueue = [=](const guy& thing){
+         lock_guard<mutex> lock(*queue_mutex);
+         commissions->push(thing);
+         auto& [name, prompt] = thing;
+         CROW_LOG_INFO << name << " queued with prompt: " << prompt;
+     };
+
+     CROW_ROUTE(app, "/create")([=](const crow::request& req){
+         crow::json::wvalue ret;
+         if(auto prompt = req.url_params.get("prompt"); prompt == nullptr){
+             CROW_LOG_INFO << "No prompt specified";
+             ret["error"] = "No prompt given";
+         } else {
+             CROW_LOG_INFO << prompt << " commissioned";
+             auto id = uid(prompt);
+             enqueue({to_st(id), strip(prompt, '\'', '\"')});
+             pool->enqueue(training_loop);
+             CROW_LOG_INFO << "Launched training loop";
+             ret["id"] = id;
+         }
+         return ret;
+     });
+
+     CROW_ROUTE(app, "/list")([=](){
+         auto l = splitOn(run("sh list.sh"), "\n");
+         crow::json::wvalue ret;
+         for(int k = 0; auto& [i, p] : q_to_v(*commissions))
+             ret["pending"][k++] = {{ "id", i }, { "prompt", p}};
+         ret["finished"] = l;
+         return ret;
+     });
+
+     CROW_ROUTE(app, "/get/<string>")([=](const crow::request& req, crow::response& res, const string n){
+         if(auto l = reify( splitOn(run("sh list.sh"), "\n") | rv::filter([=](const string& s){ return s == n; })); !l.empty())
+             res.redirect(string("https://s3.us-west-2.amazonaws.com/models.webaverse.com/") + n + ".glb");
+         else if(auto q = reify( q_to_v(*commissions)
+                | rv::filter([=](const guy& i){ return i[0] == n; })); !q.empty())
+             res.code = 209;
+         else
+             res.code = 404;
+         res.end();
+     });
+
+     app.port(80).run();
+ }
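
The three Crow routes above can be exercised from the command line. A rough sketch, assuming the server is running locally on the port hard-coded above (80); the prompt and id values are placeholders, and the response shapes follow the handlers in server.cpp:

# /create queues a commission and returns its id as JSON, e.g. {"id":"<hash>"}
curl 'http://localhost/create?prompt=a%20small%20ceramic%20teapot'
# /list returns the pending commissions plus the finished model names from list.sh
curl 'http://localhost/list'
# /get/<id> redirects to the S3 .glb once finished, answers 209 while still queued, 404 otherwise
curl -iL 'http://localhost/get/<hash>'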
threadpool.h ADDED
@@ -0,0 +1,145 @@
+ #pragma once
+ #include <atomic>
+ #include <condition_variable>
+ #include <functional>
+ #include <future>
+ #include <mutex>
+ #include <queue>
+ #include <utility>
+ #include <vector>
+ #include <concepts>
+ #include <thread>
+ static inline size_t avail_threads(){
+     return std::thread::hardware_concurrency();
+ }
+
+ template <typename F, typename... Args>
+ using irt = std::invoke_result_t<F, Args...>;
+
+ template <typename F, typename... Args>
+ static inline auto taskify(const F& funk, Args... args){
+     return std::bind(funk, std::forward<Args...>(args)...);
+ }
+
+ static inline auto promitask(const std::invocable<> auto& funk){
+     typedef irt<decltype(funk)> ret;
+     auto pr = std::make_shared<std::promise<ret>>();
+     if constexpr (std::same_as<irt<decltype(funk)>, void>)
+         return make_pair([=](){ funk(), pr->set_value(); }, pr);
+     else
+         return make_pair([=](){ pr->set_value(funk()); }, pr);
+ }
+
+ static inline auto await_many(const std::ranges::range auto& fu){
+     std::for_each(fu.begin(), fu.end(), [](auto&& f){ f.wait(); });
+ }
+
+ typedef std::function<void()> tasque;
+
+ template <typename Thrd = std::jthread>
+ class threadpool {
+     /// If true the queue thread should exit
+     std::atomic<bool> done;
+
+     /// The thread object associated with this queue
+     std::vector<Thrd> queue_threads;
+     /// A queue of functions that will be executed on the queue thread
+     std::queue<tasque> work_queue;
+
+     /// The mutex used in the condition variable
+     std::mutex queue_mutex;
+
+     /// The condition variable that waits for a new function to be inserted in the
+     /// queue
+     std::condition_variable cond;
+
+     /// This function executes on the queue_thread
+     void queue_runner() {
+         while (!done) {
+             tasque func;
+             {
+                 std::unique_lock<std::mutex> lock(queue_mutex);
+                 cond.wait( lock
+                          , [this]() { return work_queue.empty() == false || done; });
+
+                 if (!done){
+                     swap(func, work_queue.front());
+                     work_queue.pop();
+                 }
+             }
+             if (func) func();
+         }
+     }
+
+     void qup(const std::invocable<> auto& f){
+         std::lock_guard<std::mutex> lock(queue_mutex);
+         work_queue.push(f);
+         cond.notify_one();
+     }
+
+ public:
+     template <typename F, typename... Args>
+     void enqueue(const F& func, Args... args) requires std::invocable<F, Args...> {
+         qup(taskify(func, args...));
+     }
+
+     template <typename F, typename... Args>
+     auto inquire(const F& func, Args... args) requires std::invocable<F, Args...> {
+         auto [t, pr] = promitask(taskify(func, args...));
+         auto fut = pr->get_future();
+         enqueue(t);
+         return fut;
+     }
+
+     void clear() {
+         {
+             std::lock_guard<std::mutex> lock(queue_mutex);
+             while(!work_queue.empty())
+                 work_queue.pop();
+         }
+         sync();
+     }
+
+     void sync(){
+         std::atomic<size_t> n(0);
+         const size_t m = queue_threads.size();
+         auto present = [&](){ ++n; size_t l = n.load(); while(l < m) l = n.load(); };
+         std::vector<std::future<void>> fu;
+         std::ranges::generate_n(std::back_inserter(fu), m, [=, this](){ return inquire(present); });
+         await_many(fu);
+     }
+
+     threadpool(size_t n, size_t res) : done(false)
+                                      , queue_threads(n ? std::clamp(n, size_t(1), avail_threads() - res)
+                                                        : std::max(size_t(1), avail_threads() - res)) {
+         for(auto& i:queue_threads){
+             Thrd tmp(&threadpool::queue_runner, this);
+             std::swap(i, tmp);
+         }
+     }
+     threadpool(size_t n) : threadpool(n, 0) {}
+     threadpool() : threadpool(0, 1) {}
+
+     ~threadpool() {
+         sync();
+         done.store(true);
+         cond.notify_all();
+     }
+
+     threadpool(const threadpool& other) : work_queue(other.work_queue), done(false) {
+         for(auto& i:queue_threads){
+             Thrd tmp(&threadpool::queue_runner, this);
+             std::swap(i, tmp);
+         }
+     }
+
+     threadpool& operator=(const threadpool& other){
+         clear();
+         work_queue = other.work_queue;
+         return *this;
+     }
+     size_t size() const { return queue_threads.size(); }
+     threadpool& operator=(threadpool&& other) = default;
+     threadpool(threadpool&& other) = default;
+ };
+
train.sh ADDED
@@ -0,0 +1,3 @@
+ #!/bin/bash
+ rm -rf trial/checkpoints/*
+ python main.py --save_mesh --text "$1" --workspace trial -O
upload.sh ADDED
@@ -0,0 +1,6 @@
+ #!/bin/bash
+ export AWS_SECRET_ACCESS_KEY=$(cat .env.local | grep AWS_SECRET | cut -d "\"" -f 2)
+ export AWS_ACCESS_KEY=$(cat .env.local | grep AWS_ACCESS | cut -d "\"" -f 2)
+ aws s3 cp model.glb 's3://models.webaverse.com/'$1'.glb'
+ rm model.glb
+
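
Taken together, the shell scripts mirror what training_loop in server.cpp runs for each commission. A hand-run sketch of that pipeline, with example prompt and id values (.env.local must contain the AWS keys that upload.sh and list.sh read):

# Hypothetical manual run of the per-commission pipeline driven by server.cpp.
sh train.sh "a small ceramic teapot"   # trains, saves trial/mesh/mesh.obj, then converts it to model.glb via Blender
sh upload.sh 1234567890                # uploads model.glb as s3://models.webaverse.com/1234567890.glb and removes the local copy
sh list.sh                             # lists the finished model names now in the bucket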