MilesCranmer committed • 7b2d6ac
Parent(s): c862e70

Split into multiple files

- README.md +7 -5
- dataset.jl +7 -0
- eureqa.jl +3 -30
- hyperparams.jl +31 -0
- paralleleureqa.jl +0 -3
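
Taken together, the changes below turn one monolithic script into a small include chain. A minimal sketch of how the pieces load and run after this commit (the `fullRun` call mirrors the README example below; the option values are only those shown there):

```julia
# Layout after this commit, as reflected in the diffs below:
#   hyperparams.jl    -- operator definitions and search constants
#   dataset.jl        -- the X, y arrays to fit
#   eureqa.jl         -- core search code; now includes the two files above
#   paralleleureqa.jl -- threaded driver; includes eureqa.jl
include("paralleleureqa.jl")   # pulls in eureqa.jl -> hyperparams.jl, dataset.jl
fullRun(10,
    npop=100,
    ncyclesperiteration=1000,
    fractionReplaced=0.1f0,
    verbosity=100)
```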
README.md
CHANGED
@@ -2,15 +2,14 @@
 
 You can run the performance benchmark with `./benchmark.sh`.
 
-Modify the
+Modify the hyperparameters in `hyperparams.jl` and the dataset in `dataset.jl`
 (see below for options). Then, in a new Julia file called
-`myfile.jl`, you can write:
+`myfile.jl`, or the interpreter, you can write:
 
 ```julia
 include("paralleleureqa.jl")
 fullRun(10,
     npop=100,
-    annealing=true,
     ncyclesperiteration=1000,
     fractionReplaced=0.1f0,
     verbosity=100)
@@ -29,7 +28,7 @@ Run it with threading turned on using:
 
 ## Modification
 
-You can change the binary and unary operators in `
+You can change the binary and unary operators in `hyperparams.jl` here:
 ```julia
 const binops = [plus, mult]
 const unaops = [sin, cos, exp];
@@ -78,7 +77,10 @@ weights = [8, 1, 1, 1, 0.1, 2]
 
 # TODO
 
-- [ ] Explicit constant
+- [ ] Explicit constant optimization on hall-of-fame
+    - Create method to find and return all constants, from left to right
+    - Create method to find and set all constants, in same order
+    - Pull up some optimization algorithm and add it. No need for gradients; that's a headache. Keep the package small!
 - [ ] Hyperparameter tune
 - [ ] Create a Python interface
 - [ ] Create a benchmark for accuracy
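
Since the README now points readers at `hyperparams.jl` for operator changes, here is a minimal sketch of such an edit; the `sub` helper is hypothetical and simply follows the `plus`/`mult` style defined in `hyperparams.jl` below.

```julia
# Hypothetical edit to hyperparams.jl: add subtraction as a binary operator
# and drop exp from the unary set. `sub` is an illustrative name only.
sub(x::Float32, y::Float32)::Float32 = x - y
const binops = [plus, mult, sub]
const unaops = [sin, cos]
```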
dataset.jl
ADDED
@@ -0,0 +1,7 @@
+# Here is the function we want to learn (x2^2 + cos(x3) + 5)
+
+##########################
+# # Dataset to learn
+const X = convert(Array{Float32, 2}, randn(100, 5)*2)
+const y = convert(Array{Float32, 1}, ((cx,)->cx^2).(X[:, 2]) + cos.(X[:, 3]) .- 5)
+##########################
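
Because the dataset now lives in its own file, swapping the target function means editing only `dataset.jl`. A hypothetical variant, keeping the same `Float32` conventions as above:

```julia
# Hypothetical replacement for dataset.jl: learn x1*x2 + sin(x4) instead.
const X = convert(Array{Float32, 2}, randn(100, 5)*2)
const y = convert(Array{Float32, 1}, X[:, 1] .* X[:, 2] + sin.(X[:, 4]))
```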
eureqa.jl
CHANGED
@@ -1,35 +1,8 @@
-
-
-
-
-##########################
-# # Allowed operators
-# (Apparently using const for globals helps speed)
-const binops = [plus, mult]
-const unaops = [sin, cos, exp]
-##########################
-
-# How many equations to search when replacing
-const ns=10;
-
-# Here is the function we want to learn (x2^2 + cos(x3) + 5)
-#
-##########################
-# # Dataset to learn
-const X = convert(Array{Float32, 2}, randn(100, 5)*2)
-const y = convert(Array{Float32, 1}, ((cx,)->cx^2).(X[:, 2]) + cos.(X[:, 3]) .- 5)
-##########################
-
-##################
-# Hyperparameters
-# How much to punish complexity
-const parsimony = 1f-3
-# How much to scale temperature by (T between 0 and 1)
-const alpha = 10.0f0
-const maxsize = 20
+include("hyperparams.jl")
+include("dataset.jl")
+
 const maxdegree = 2
 const actualMaxsize = maxsize + maxdegree
-##################
 
 id = (x,) -> x
 const nuna = size(unaops)[1]
hyperparams.jl
ADDED
@@ -0,0 +1,31 @@
+# Define allowed operators
+plus(x::Float32, y::Float32)::Float32 = x+y
+mult(x::Float32, y::Float32)::Float32 = x*y;
+
+##########################
+# # Allowed operators
+# (Apparently using const for globals helps speed)
+const binops = [plus, mult]
+const unaops = [sin, cos, exp]
+##########################
+
+# How many equations to search when replacing
+const ns=10;
+
+##################
+# Hyperparameters
+# How much to punish complexity
+const parsimony = 1f-3
+# How much to scale temperature by (T between 0 and 1)
+const alpha = 10.0f0
+# Max size of an equation (too large will slow program down)
+const maxsize = 20
+# Whether to migrate between threads (you should)
+const migration = true
+# Whether to re-introduce best examples seen (helps a lot)
+const hofMigration = true
+# Fraction of population to replace with hall of fame
+const fractionReplacedHof = 0.1f0
+##################
+
+
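
As an illustration of where constants like `parsimony` (complexity penalty) and `alpha` (temperature scale) typically enter, here is a generic simulated-annealing acceptance test. This is a sketch of the general technique only, not necessarily the scoring rule this repository uses.

```julia
# Generic annealing-style acceptance test (illustration only; assumes
# hyperparams.jl has been included so `parsimony` and `alpha` are defined).
function accepts(oldloss::Float32, newloss::Float32,
                 complexity::Int, T::Float32)::Bool
    # Penalize complexity, then always keep improvements and accept
    # worse candidates with a temperature-dependent probability.
    delta = (newloss + parsimony * Float32(complexity)) - oldloss
    return delta < 0f0 || rand() < exp(-delta / (alpha * T + 1f-8))
end
```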
paralleleureqa.jl
CHANGED
@@ -1,9 +1,6 @@
 include("eureqa.jl")
 
 const nthreads = Threads.nthreads()
-const migration = true
-const hofMigration = true
-const fractionReplacedHof = 0.1f0
 
 # List of the best members seen all time
 mutable struct HallOfFame
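
With the migration flags moved into `hyperparams.jl`, this file is left as the threaded driver. A quick way to confirm threading is actually enabled (assuming a standard Julia install; start Julia with the `JULIA_NUM_THREADS` environment variable set, e.g. `JULIA_NUM_THREADS=4 julia`):

```julia
include("paralleleureqa.jl")
println("running with ", Threads.nthreads(), " threads")
```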