Fixed database typo and removed unnecessary class identifier.

Batuhan Berk Başoğlu 2020-10-14 10:10:37 -04:00
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions

@@ -0,0 +1,372 @@
"""
Licensing:
This code is distributed under the MIT license.
Authors:
Original FORTRAN77 version of i4_sobol by Bennett Fox.
MATLAB version by John Burkardt.
PYTHON version by Corrado Chisari
Original Python version of is_prime by Corrado Chisari
Original MATLAB versions of other functions by John Burkardt.
PYTHON versions by Corrado Chisari
Original code is available from
http://people.sc.fsu.edu/~jburkardt/py_src/sobol/sobol.html
Modifications:
Wrapped into Python class [30.10.2017]
"""
import numpy as np
__all__ = ['Sobol']
class Sobol:
def __init__(self):
# Init class variables
self.atmost = None
self.dim_max = None
self.dim_num_save = None
self.initialized = None
self.lastq = None
self.log_max = None
self.maxcol = None
self.poly = None
self.recipd = None
self.seed_save = None
self.v = None
def i4_sobol_generate(self, dim_num, n, skip=1):
"""
i4_sobol_generate generates a Sobol dataset.
Parameters:
Input, integer dim_num, the spatial dimension.
Input, integer N, the number of points to generate.
Input, integer SKIP, the number of initial points to skip.
Output, real R(N,DIM_NUM), the points.
"""
r = np.full((n, dim_num), np.nan)
for j in range(n):
seed = j + skip
r[j, 0:dim_num], next_seed = self.i4_sobol(dim_num, seed)
return r
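# Usage sketch (illustrative, not part of the original module): drawing a small
# two-dimensional Sobol design with the class defined above.
#
#   gen = Sobol()
#   r = gen.i4_sobol_generate(dim_num=2, n=4, skip=1)
#   # r has shape (4, 2); each row is one quasirandom point in [0, 1)^2.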
def i4_bit_hi1(self, n):
"""
i4_bit_hi1 returns the position of the high 1 bit base 2 in an integer.
Example:
+------+-------------+-----
| N | Binary | BIT
+------+-------------+-----
| 0 | 0 | 0
| 1 | 1 | 1
| 2 | 10 | 2
| 3 | 11 | 2
| 4 | 100 | 3
| 5 | 101 | 3
| 6 | 110 | 3
| 7 | 111 | 3
| 8 | 1000 | 4
| 9 | 1001 | 4
| 10 | 1010 | 4
| 11 | 1011 | 4
| 12 | 1100 | 4
| 13 | 1101 | 4
| 14 | 1110 | 4
| 15 | 1111 | 4
| 16 | 10000 | 5
| 17 | 10001 | 5
| 1023 | 1111111111 | 10
| 1024 | 10000000000 | 11
| 1025 | 10000000001 | 11
Parameters:
Input, integer N, the integer to be measured.
N should be nonnegative. If N is nonpositive,
the value will always be 0.
Output, integer BIT, the number of bits base 2.
"""
i = np.floor(n)
bit = 0
while i > 0:
bit += 1
i //= 2
return bit
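# Example (hedged sketch) matching the table above:
#
#   Sobol().i4_bit_hi1(0)     # -> 0
#   Sobol().i4_bit_hi1(17)    # -> 5   (binary 10001)
#   Sobol().i4_bit_hi1(1023)  # -> 10  (binary 1111111111)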
def i4_bit_lo0(self, n):
"""
I4_BIT_LO0 returns the position of the low 0 bit base 2 in an integer.
Example:
+------+------------+----
| N | Binary | BIT
+------+------------+----
| 0 | 0 | 1
| 1 | 1 | 2
| 2 | 10 | 1
| 3 | 11 | 3
| 4 | 100 | 1
| 5 | 101 | 2
| 6 | 110 | 1
| 7 | 111 | 4
| 8 | 1000 | 1
| 9 | 1001 | 2
| 10 | 1010 | 1
| 11 | 1011 | 3
| 12 | 1100 | 1
| 13 | 1101 | 2
| 14 | 1110 | 1
| 15 | 1111 | 5
| 16 | 10000 | 1
| 17 | 10001 | 2
| 1023 | 1111111111 | 11
| 1024 | 10000000000 | 1
| 1025 | 10000000001 | 2
Parameters:
Input, integer N, the integer to be measured.
N should be nonnegative.
Output, integer BIT, the position of the low 0 bit.
"""
bit = 1
i = np.floor(n)
while i != 2 * (i // 2):
bit += 1
i //= 2
return bit
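# Example (hedged sketch) matching the table above:
#
#   Sobol().i4_bit_lo0(0)   # -> 1
#   Sobol().i4_bit_lo0(7)   # -> 4   (binary 111)
#   Sobol().i4_bit_lo0(12)  # -> 1   (binary 1100)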
def i4_sobol(self, dim_num, seed):
"""
i4_sobol generates a new quasirandom Sobol vector with each call.
Discussion:
The routine adapts the ideas of Antonov and Saleev.
Reference:
Antonov, Saleev,
USSR Computational Mathematics and Mathematical Physics,
Volume 19, 1980, pages 252 - 256.
Paul Bratley, Bennett Fox,
Algorithm 659:
Implementing Sobol's Quasirandom Sequence Generator,
ACM Transactions on Mathematical Software,
Volume 14, Number 1, pp. 88-100, 1988.
Bennett Fox,
Algorithm 647:
Implementation and Relative Efficiency of Quasirandom
Sequence Generators,
ACM Transactions on Mathematical Software,
Volume 12, Number 4, pp. 362-376, 1986.
Ilya Sobol,
USSR Computational Mathematics and Mathematical Physics,
Volume 16, pp. 236-242, 1977.
Ilya Sobol, Levitan,
The Production of Points Uniformly Distributed in a Multidimensional
Cube (in Russian),
Preprint IPM Akad. Nauk SSSR,
Number 40, Moscow 1976.
Parameters:
Input, integer DIM_NUM, the number of spatial dimensions.
DIM_NUM must satisfy 1 <= DIM_NUM <= 40.
Input/output, integer SEED, the "seed" for the sequence.
This is essentially the index in the sequence of the quasirandom
value to be generated. On output, SEED has been set to the
appropriate next value, usually simply SEED+1.
If SEED is less than 0 on input, it is treated as though it were 0.
An input value of 0 requests the first (0th) element of the sequence.
Output, real QUASI(DIM_NUM), the next quasirandom vector.
"""
# if 'self.initialized' not in list(globals().keys()):
if self.initialized is None:
self.initialized = 0
self.dim_num_save = -1
if not self.initialized or dim_num != self.dim_num_save:
self.initialized = 1
self.dim_max = 40
self.dim_num_save = -1
self.log_max = 30
self.seed_save = -1
# Initialize (part of) V.
self.v = np.zeros((self.dim_max, self.log_max))
self.v[0:40, 0] = np.transpose([
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
self.v[2:40, 1] = np.transpose([
1, 3, 1, 3, 1, 3, 3, 1,
3, 1, 3, 1, 3, 1, 1, 3, 1, 3,
1, 3, 1, 3, 3, 1, 3, 1, 3, 1,
3, 1, 1, 3, 1, 3, 1, 3, 1, 3])
self.v[3:40, 2] = np.transpose([
7, 5, 1, 3, 3, 7, 5,
5, 7, 7, 1, 3, 3, 7, 5, 1, 1,
5, 3, 3, 1, 7, 5, 1, 3, 3, 7,
5, 1, 1, 5, 7, 7, 5, 1, 3, 3])
self.v[5:40, 3] = np.transpose([
1, 7, 9, 13, 11,
1, 3, 7, 9, 5, 13, 13, 11, 3, 15,
5, 3, 15, 7, 9, 13, 9, 1, 11, 7,
5, 15, 1, 15, 11, 5, 3, 1, 7, 9])
self.v[7:40, 4] = np.transpose([
9, 3, 27,
15, 29, 21, 23, 19, 11, 25, 7, 13, 17,
1, 25, 29, 3, 31, 11, 5, 23, 27, 19,
21, 5, 1, 17, 13, 7, 15, 9, 31, 9])
self.v[13:40, 5] = np.transpose([
37, 33, 7, 5, 11, 39, 63,
27, 17, 15, 23, 29, 3, 21, 13, 31, 25,
9, 49, 33, 19, 29, 11, 19, 27, 15, 25])
self.v[19:40, 6] = np.transpose([
13,
33, 115, 41, 79, 17, 29, 119, 75, 73, 105,
7, 59, 65, 21, 3, 113, 61, 89, 45, 107])
self.v[37:40, 7] = np.transpose([
7, 23, 39])
# Set POLY.
self.poly = [
1, 3, 7, 11, 13, 19, 25, 37, 59, 47,
61, 55, 41, 67, 97, 91, 109, 103, 115, 131,
193, 137, 145, 143, 241, 157, 185, 167, 229, 171,
213, 191, 253, 203, 211, 239, 247, 285, 369, 299]
self.atmost = 2 ** self.log_max - 1
# Find the number of bits in ATMOST.
self.maxcol = self.i4_bit_hi1(self.atmost)
# Initialize row 1 of V.
self.v[0, 0:self.maxcol] = 1
# Things to do only if the dimension changed.
if dim_num != self.dim_num_save:
self.dim_num_save = dim_num
# Initialize the remaining rows of V.
for i in range(2, dim_num + 1):
# The bits of the integer POLY(I) give the form of
# polynomial I.
# Find the degree of polynomial I from the binary encoding.
j = self.poly[i - 1]
m = 0
j //= 2
while j > 0:
j //= 2
m += 1
# Expand this bit pattern to separate
# components of the logical array INCLUD.
j = self.poly[i - 1]
includ = np.zeros(m)
for k in range(m, 0, -1):
j2 = j // 2
includ[k - 1] = (j != 2 * j2)
j = j2
# Calculate the remaining elements of row I as explained
# in Bratley and Fox, section 2.
for j in range(m + 1, self.maxcol + 1):
newv = self.v[i - 1, j - m - 1]
lseed = 1
for k in range(1, m + 1):
lseed *= 2
if includ[k - 1]:
newv = np.bitwise_xor(
int(newv),
int(lseed * self.v[i - 1, j - k - 1]))
self.v[i - 1, j - 1] = newv
# Multiply columns of V by appropriate power of 2.
lseed = 1
for j in range(self.maxcol - 1, 0, -1):
lseed *= 2
self.v[0:dim_num, j - 1] = self.v[0:dim_num, j - 1] * lseed
# RECIPD is 1/(common denominator of the elements in V).
self.recipd = 1.0 / (2 * lseed)
self.lastq = np.zeros(dim_num)
seed = int(np.floor(seed))
if seed < 0:
seed = 0
lseed = 1
if seed == 0:
self.lastq = np.zeros(dim_num)
elif seed == self.seed_save + 1:
# Find the position of the right-hand zero in SEED.
lseed = self.i4_bit_lo0(seed)
elif seed <= self.seed_save:
self.seed_save = 0
self.lastq = np.zeros(dim_num)
for seed_temp in range(int(self.seed_save), int(seed)):
lseed = self.i4_bit_lo0(seed_temp)
for i in range(1, dim_num + 1):
self.lastq[i - 1] = np.bitwise_xor(
int(self.lastq[i - 1]), int(self.v[i - 1, lseed - 1]))
lseed = self.i4_bit_lo0(seed)
elif self.seed_save + 1 < seed:
for seed_temp in range(int(self.seed_save + 1), int(seed)):
lseed = self.i4_bit_lo0(seed_temp)
for i in range(1, dim_num + 1):
self.lastq[i - 1] = np.bitwise_xor(
int(self.lastq[i - 1]), int(self.v[i - 1, lseed - 1]))
lseed = self.i4_bit_lo0(seed)
# Check that the user is not calling too many times!
if self.maxcol < lseed:
print('I4_SOBOL - Fatal error!')
print(' Too many calls!')
print(' MAXCOL = %d\n' % self.maxcol)
print(' L = %d\n' % lseed)
return
# Calculate the new components of QUASI.
quasi = np.zeros(dim_num)
for i in range(1, dim_num + 1):
quasi[i - 1] = self.lastq[i - 1] * self.recipd
self.lastq[i - 1] = np.bitwise_xor(
int(self.lastq[i - 1]), int(self.v[i - 1, lseed - 1]))
self.seed_save = seed
seed += 1
return [quasi, seed]
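# Usage sketch (illustrative, not part of the original module): stepping the
# generator manually with i4_sobol.  The seed returned by each call is the
# index of the next element in the sequence.
#
#   gen = Sobol()
#   seed = 0
#   for _ in range(3):
#       quasi, seed = gen.i4_sobol(dim_num=3, seed=seed)
#       # quasi is a length-3 vector of quasirandom values in [0, 1)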

@@ -0,0 +1,661 @@
import numpy as np
import copy
class Complex:
def __init__(self, dim, func, func_args=(), symmetry=False, bounds=None,
g_cons=None, g_args=()):
self.dim = dim
self.bounds = bounds
self.symmetry = symmetry # TODO: Define the functions to be used
# here in init to avoid if checks
self.gen = 0
self.perm_cycle = 0
# Every cell is stored in a list of its generation,
# e.g., the initial cell is stored in self.H[0] and the
# first generation of new cells is stored in self.H[1], etc.
# When a cell is sub-generated it is removed from this list.
self.H = [] # Storage structure of cells
# Cache of all vertices
self.V = VertexCache(func, func_args, bounds, g_cons, g_args)
# Generate n-cube here:
self.n_cube(dim, symmetry=symmetry)
# TODO: Assign functions to the complex instead
if symmetry:
self.generation_cycle = 1
# self.centroid = self.C0()[-1].x
# self.C0.centroid = self.centroid
else:
self.add_centroid()
self.H.append([])
self.H[0].append(self.C0)
self.hgr = self.C0.homology_group_rank()
self.hgrd = 0 # Complex group rank differential
# self.hgr = self.C0.hg_n
# Build initial graph
self.graph_map()
self.performance = []
self.performance.append(0)
self.performance.append(0)
def __call__(self):
return self.H
def n_cube(self, dim, symmetry=False, printout=False):
"""
Generate the simplicial triangulation of the N-D hypercube
containing 2**n vertices
"""
origin = list(np.zeros(dim, dtype=int))
self.origin = origin
supremum = list(np.ones(dim, dtype=int))
self.supremum = supremum
# tuple versions for indexing
origintuple = tuple(origin)
supremumtuple = tuple(supremum)
x_parents = [origintuple]
if symmetry:
self.C0 = Simplex(0, 0, 0, self.dim) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
i_s = 0
self.perm_symmetry(i_s, x_parents, origin)
self.C0.add_vertex(self.V[supremumtuple])
else:
self.C0 = Cell(0, 0, origin, supremum) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
self.C0.add_vertex(self.V[supremumtuple])
i_parents = []
self.perm(i_parents, x_parents, origin)
if printout:
print("Initial hyper cube:")
for v in self.C0():
v.print_out()
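# Usage sketch (illustrative, not part of the original module): building the
# triangulated unit hypercube for a hypothetical objective function.
#
#   hc = Complex(dim=2, func=lambda x: (x[0] - 0.3) ** 2 + (x[1] - 0.7) ** 2)
#   hc.C0()   # 2**2 + 1 Vertex objects: the 4 corners plus the centroid
#   hc.graph  # incidence list built by graph_map()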
def perm(self, i_parents, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
# Construct required iterator
iter_range = [x for x in range(self.dim) if x not in i_parents]
for i in iter_range:
i2_parents = copy.copy(i_parents)
i2_parents.append(i)
xi2 = copy.copy(xi)
xi2[i] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
# Permutate
self.perm(i2_parents, x_parents2, xi2)
def perm_symmetry(self, i_s, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
xi2 = copy.copy(xi)
xi2[i_s] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
i_s += 1
if i_s == self.dim:
return
# Permutate
self.perm_symmetry(i_s, x_parents2, xi2)
def add_centroid(self):
"""Split the central edge between the origin and supremum of
a cell and add the new vertex to the complex"""
self.centroid = list(
(np.array(self.origin) + np.array(self.supremum)) / 2.0)
self.C0.add_vertex(self.V[tuple(self.centroid)])
self.C0.centroid = self.centroid
# Disconnect origin and supremum
self.V[tuple(self.origin)].disconnect(self.V[tuple(self.supremum)])
# Connect centroid to all other vertices
for v in self.C0():
self.V[tuple(self.centroid)].connect(self.V[tuple(v.x)])
self.centroid_added = True
return
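# Worked example (illustrative): for the unit square with origin (0, 0) and
# supremum (1, 1), add_centroid() inserts the vertex (0.5, 0.5), disconnects
# the long diagonal (0, 0)--(1, 1), and connects the centroid to every corner.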
# Construct incidence array:
def incidence(self):
if self.centroid_added:
self.structure = np.zeros([2 ** self.dim + 1, 2 ** self.dim + 1],
dtype=int)
else:
self.structure = np.zeros([2 ** self.dim, 2 ** self.dim],
dtype=int)
for v in self.C0():
for v2 in v.nn:
self.structure[v.index, v2.index] = 1
return
# A more sparse incidence generator:
def graph_map(self):
""" Make a list of size 2**n + 1 where an entry is a vertex
incidence, each list element contains a list of indexes
corresponding to that entries neighbors"""
self.graph = [[v2.index for v2 in v.nn] for v in self.C0()]
# Graph structure method:
# 0. Capture the indices of the initial cell.
# 1. Generate new origin and supremum scalars based on current generation
# 2. Generate a new set of vertices corresponding to a new
# "origin" and "supremum"
# 3. Connected based on the indices of the previous graph structure
# 4. Disconnect the edges in the original cell
def sub_generate_cell(self, C_i, gen):
"""Subgenerate a cell `C_i` of generation `gen` and
homology group rank `hgr`."""
origin_new = tuple(C_i.centroid)
centroid_index = len(C_i()) - 1
# If generation `gen` does not exist yet, append it
try:
self.H[gen]
except IndexError:
self.H.append([])
# Generate subcubes using every extreme vertex in C_i as a supremum
# and the centroid of C_i as the origin
H_new = [] # list storing all the new cubes split from C_i
for i, v in enumerate(C_i()[:-1]):
supremum = tuple(v.x)
H_new.append(
self.construct_hypercube(origin_new, supremum, gen, C_i.hg_n))
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
if i == centroid_index: # Break out of centroid
break
for j in connections:
C_i()[i].disconnect(C_i()[j])
# Destroy the old cell
if C_i is not self.C0: # Garbage collector does this anyway; not needed
del C_i
# TODO: Recalculate all the homology group ranks of each cell
return H_new
def split_generation(self):
"""
Run sub_generate_cell for every cell in the current complex self.gen
"""
no_splits = False # USED IN SHGO
try:
for c in self.H[self.gen]:
if self.symmetry:
# self.sub_generate_cell_symmetry(c, self.gen + 1)
self.split_simplex_symmetry(c, self.gen + 1)
else:
self.sub_generate_cell(c, self.gen + 1)
except IndexError:
no_splits = True # USED IN SHGO
self.gen += 1
return no_splits # USED IN SHGO
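# Usage sketch (illustrative): one refinement sweep.  Splitting every cell in
# self.H[self.gen] populates self.H[self.gen + 1] and advances the generation
# counter.  The lambda below is a hypothetical objective.
#
#   hc = Complex(dim=2, func=lambda x: sum(x))
#   hc.split_generation()   # subdivides generation 0 into 2**2 sub-cells
#   len(hc.H[1])            # -> 4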
def construct_hypercube(self, origin, supremum, gen, hgr,
printout=False):
"""
Build a hypercube with triangulations symmetric to C0.
Parameters
----------
origin : vec
supremum : vec (tuple)
gen : generation
hgr : parent homology group rank
"""
# Initiate new cell
v_o = np.array(origin)
v_s = np.array(supremum)
C_new = Cell(gen, hgr, origin, supremum)
C_new.centroid = tuple((v_o + v_s) * .5)
# Build new indexed vertex list
V_new = []
for i, v in enumerate(self.C0()[:-1]):
v_x = np.array(v.x)
sub_cell_t1 = v_o - v_o * v_x
sub_cell_t2 = v_s * v_x
vec = sub_cell_t1 + sub_cell_t2
vec = tuple(vec)
C_new.add_vertex(self.V[vec])
V_new.append(vec)
# Add new centroid
C_new.add_vertex(self.V[C_new.centroid])
V_new.append(C_new.centroid)
# Connect new vertices #TODO: Thread into other loop; no need for V_new
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
for j in connections:
self.V[V_new[i]].connect(self.V[V_new[j]])
if printout:
print("A sub hyper cube with:")
print("origin: {}".format(origin))
print("supremum: {}".format(supremum))
for v in C_new():
v.print_out()
# Append the new cell to the complex
self.H[gen].append(C_new)
return C_new
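# Worked example (illustrative) of the vertex mapping above: with
# v_o = (0.5, 0.5) and v_s = (1.0, 1.0), the template corner v_x = (0, 1)
# maps to v_o * (1 - v_x) + v_s * v_x = (0.5, 1.0), i.e. each coordinate is
# taken from the origin where v_x is 0 and from the supremum where v_x is 1.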
def split_simplex_symmetry(self, S, gen):
"""
Split a hypersimplex S into two sub simplices by building a hyperplane
which connects to a new vertex on an edge (the longest edge in
dim = {2, 3}) and every other vertex in the simplex that is not
connected to the edge being split.
This function utilizes the knowledge that the problem is specified
with symmetric constraints.
The longest edge is tracked by an ordering of the
vertices in every simplex: the edge between the first and last
vertex is the longest edge, which is split in the next iteration.
"""
# If generation `gen` does not exist yet, append it
try:
self.H[gen]
except IndexError:
self.H.append([])
# Find new vertex.
# V_new_x = tuple((np.array(C()[0].x) + np.array(C()[1].x)) / 2.0)
s = S()
firstx = s[0].x
lastx = s[-1].x
V_new = self.V[tuple((np.array(firstx) + np.array(lastx)) / 2.0)]
# Disconnect old longest edge
self.V[firstx].disconnect(self.V[lastx])
# Connect the new vertex to all other vertices
for v in s[:]:
v.connect(self.V[V_new.x])
# New "lower" simplex
S_new_l = Simplex(gen, S.hg_n, self.generation_cycle,
self.dim)
S_new_l.add_vertex(s[0])
S_new_l.add_vertex(V_new) # Add new vertex
for v in s[1:-1]: # Add all other vertices
S_new_l.add_vertex(v)
# New "upper" simplex
S_new_u = Simplex(gen, S.hg_n, S.generation_cycle, self.dim)
# First vertex on new long edge
S_new_u.add_vertex(s[S_new_u.generation_cycle + 1])
for v in s[1:-1]: # Remaining vertices
S_new_u.add_vertex(v)
for k, v in enumerate(s[1:-1]): # iterate through inner vertices
if k == S.generation_cycle:
S_new_u.add_vertex(V_new)
else:
S_new_u.add_vertex(v)
S_new_u.add_vertex(s[-1]) # Second vertex on new long edge
self.H[gen].append(S_new_l)
self.H[gen].append(S_new_u)
return
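# Worked example (illustrative): in 2-D a simplex S with ordered vertices
# (s0, s1, s2) has its tracked longest edge between s0 and s2.  The call
# inserts the midpoint V_new = (s0 + s2) / 2, disconnects s0--s2, and appends
# two children to self.H[gen]: a "lower" simplex {s0, V_new, s1} and an
# "upper" simplex {s1, V_new, s2}, with V_new's position in the vertex
# ordering set by the generation cycle.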
# Plots
def plot_complex(self):
"""
Here, C is the LIST of simplexes S in the
2- or 3-D complex.
To plot a single simplex S in a set C, use e.g., [C[0]].
"""
from matplotlib import pyplot # type: ignore[import]
if self.dim == 2:
pyplot.figure()
for C in self.H:
for c in C:
for v in c():
if self.bounds is None:
x_a = np.array(v.x, dtype=float)
else:
x_a = np.array(v.x, dtype=float)
for i in range(len(self.bounds)):
x_a[i] = (x_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('v.x_a = {}'.format(x_a))
pyplot.plot([x_a[0]], [x_a[1]], 'o')
xlines = []
ylines = []
for vn in v.nn:
if self.bounds is None:
xn_a = np.array(vn.x, dtype=float)
else:
xn_a = np.array(vn.x, dtype=float)
for i in range(len(self.bounds)):
xn_a[i] = (xn_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('vn.x = {}'.format(vn.x))
xlines.append(xn_a[0])
ylines.append(xn_a[1])
xlines.append(x_a[0])
ylines.append(x_a[1])
pyplot.plot(xlines, ylines)
if self.bounds is None:
pyplot.ylim([-1e-2, 1 + 1e-2])
pyplot.xlim([-1e-2, 1 + 1e-2])
else:
pyplot.ylim(
[self.bounds[1][0] - 1e-2, self.bounds[1][1] + 1e-2])
pyplot.xlim(
[self.bounds[0][0] - 1e-2, self.bounds[0][1] + 1e-2])
pyplot.show()
elif self.dim == 3:
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
for C in self.H:
for c in C:
for v in c():
x = []
y = []
z = []
# logging.info('v.x = {}'.format(v.x))
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
for vn in v.nn:
x.append(vn.x[0])
y.append(vn.x[1])
z.append(vn.x[2])
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
# logging.info('vn.x = {}'.format(vn.x))
ax.plot(x, y, z, label='simplex')
pyplot.show()
else:
print("dimension higher than 3 or wrong complex format")
return
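# Usage sketch (illustrative): visualising a 2-D complex, assuming matplotlib
# is available and the hypothetical objective below.
#
#   hc = Complex(dim=2, func=lambda x: sum(x ** 2))
#   hc.split_generation()
#   hc.plot_complex()   # plots every vertex and the edges of every cell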
class VertexGroup(object):
def __init__(self, p_gen, p_hgr):
self.p_gen = p_gen # parent generation
self.p_hgr = p_hgr # parent homology group rank
self.hg_n = None
self.hg_d = None
# Maybe add parent homology group rank total history
# This is the sum of all previously split cells
# cumulatively throughout its entire history
self.C = []
def __call__(self):
return self.C
def add_vertex(self, V):
if V not in self.C:
self.C.append(V)
def homology_group_rank(self):
"""
Returns the homology group order of the current cell
"""
if self.hg_n is None:
self.hg_n = sum(1 for v in self.C if v.minimiser())
return self.hg_n
def homology_group_differential(self):
"""
Returns the difference between the current homology group of the
cell and its parent group
"""
if self.hg_d is None:
self.hg_d = self.hg_n - self.p_hgr
return self.hg_d
def polytopial_sperner_lemma(self):
"""
Returns the number of stationary points theoretically contained in the
cell, based on the information currently known about the cell
"""
pass
def print_out(self):
"""
Print the current cell to console
"""
for v in self():
v.print_out()
class Cell(VertexGroup):
"""
Contains a cell that is symmetric to the initial hypercube triangulation
"""
def __init__(self, p_gen, p_hgr, origin, supremum):
super(Cell, self).__init__(p_gen, p_hgr)
self.origin = origin
self.supremum = supremum
self.centroid = None # (Not always used)
# TODO: self.bounds
class Simplex(VertexGroup):
"""
Contains a simplex that is symmetric to the initial symmetry constrained
hypersimplex triangulation
"""
def __init__(self, p_gen, p_hgr, generation_cycle, dim):
super(Simplex, self).__init__(p_gen, p_hgr)
self.generation_cycle = (generation_cycle + 1) % (dim - 1)
class Vertex:
def __init__(self, x, bounds=None, func=None, func_args=(), g_cons=None,
g_cons_args=(), nn=None, index=None):
self.x = x
self.order = sum(x)
x_a = np.array(x, dtype=float)
if bounds is not None:
for i, (lb, ub) in enumerate(bounds):
x_a[i] = x_a[i] * (ub - lb) + lb
# TODO: Make saving the array structure optional
self.x_a = x_a
# Note: a Vertex is only instantiated once for each x, so the
# objective is only evaluated once
if func is not None:
self.feasible = True
if g_cons is not None:
for g, args in zip(g_cons, g_cons_args):
if g(self.x_a, *args) < 0.0:
self.f = np.inf
self.feasible = False
break
if self.feasible:
self.f = func(x_a, *func_args)
if nn is not None:
self.nn = nn
else:
self.nn = set()
self.fval = None
self.check_min = True
# Index:
if index is not None:
self.index = index
def __hash__(self):
return hash(self.x)
def connect(self, v):
if v is not self and v not in self.nn:
self.nn.add(v)
v.nn.add(self)
if self.minimiser():
v._min = False
v.check_min = False
# TEMPORARY
self.check_min = True
v.check_min = True
def disconnect(self, v):
if v in self.nn:
self.nn.remove(v)
v.nn.remove(self)
self.check_min = True
v.check_min = True
def minimiser(self):
"""Check whether this vertex is strictly less than all its neighbors"""
if self.check_min:
self._min = all(self.f < v.f for v in self.nn)
self.check_min = False
return self._min
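# Example (hedged sketch): minimiser() flags a vertex whose objective value is
# strictly below those of all its neighbors.
#
#   cache = VertexCache(func=lambda x: sum(x))   # hypothetical objective
#   a, b = cache[(0, 0)], cache[(1, 0)]
#   a.connect(b)
#   a.minimiser(), b.minimiser()   # -> (True, False) since f(a)=0 < f(b)=1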
def print_out(self):
print("Vertex: {}".format(self.x))
constr = 'Connections: '
for vc in self.nn:
constr += '{} '.format(vc.x)
print(constr)
print('Order = {}'.format(self.order))
class VertexCache:
def __init__(self, func, func_args=(), bounds=None, g_cons=None,
g_cons_args=(), indexed=True):
self.cache = {}
self.func = func
self.g_cons = g_cons
self.g_cons_args = g_cons_args
self.func_args = func_args
self.bounds = bounds
self.nfev = 0
self.size = 0
if indexed:
self.index = -1
def __getitem__(self, x, indexed=True):
try:
return self.cache[x]
except KeyError:
if indexed:
self.index += 1
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args,
index=self.index)
else:
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args)
# logging.info("New generated vertex at x = {}".format(x))
# NOTE: Surprisingly high performance increase if logging is commented out
self.cache[x] = xval
# TODO: Check
if self.func is not None:
if self.g_cons is not None:
if xval.feasible:
self.nfev += 1
self.size += 1
else:
self.size += 1
else:
self.nfev += 1
self.size += 1
return self.cache[x]
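# Usage sketch (illustrative, not part of the original module): the cache
# returns the same Vertex object for repeated lookups of the same coordinate
# tuple, so func is evaluated only once per point.  The lambda and bounds
# below are hypothetical.
#
#   cache = VertexCache(func=lambda x: sum(x ** 2), bounds=[(-1, 1), (-1, 1)])
#   v = cache[(0, 1)]         # (0, 1) is mapped through bounds to (-1.0, 1.0)
#   v.f                       # -> 2.0
#   cache[(0, 1)] is v        # -> True: cached, no second evaluation
#   cache.nfev, cache.size    # -> (1, 1)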