import numpy as np

def ball_constraint(center, radius):
    # Constraint set: Euclidean ball of given center and radius
    def ball(x):  # x is treated as a NumPy array, not a list
        # Constraint function: <= 0 inside the ball, 0 on the boundary
        f = np.linalg.norm(x - center)**2 - radius**2
        return f
    def ball_grad(x):
        # Gradient of the constraint function
        df = 2 * (x - center)
        return df
    def ball_proj(x):
        # Euclidean projection onto the ball
        dist = np.linalg.norm(x - center)
        if dist <= radius:
            return x  # already inside the ball
        proj = center + radius * (x - center) / dist
        return proj
    return ball, ball_grad, ball_proj
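
As a quick sanity check (a minimal sketch, assuming the corrected ball_constraint above; the numerical values are only illustrative), ball_proj should send an outside point back onto the sphere of the given radius:

ball, ball_grad, ball_proj = ball_constraint(np.zeros(2), 1.0)
x_out = np.array([3.0, 4.0])       # point outside the unit ball
x_proj = ball_proj(x_out)          # expected: [0.6, 0.8]
print(np.linalg.norm(x_proj))      # 1.0, i.e. on the boundary
print(ball(x_proj))                # ~0.0, constraint active on the boundary
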
# Projected gradient descent method
# constraint_proj is expected to be a projection function, e.g. the
# ball_proj returned by ball_constraint above.
def proj_grad(f, grad, x0, iterations, error_point, error_grad, constraint_proj, hstep):
    # Memory allocation
    dim = np.max(np.shape(x0))  # dimension of the space
    x_list = np.zeros([dim, iterations])  # solution at each iteration
    f_list = np.zeros(iterations)  # objective value at each iterate
    error_point_list = np.zeros(iterations)  # distance between consecutive iterates
    error_grad_list = np.zeros(iterations)  # norm of the gradient
    # Initialization
    x = np.copy(x0)
    x_old = np.copy(x)
    # Iterative optimization
    for i in range(iterations):
        y = x_old - hstep * grad(x_old)  # gradient step
        x = constraint_proj(y)           # project back onto the constraint set
        # Log results
        x_list[:, i] = np.reshape(x, -1)
        f_list[i] = f(x)
        error_point_list[i] = np.linalg.norm(x - x_old)
        error_grad_list[i] = np.linalg.norm(grad(x))
        if error_point_list[i] < error_point or error_grad_list[i] < error_grad:  # termination condition
            break
        x_old = np.copy(x)
    return {'x_list': x_list[:, 0:i+1], 'f_list': f_list[0:i+1],
            'error_point_list': error_point_list[0:i+1], 'error_grad_list': error_grad_list[0:i+1]}
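
A minimal usage sketch, assuming the two functions above: the objective f_obj, its gradient grad_obj, the target point a, the starting point x0 and the step size are hypothetical, chosen only to illustrate the call. It minimizes ||x - a||^2 over the unit ball centered at the origin, passing ball_proj as constraint_proj.

# Hypothetical example: minimize ||x - a||^2 over the unit ball
a = np.array([2.0, 2.0])
f_obj = lambda x: np.linalg.norm(x - a)**2
grad_obj = lambda x: 2 * (x - a)
_, _, ball_proj = ball_constraint(np.zeros(2), 1.0)
x0 = np.array([0.5, 0.0])
result = proj_grad(f_obj, grad_obj, x0, iterations=1000,
                   error_point=1e-8, error_grad=1e-6,
                   constraint_proj=ball_proj, hstep=0.1)
print(result['x_list'][:, -1])  # should approach [1/sqrt(2), 1/sqrt(2)] on the boundary
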