
What I have here is an implementation of an optimization algorithm called the covariance matrix adaptation evolution strategy (CMA-ES). I am using it to optimize the positions of wind turbines inside a wind farm, but if I place more than a certain number of turbines (equivalent to a high value of `N` at the beginning of the code), my RAM fills up after around 50 iterations of the while loop. I have tried deleting certain variables once they have been used and declaring `__slots__` in my class, but to no noticeable effect. Could you take a look at the code and tell me if there is something very memory-consuming that I'm not seeing?
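For context, the `__slots__` attempt looked roughly like this (the class and attribute names are placeholders, not the real ones from my project):

class Turbine(object):
    # Declaring __slots__ removes the per-instance __dict__, which
    # shrinks the footprint when many small objects are alive at once
    __slots__ = ('x', 'y', 'power')

    def __init__(self, x, y, power):
        self.x = x
        self.y = y
        self.power = power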

This is a stand-alone version, coupled with the Rosenbrock function as the objective to optimize.

from math import exp, floor, log, sqrt

import numpy as np


def frosenbrock(x):
    # Rosenbrock function; x is an (N, 1) column vector
    x1 = np.delete(x, len(x) - 1)   # x without its last component
    x2 = np.delete(x, 0)            # x without its first component
    return 100 * np.sum((x1 ** 2 - x2) ** 2) + np.sum((x1 - 1) ** 2)
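The global minimum of the Rosenbrock function is at x = (1, ..., 1) with f = 0, which gives a quick sanity check of the implementation:

print(frosenbrock(np.ones((5, 1))))    # 0.0 at the global minimum
print(frosenbrock(np.zeros((5, 1))))   # 4.0: four (x1 - 1)**2 terms of 1 each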

def optimise_CMA(initial_guess):        # stand-alone version, so no self; initial_guess is currently unused
    # -------------------- INITIALIZATION --------------------
    N = 20                              # number of variables, problem dimension
    xmean = np.random.uniform(0, 1, (N, 1))  # objective variables: random initial point
    sigma = 0.5                         # coordinate-wise standard deviation (step size)
    stopfitness = 1e-10                 # stop if fitness < stopfitness (minimization)
    stopeval = 1e3 * N ** 2             # stop after this many function evaluations

    # Strategy parameter setting: selection
    lambd = int(4 + floor(3 * log(N)))  # population size, offspring number
    mu = lambd // 2                     # number of parents/points for recombination
    weights = np.array([log(mu + 0.5) - log(j + 1) for j in range(mu)])  # weights for recombination
    weights = weights / sum(weights)    # normalize so the weights sum to one
    mueff = sum(weights) ** 2 / sum(weights ** 2)  # variance-effective selection mass
    weights_array = weights.reshape([mu, 1])

    # Strategy parameter setting: adaptation
    cc = (4 + mueff / N) / (N + 4 + 2 * mueff / N)  # time constant for cumulation for C
    cs = (mueff + 2) / (N + mueff + 5)              # time constant for cumulation for sigma control
    c1 = 2 / ((N + 1.3) ** 2 + mueff)               # learning rate for rank-one update of C
    cmu = min(1 - c1, 2 * (mueff - 2 + 1 / mueff) / ((N + 2) ** 2 + mueff))  # and for rank-mu update
    damps = 1 + 2 * max(0, sqrt((mueff - 1) / (N + 1)) - 1) + cs  # damping for sigma, usually close to 1

    # Initialize dynamic (internal) strategy parameters and constants
    pc = np.zeros((N, 1))               # evolution path for C
    ps = np.zeros((N, 1))               # evolution path for sigma
    D = np.ones((N, 1))                 # diagonal D defines the scaling
    B = np.eye(N)                       # B defines the coordinate system
    C = np.dot(np.dot(B, np.diag((D ** 2)[:, 0])), B.T)          # covariance matrix C = B * diag(D^2) * B'
    invsqrtC = np.dot(np.dot(B, np.diag((D ** -1)[:, 0])), B.T)  # C^(-1/2)
    eigeneval = 0                       # tracks the last update of B and D
    chiN = N ** 0.5 * (1 - 1.0 / (4 * N) + 1.0 / (21 * N ** 2))  # expectation of ||N(0, I)||

    # -------------------- GENERATION LOOP --------------------
    counteval = 0
    while counteval < stopeval:
        # Generate and evaluate lambd offspring
        arfitness = []
        for l in range(lambd):
            offspring = xmean + np.dot(sigma * B, D * np.random.standard_normal((N, 1)))  # m + sigma * N(0, C)
            if l == 0:
                arx = offspring
            else:
                arx = np.hstack((arx, offspring))      # grow the (N, lambd) sample matrix column by column
            arfitness.append(frosenbrock(offspring))   # evaluate the objective function
            counteval += 1
        # Sort by fitness and compute the weighted mean into xmean
        ordered = sorted(enumerate(arfitness), key=lambda t: t[1])  # (index, fitness) pairs, ascending fitness
        xold = xmean
        best_off_indexes = [ordered[i][0] for i in range(mu)]       # indexes of the mu best offspring
        recomb = arx[:, best_off_indexes]                           # (N, mu) matrix of the selected parents
        xmean = np.dot(recomb, weights_array)                       # weighted recombination

        # Cumulation: update evolution paths
        ps = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * np.dot(invsqrtC, xmean - xold) / sigma
        hsig = int(np.linalg.norm(ps) / sqrt(1 - (1 - cs) ** (2 * counteval / lambd)) / chiN
                   < 1.4 + 2.0 / (N + 1))               # indicator: stall the pc update when ||ps|| is large
        pc = (1 - cc) * pc + hsig * sqrt(cc * (2 - cc) * mueff) * (xmean - xold) / sigma

        # Adapt covariance matrix C
        artmp = (recomb - np.tile(xold, (1, mu))) / sigma
        C = ((1 - c1 - cmu) * C                                                  # discount the old matrix
             + c1 * (np.dot(pc, pc.T) + (1 - hsig) * cc * (2 - cc) * C)          # rank-one update (plus hsig correction)
             + cmu * np.dot(artmp, np.dot(np.diag(weights_array[:, 0]), artmp.T)))  # rank-mu update

        # Adapt step size sigma
        sigma = sigma * exp((cs / damps) * (np.linalg.norm(ps) / chiN - 1))

        # Decomposition of C into B * diag(D^2) * B', done only every few
        # iterations to keep the cost at O(N^2) per evaluation
        if counteval - eigeneval > lambd / (c1 + cmu) / N / 10:
            eigeneval = counteval
            C = np.triu(C) + np.triu(C, 1).T            # enforce symmetry
            diag_D, B = np.linalg.eigh(C)               # single symmetric eigendecomposition; B holds normalized eigenvectors
            diag_D = diag_D.reshape([N, 1])
            D = diag_D ** 0.5                           # D is a vector of standard deviations now
            invsqrtC = np.dot(B, np.dot(np.diag((D ** -1)[:, 0]), B.T))  # C^(-1/2)
            print(ordered[0][1])                        # best fitness so far, to watch convergence
        # Remember the best point of this iteration, then free the
        # per-iteration containers (part of my attempt to save memory)
        xmin = arx[:, best_off_indexes[0]]
        # Break if the fitness is good enough or the condition number of C
        # exceeds 1e14; better termination methods are advisable
        if ordered[0][1] <= stopfitness or max(D) > 1e7 * min(D):
            break
        del arfitness, arx, ordered

    # Return the best point of the last iteration
    return xmin
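In case it helps reproduce the problem, the run can be watched with the standard-library tracemalloc module; a minimal sketch (I pass None because initial_guess is unused in the stand-alone version):

import tracemalloc

tracemalloc.start()
before = tracemalloc.take_snapshot()

optimise_CMA(None)                      # run the optimizer once

after = tracemalloc.take_snapshot()
for stat in after.compare_to(before, 'lineno')[:10]:
    print(stat)                         # ten biggest allocation sites by growth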