Wednesday, June 8, 2011

A Lagrangean Decomposition Approach for Solving MINLPs





http://cepac.cheme.cmu.edu/pasi2008/slides/flores/library/supplemental/Examples.zip

RMINLP solution of the reformulated problem: initial UPPER BOUND
Lagrangean decomposition subproblems: (possibly tighter) UPPER BOUND
NLP with the binaries fixed at their current levels (y.L = yfixed): LOWER BOUND
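In outline, the file below duplicates the continuous variables (z1, z2, z6 are copies of x1, x2, x6), dualizes the copy constraints z = x with multipliers lambda1, lambda2, lambda6, solves the two resulting subproblems to obtain an upper bound, fixes the binaries and solves an NLP to obtain a lower bound, and then updates the multipliers with a standard subgradient step. A compact summary of the bounds and the update rule, in the maximization form used in the code:

Z_{UB}(\lambda) \;=\; \max \mathrm{LRP}_1(\lambda) \;+\; \max \mathrm{LRP}_2(\lambda) \;\ge\; Z^{*},
\qquad
Z_{LB} \;=\; \max \mathrm{LRP}_{LB}\big(y = y^{\text{fixed}}\big) \;\le\; Z^{*}

t_k \;=\; \frac{\alpha\,\big(Z_{UB}-Z_{LB}\big)}{\lVert z^{k}-x^{k}\rVert^{2}},
\qquad
\lambda_i^{k+1} \;=\; \lambda_i^{k} + t_k\,\big(z_i^{k}-x_i^{k}\big),
\quad i \in \{1,2,6\}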

$title Simple MINLP Problem (Problem No.1 from Marco Duran PhD Thesis)
* -------------------------------------------------------------------
* A Lagrangean Decomposition Approach for Solving MINLPs
*
* Written by Antonio Flores T.
* 9 March, 2006
* -------------------------------------------------------------------
*
Variables         profit,x1,x2,x6,z1,z2,z6,lambda1_dummy,lambda2_dummy,lambda6_dummy ;
Variables         profit_z1, profit_z2,zlow;

Binary variables  y1,y2,y3 ;

Equations         obj,r1,r2,r3,r4,r5,r6 ;
Equations         objz1,objz2;
Equations         objlower,r7,r8,r9,r10,r11,r12;

Scalar            alpha /1/;

parameter         zupper,zlower,niters,maxniters;
parameters        lambda1,lambda2,lambda6;
parameters        diff1,diff2,diff6,errnorm;
parameters        y1fixed, y2fixed, y3fixed;
parameters        tk,lambda1_old,lambda2_old,lambda6_old;

zupper            = inf;
zlower            = -inf;
niters            = 0;
maxniters         = 5;

* ----------------------------------------------------------------------------------
* Form the Reformulated Problem (RP) from which a relaxed MINLP solution is computed
* ----------------------------------------------------------------------------------

obj .. profit =e= -(5*y1+6*y2+8*y3+10*x1-7*x6-18*log(1+x2)-19.2*log(1+x1-x2)+10
                    +lambda1_dummy*(z1-x1)+lambda2_dummy*(z2-x2)+lambda6_dummy*(z6-x6)) ;

r1.. 0.8*log(1+x2)+0.96*log(1+x1-x2)-0.8*x6 =g= 0 ;

r2.. z2-z1 =l= 0 ;

r3.. x2-2*y1 =l= 0 ;

r4.. x1-x2-2*y2 =l= 0 ;

r5.. log(1+z2)+1.2*log(1+z1-z2)-z6-2*y3 =g= -2 ;

r6.. y1+y2 =l= 1;

x1.lo = 0 ; x2.lo = 0 ; x6.lo = 0 ;
z1.lo = 0 ; z2.lo = 0 ; z6.lo = 0 ;
lambda1_dummy.lo = 0;  lambda1_dummy.up = 5;
lambda2_dummy.lo = 0;  lambda2_dummy.up = 5;
lambda6_dummy.lo = 0;  lambda6_dummy.up = 5;

model RP /obj,r1,r2,r3,r4,r5,r6 / ;

* ---------------------------------------------------------------------
* Form the two independent MINLPs into which the RP has been decomposed
* ---------------------------------------------------------------------

objz1.. profit_z1 =e= -(5*y1+6*y2+10*x1-7*x6-18*log(1+x2)-19.2*log(1+x1-x2)+10
                    -lambda1*x1-lambda2*x2-lambda6*x6) ;

model LRP_1 /objz1,r1,r3,r4,r6/ ;

objz2.. profit_z2 =e= -(8*y3+lambda1*z1+lambda2*z2+lambda6*z6);

model LRP_2 /objz2,r2,r5/ ;

* --------------------------------------------------------------------------
* By fixing the binary variables in the RP problem, compute a lower bound
* on the optimal value of the original objective function by solving an NLP
* --------------------------------------------------------------------------

objlower .. zlow   =e= -(5*y1fixed+6*y2fixed+8*y3fixed+10*x1-7*x6-18*log(1+x2)
                       -19.2*log(1+x1-x2)+10);

r7.. x2 - 2*y1fixed =l= 0 ;

r8.. x1-x2-2*y2fixed =l= 0 ;

r9.. log(1+z2)+1.2*log(1+z1-z2)-z6-2*y3fixed =g= -2 ;

r10.. x1-z1 =e= 0 ;

r11.. x2-z2 =e= 0 ;

r12.. x6-z6 =e= 0 ;

model LRP_LB /objlower,r1,r2,r7,r8,r9,r10,r11,r12/ ;

* ---------------------------------------------------------------
* Beginning of the iterative Lagrangean Decomposition procedure
* ---------------------------------------------------------------

solve RP maximizing profit using rminlp ;
lambda1 = lambda1_dummy.L ;
lambda2 = lambda2_dummy.L ;
lambda6 = lambda6_dummy.L ;
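* (The lambda_dummy levels from the relaxed solve above serve only to
*  initialize the multipliers lambda1, lambda2, lambda6 for the loop below.)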

while ( (zlower lt zupper) and (niters lt maxniters),
      niters = niters+1;
*
* Compute an optimal value upper bound
*
     
      solve LRP_1 maximizing profit_z1 using minlp ;
      solve LRP_2 maximizing profit_z2 using minlp ;
      errnorm   = sqr(z1.L-x1.L)+sqr(z2.L-x2.L)+sqr(z6.L-x6.L) ;
      diff1     = z1.L-x1.L ;
      diff2     = z2.L-x2.L ;
      diff6     = z6.L-x6.L ;
*
* Fixing binary variable to get an optimal value lower bound
*
      y1fixed = y1.L ;
      y2fixed = y2.L ;
      y3fixed = y3.L ;
     
      solve LRP_LB maximizing zlow using nlp ;
* --------------------------------------------------------
* Update Lagrange multipliers by a simple rule
* --------------------------------------------------------
      lambda1_old = lambda1;
      lambda2_old = lambda2;
      lambda6_old = lambda6;
      zupper      = profit_z1.L+profit_z2.L ;
      zlower      = zlow.L ;
      tk          = alpha*(zupper-zlower)/errnorm ;
      lambda1     = lambda1_old+tk*diff1;
      lambda2     = lambda2_old+tk*diff2;
      lambda6     = lambda6_old+tk*diff6;

     
      display lambda1, lambda2, lambda6, tk, zlower, zupper;
);


*-- End of the ejemplo-1-relaxation.gms file --
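A side note on running the file: the solves above use the rminlp, minlp, and nlp model types, so the results depend on which solvers the local GAMS installation provides. If several are available, they can be pinned explicitly with option statements near the top of the file; the solver names below are only examples, not a requirement of the method:

* Optional solver selection (names are examples; any licensed
* RMINLP/MINLP/NLP solver can be substituted)
option rminlp = conopt ;
option minlp  = dicopt ;
option nlp    = conopt ;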
