// Copyright (C) 2004, 2006 International Business Machines and others.
// All Rights Reserved.
// This code is published under the Eclipse Public License.
//
// $Id: MyNLP.cpp 2005 2011-06-06 12:55:16Z stefan $
//
// Authors:  Carl Laird, Andreas Waechter     IBM    2004-11-05

#include "MyNLP.hpp"

#include <cassert>

using namespace Ipopt;
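
// For reference, the NLP implemented by the callbacks below (reconstructed
// from MyNLP.hpp and the evaluation routines in this file) is:
//
//    min   f(x) = -(x2 - 2)^2
//    s.t.  0 = -(x1^2 + x2 - 1)
//          -1 <= x1 <= 1,   x2 free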

/* Constructor. */
MyNLP::MyNLP()
{ }

/* Destructor. */
MyNLP::~MyNLP()
{ }

bool MyNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
                         Index& nnz_h_lag, IndexStyleEnum& index_style)
{
   // The problem described in MyNLP.hpp has 2 variables, x1 and x2,
   n = 2;

   // one equality constraint,
   m = 1;

   // 2 nonzeros in the Jacobian (one for x1, and one for x2),
   nnz_jac_g = 2;

   // and 2 nonzeros in the Hessian of the Lagrangian
   // (one in the Hessian of the objective for x2,
   // and one in the Hessian of the constraints for x1)
   nnz_h_lag = 2;

   // We use the standard Fortran index style (1-based) for row/col entries
   index_style = FORTRAN_STYLE;

   return true;
}

bool MyNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
                            Index m, Number* g_l, Number* g_u)
{
   // Here, the n and m we gave IPOPT in get_nlp_info are passed back to us.
   // If desired, we could assert to make sure they are what we think they are.
   assert(n == 2);
   assert(m == 1);

   // x1 has a lower bound of -1 and an upper bound of 1
   x_l[0] = -1.0;
   x_u[0] = 1.0;

   // x2 has no upper or lower bound, so we set them to
   // a large negative and a large positive number.
   // The value that is interpreted as -/+infinity can be
   // set in the options, but it defaults to -/+1e19.
   x_l[1] = -1.0e19;
   x_u[1] = +1.0e19;

   // We have one equality constraint, so we set the bounds on this constraint
   // to be equal (and zero).
   g_l[0] = g_u[0] = 0.0;

   return true;
}

bool MyNLP::get_starting_point(Index n, bool init_x, Number* x,
                               bool init_z, Number* z_L, Number* z_U,
                               Index m, bool init_lambda,
                               Number* lambda)
{
   // Here, we assume we only have starting values for x. If you code
   // your own NLP, you can provide starting values for the others if
   // you wish.
   assert(init_x == true);
   assert(init_z == false);
   assert(init_lambda == false);

   // we initialize x in bounds, in the upper right quadrant
   x[0] = 0.5;
   x[1] = 1.5;

   return true;
}

bool MyNLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
{
   // return the value of the objective function
   Number x2 = x[1];
   obj_value = -(x2 - 2.0) * (x2 - 2.0);

   return true;
}

bool MyNLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
{
   // return the gradient of the objective function grad_{x} f(x)

   // grad_{x1} f(x): x1 is not in the objective
   grad_f[0] = 0.0;

   // grad_{x2} f(x):
   Number x2 = x[1];
   grad_f[1] = -2.0 * (x2 - 2.0);

   return true;
}

bool MyNLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
{
   // return the value of the constraints: g(x)
   Number x1 = x[0];
   Number x2 = x[1];

   g[0] = -(x1 * x1 + x2 - 1.0);

   return true;
}
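
// Like the other sparse callbacks below, eval_jac_g is called in two modes:
// if values is NULL, Ipopt asks only for the sparsity structure (iRow/jCol
// in triplet format); otherwise it asks for the nonzero values at the
// current x. Since index_style was set to FORTRAN_STYLE in get_nlp_info,
// the row and column indices are 1-based.
//
// For this problem g(x) = -(x1^2 + x2 - 1), so the 1x2 Jacobian is
//    [ -2*x1   -1 ]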

bool MyNLP::eval_jac_g(Index n, const Number* x, bool new_x,
                       Index m, Index nele_jac, Index* iRow, Index* jCol,
                       Number* values)
{
   if (values == NULL) {
      // return the structure of the Jacobian of the constraints

      // element at 1,1: grad_{x1} g_{1}(x)
      iRow[0] = 1;
      jCol[0] = 1;

      // element at 1,2: grad_{x2} g_{1}(x)
      iRow[1] = 1;
      jCol[1] = 2;
   }
   else {
      // return the values of the Jacobian of the constraints
      Number x1 = x[0];

      // element at 1,1: grad_{x1} g_{1}(x)
      values[0] = -2.0 * x1;

      // element at 1,2: grad_{x2} g_{1}(x)
      values[1] = -1.0;
   }

   return true;
}
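
// eval_h returns the Hessian of the Lagrangian,
//    sigma_f * grad^2 f(x) + sum_i lambda_i * grad^2 g_i(x),
// where sigma_f arrives as obj_factor. Here
//    grad^2 f(x) = [ 0  0 ; 0  -2 ]   and   grad^2 g_1(x) = [ -2  0 ; 0  0 ],
// so the only structural nonzeros are the diagonal entries (1,1) and (2,2)
// filled in below.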

bool MyNLP::eval_h(Index n, const Number* x, bool new_x,
                   Number obj_factor, Index m, const Number* lambda,
                   bool new_lambda, Index nele_hess, Index* iRow,
                   Index* jCol, Number* values)
{
   if (values == NULL) {
      // return the structure. This is a symmetric matrix, fill the lower left
      // triangle only.

      // element at 1,1: grad^2_{x1,x1} L(x,lambda)
      iRow[0] = 1;
      jCol[0] = 1;

      // element at 2,2: grad^2_{x2,x2} L(x,lambda)
      iRow[1] = 2;
      jCol[1] = 2;

      // Note: off-diagonal elements are zero for this problem
   }
   else {
      // return the values

      // element at 1,1: grad^2_{x1,x1} L(x,lambda)
      values[0] = -2.0 * lambda[0];

      // element at 2,2: grad^2_{x2,x2} L(x,lambda)
      values[1] = -2.0 * obj_factor;

      // Note: off-diagonal elements are zero for this problem
   }

   return true;
}

void MyNLP::finalize_solution(SolverReturn status,
                              Index n, const Number* x, const Number* z_L, const Number* z_U,
                              Index m, const Number* g, const Number* lambda,
                              Number obj_value,
                              const IpoptData* ip_data,
                              IpoptCalculatedQuantities* ip_cq)
{
   // This is where we would store the solution to variables, write it to a
   // file, etc., so we could use it later. Since Ipopt displays the solution
   // on the console, we currently do nothing here.
}
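
// A minimal driver sketch for this class, assuming the standard
// IpoptApplication interface. In the Ipopt distribution the driver lives in
// a separate file, so it is only sketched here in a comment; the structure
// mirrors the shipped examples:
//
//   #include "IpIpoptApplication.hpp"
//   #include "MyNLP.hpp"
//
//   int main()
//   {
//      using namespace Ipopt;
//
//      // create an instance of the NLP and of the Ipopt solver
//      SmartPtr<TNLP> mynlp = new MyNLP();
//      SmartPtr<IpoptApplication> app = IpoptApplicationFactory();
//
//      // initialize the solver (reads an ipopt.opt options file, if present)
//      ApplicationReturnStatus status = app->Initialize();
//      if (status != Solve_Succeeded) return (int) status;
//
//      // solve; finalize_solution above is invoked when Ipopt is done
//      status = app->OptimizeTNLP(mynlp);
//      return (int) status;
//   }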