// hs071_nlp.cpp
  1. // Copyright (C) 2005, 2006 International Business Machines and others.
  2. // All Rights Reserved.
  3. // This code is published under the Eclipse Public License.
  4. //
  5. // $Id: hs071_nlp.cpp 2005 2011-06-06 12:55:16Z stefan $
  6. //
  7. // Authors: Carl Laird, Andreas Waechter IBM 2005-08-16
  8. #include "hs071_nlp.hpp"
  9. #include <cassert>
  10. #include <iostream>
  11. using namespace Ipopt;
  12. // constructor
  13. HS071_NLP::HS071_NLP()
  14. {}
  15. //destructor
  16. HS071_NLP::~HS071_NLP()
  17. {}
  18. // returns the size of the problem
  19. bool HS071_NLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
  20. Index& nnz_h_lag, IndexStyleEnum& index_style)
  21. {
  22. // The problem described in HS071_NLP.hpp has 4 variables, x[0] through x[3]
  23. n = 4;
  24. // one equality constraint and one inequality constraint
  25. m = 2;
  26. // in this example the jacobian is dense and contains 8 nonzeros
  27. nnz_jac_g = 8;
  28. // the hessian is also dense and has 16 total nonzeros, but we
  29. // only need the lower left corner (since it is symmetric)
  30. nnz_h_lag = 10;
  31. // use the C style indexing (0-based)
  32. index_style = TNLP::C_STYLE;
  33. return true;
  34. }
  35. // returns the variable bounds
  36. bool HS071_NLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
  37. Index m, Number* g_l, Number* g_u)
  38. {
  39. // here, the n and m we gave IPOPT in get_nlp_info are passed back to us.
  40. // If desired, we could assert to make sure they are what we think they are.
  41. assert(n == 4);
  42. assert(m == 2);
  43. // the variables have lower bounds of 1
  44. for (Index i=0; i<4; i++) {
  45. x_l[i] = 1.0;
  46. }
  47. // the variables have upper bounds of 5
  48. for (Index i=0; i<4; i++) {
  49. x_u[i] = 5.0;
  50. }
  51. // the first constraint g1 has a lower bound of 25
  52. g_l[0] = 25;
  53. // the first constraint g1 has NO upper bound, here we set it to 2e19.
  54. // Ipopt interprets any number greater than nlp_upper_bound_inf as
  55. // infinity. The default value of nlp_upper_bound_inf and nlp_lower_bound_inf
  56. // is 1e19 and can be changed through ipopt options.
  57. g_u[0] = 2e19;
  58. // the second constraint g2 is an equality constraint, so we set the
  59. // upper and lower bound to the same value
  60. g_l[1] = g_u[1] = 40.0;
  61. return true;
  62. }
  63. // returns the initial point for the problem
  64. bool HS071_NLP::get_starting_point(Index n, bool init_x, Number* x,
  65. bool init_z, Number* z_L, Number* z_U,
  66. Index m, bool init_lambda,
  67. Number* lambda)
  68. {
  69. // Here, we assume we only have starting values for x, if you code
  70. // your own NLP, you can provide starting values for the dual variables
  71. // if you wish
  72. assert(init_x == true);
  73. assert(init_z == false);
  74. assert(init_lambda == false);
  75. // initialize to the given starting point
  76. x[0] = 1.0;
  77. x[1] = 5.0;
  78. x[2] = 5.0;
  79. x[3] = 1.0;
  80. return true;
  81. }
  82. // returns the value of the objective function
  83. bool HS071_NLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
  84. {
  85. assert(n == 4);
  86. obj_value = x[0] * x[3] * (x[0] + x[1] + x[2]) + x[2];
  87. return true;
  88. }
  89. // return the gradient of the objective function grad_{x} f(x)
  90. bool HS071_NLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
  91. {
  92. assert(n == 4);
  93. grad_f[0] = x[0] * x[3] + x[3] * (x[0] + x[1] + x[2]);
  94. grad_f[1] = x[0] * x[3];
  95. grad_f[2] = x[0] * x[3] + 1;
  96. grad_f[3] = x[0] * (x[0] + x[1] + x[2]);
  97. return true;
  98. }
  99. // return the value of the constraints: g(x)
  100. bool HS071_NLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
  101. {
  102. assert(n == 4);
  103. assert(m == 2);
  104. g[0] = x[0] * x[1] * x[2] * x[3];
  105. g[1] = x[0]*x[0] + x[1]*x[1] + x[2]*x[2] + x[3]*x[3];
  106. return true;
  107. }
  108. // return the structure or values of the jacobian
  109. bool HS071_NLP::eval_jac_g(Index n, const Number* x, bool new_x,
  110. Index m, Index nele_jac, Index* iRow, Index *jCol,
  111. Number* values)
  112. {
  113. if (values == NULL) {
  114. // return the structure of the jacobian
  115. // this particular jacobian is dense
  116. iRow[0] = 0;
  117. jCol[0] = 0;
  118. iRow[1] = 0;
  119. jCol[1] = 1;
  120. iRow[2] = 0;
  121. jCol[2] = 2;
  122. iRow[3] = 0;
  123. jCol[3] = 3;
  124. iRow[4] = 1;
  125. jCol[4] = 0;
  126. iRow[5] = 1;
  127. jCol[5] = 1;
  128. iRow[6] = 1;
  129. jCol[6] = 2;
  130. iRow[7] = 1;
  131. jCol[7] = 3;
  132. }
  133. else {
  134. // return the values of the jacobian of the constraints
  135. values[0] = x[1]*x[2]*x[3]; // 0,0
  136. values[1] = x[0]*x[2]*x[3]; // 0,1
  137. values[2] = x[0]*x[1]*x[3]; // 0,2
  138. values[3] = x[0]*x[1]*x[2]; // 0,3
  139. values[4] = 2*x[0]; // 1,0
  140. values[5] = 2*x[1]; // 1,1
  141. values[6] = 2*x[2]; // 1,2
  142. values[7] = 2*x[3]; // 1,3
  143. }
  144. return true;
  145. }
  146. //return the structure or values of the hessian
  147. bool HS071_NLP::eval_h(Index n, const Number* x, bool new_x,
  148. Number obj_factor, Index m, const Number* lambda,
  149. bool new_lambda, Index nele_hess, Index* iRow,
  150. Index* jCol, Number* values)
  151. {
  152. if (values == NULL) {
  153. // return the structure. This is a symmetric matrix, fill the lower left
  154. // triangle only.
  155. // the hessian for this problem is actually dense
  156. Index idx=0;
  157. for (Index row = 0; row < 4; row++) {
  158. for (Index col = 0; col <= row; col++) {
  159. iRow[idx] = row;
  160. jCol[idx] = col;
  161. idx++;
  162. }
  163. }
  164. assert(idx == nele_hess);
  165. }
  166. else {
  167. // return the values. This is a symmetric matrix, fill the lower left
  168. // triangle only
  169. // fill the objective portion
  170. values[0] = obj_factor * (2*x[3]); // 0,0
  171. values[1] = obj_factor * (x[3]); // 1,0
  172. values[2] = 0.; // 1,1
  173. values[3] = obj_factor * (x[3]); // 2,0
  174. values[4] = 0.; // 2,1
  175. values[5] = 0.; // 2,2
  176. values[6] = obj_factor * (2*x[0] + x[1] + x[2]); // 3,0
  177. values[7] = obj_factor * (x[0]); // 3,1
  178. values[8] = obj_factor * (x[0]); // 3,2
  179. values[9] = 0.; // 3,3
  180. // add the portion for the first constraint
  181. values[1] += lambda[0] * (x[2] * x[3]); // 1,0
  182. values[3] += lambda[0] * (x[1] * x[3]); // 2,0
  183. values[4] += lambda[0] * (x[0] * x[3]); // 2,1
  184. values[6] += lambda[0] * (x[1] * x[2]); // 3,0
  185. values[7] += lambda[0] * (x[0] * x[2]); // 3,1
  186. values[8] += lambda[0] * (x[0] * x[1]); // 3,2
  187. // add the portion for the second constraint
  188. values[0] += lambda[1] * 2; // 0,0
  189. values[2] += lambda[1] * 2; // 1,1
  190. values[5] += lambda[1] * 2; // 2,2
  191. values[9] += lambda[1] * 2; // 3,3
  192. }
  193. return true;
  194. }
  195. void HS071_NLP::finalize_solution(SolverReturn status,
  196. Index n, const Number* x, const Number* z_L, const Number* z_U,
  197. Index m, const Number* g, const Number* lambda,
  198. Number obj_value,
  199. const IpoptData* ip_data,
  200. IpoptCalculatedQuantities* ip_cq)
  201. {
  202. // here is where we would store the solution to variables, or write to a file, etc
  203. // so we could use the solution.
  204. // For this example, we write the solution to the console
  205. std::cout << std::endl << std::endl << "Solution of the primal variables, x" << std::endl;
  206. for (Index i=0; i<n; i++) {
  207. std::cout << "x[" << i << "] = " << x[i] << std::endl;
  208. }
  209. std::cout << std::endl << std::endl << "Solution of the bound multipliers, z_L and z_U" << std::endl;
  210. for (Index i=0; i<n; i++) {
  211. std::cout << "z_L[" << i << "] = " << z_L[i] << std::endl;
  212. }
  213. for (Index i=0; i<n; i++) {
  214. std::cout << "z_U[" << i << "] = " << z_U[i] << std::endl;
  215. }
  216. std::cout << std::endl << std::endl << "Objective value" << std::endl;
  217. std::cout << "f(x*) = " << obj_value << std::endl;
  218. std::cout << std::endl << "Final value of the constraints:" << std::endl;
  219. for (Index i=0; i<m ;i++) {
  220. std::cout << "g(" << i << ") = " << g[i] << std::endl;
  221. }
  222. }