// perf_convolution1d.cpp
  1. // This file is part of OpenCV project.
  2. // It is subject to the license terms in the LICENSE file found in the top-level directory
  3. // of this distribution and at http://opencv.org/license.html.
  4. #include "perf_precomp.hpp"
  5. #include <opencv2/dnn/shape_utils.hpp>
  6. namespace opencv_test {
// Parameters of a single 1D-convolution benchmark case.
// NOTE: field order is significant — testConvolution1DConfigs initializes
// instances positionally (aggregate initialization); do not reorder.
struct Conv1DParam_t {
    int kernel;                                  // kernel width (1D)
    struct BlobShape { int dims[3]; } shapeIn;   // input blob shape {N, C, W}
    int outCN;                                   // number of output channels
    int groups;                                  // convolution groups
    int stride;
    int dilation;
    int pad[2];                                  // explicit padding: {begin, end}
    const char* padMode;                         // "", "VALID" or "SAME"; non-empty overrides pad[]
    bool hasBias;
    double declared_flops;                       // expected FLOP count, checked against net.getFLOPS()
};
// Details: #12142
// Benchmark configurations; fields are positional per Conv1DParam_t:
// {kernel, {{N, C, W}}, outCN, groups, stride, dilation, {padB, padE}, padMode, hasBias, declared_flops}
static const Conv1DParam_t testConvolution1DConfigs[] = {
    {3, {{1, 6, 10}}, 6, 1, 1, 1, {0, 0}, "VALID", true, 1776.},
    {3, {{1, 2, 19}}, 2, 2, 2, 1, {1, 1}, "", true, 260.},
    {3, {{1, 2, 25}}, 2, 2, 1, 1, {2, 2}, "SAME", false, 650.},
};
  25. struct Conv1DParamID
  26. {
  27. enum {
  28. CONV_0 = 0,
  29. CONV_LAST = sizeof(testConvolution1DConfigs) / sizeof(testConvolution1DConfigs[0])
  30. };
  31. int val_;
  32. Conv1DParamID(int val = 0) : val_(val) {}
  33. operator int() const { return val_; }
  34. static ::testing::internal::ParamGenerator<Conv1DParamID> all()
  35. {
  36. enum { NUM = (int)CONV_LAST };
  37. Conv1DParamID v_[NUM]; for (int i = 0; i < NUM; ++i) { v_[i] = Conv1DParamID(i); } // reduce generated code size
  38. return ::testing::ValuesIn(v_, v_ + NUM);
  39. }
  40. };
  41. static inline void PrintTo(const Conv1DParamID& v, std::ostream* os)
  42. {
  43. CV_Assert((int)v >= 0); CV_Assert((int)v < Conv1DParamID::CONV_LAST);
  44. const Conv1DParam_t& p = testConvolution1DConfigs[(int)v];
  45. *os << "GFLOPS=" << cv::format("%.3f", p.declared_flops * 1e-9)
  46. << ", K=[" << p.kernel << "]"
  47. << ", IN={" << p.shapeIn.dims[0] << ", " << p.shapeIn.dims[1] << ", " << p.shapeIn.dims[2] << "}"
  48. << ", OCN=" << p.outCN;
  49. if (p.groups > 1)
  50. *os << ", G=" << p.groups;
  51. if (p.stride != 1)
  52. *os << ", S=" << p.stride;
  53. if (p.dilation != 1)
  54. *os << ", D=" << p.dilation;
  55. if (p.pad[0] != 0 && p.pad[1] != 0 )
  56. *os << ", P=(" << p.pad[0] << ", " << p.pad[1] << ")";
  57. if (!((std::string)p.padMode).empty())
  58. *os << ", PM=" << ((std::string)p.padMode);
  59. if (p.hasBias)
  60. *os << ", BIAS";
  61. }
// Full test parameter: (configuration index, (backend, target)).
typedef tuple<Conv1DParamID, tuple<Backend, Target> > Conv1DTestParam_t;
// Fixture type required by PERF_TEST_P_ / INSTANTIATE_TEST_CASE_P below.
typedef TestBaseWithParam<Conv1DTestParam_t> Conv1D;
// Builds a one-layer Convolution network from the selected configuration,
// runs a warmup forward pass, then times repeated forward passes.
// Also cross-checks the network's reported FLOPS against the declared value.
PERF_TEST_P_(Conv1D, conv1d)
{
    // First tuple element selects an entry of testConvolution1DConfigs.
    int test_id = (int)get<0>(GetParam());
    ASSERT_GE(test_id, 0); ASSERT_LT(test_id, Conv1DParamID::CONV_LAST);
    const Conv1DParam_t& params = testConvolution1DConfigs[test_id];
    double declared_flops = params.declared_flops;

    // 1D convolution: kernel/stride/dilation are 1-element arrays;
    // pad carries {begin, end} and hence has 2 elements.
    DictValue kernel = DictValue::arrayInt(&params.kernel, 1);
    DictValue stride = DictValue::arrayInt(&params.stride, 1);
    DictValue pad = DictValue::arrayInt(&params.pad[0], 2);
    DictValue dilation = DictValue::arrayInt(&params.dilation, 1);

    MatShape inputShape = MatShape(params.shapeIn.dims, params.shapeIn.dims + 3);
    int outChannels = params.outCN;
    int groups = params.groups;
    std::string padMode(params.padMode);
    bool hasBias = params.hasBias;
    Backend backendId = get<0>(get<1>(GetParam()));
    Target targetId = get<1>(get<1>(GetParam()));

    if (targetId != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");

    // Random weights with shape {outCN, inCN/groups, kernel}.
    int inChannels = inputShape[1];
    int sz[] = {outChannels, inChannels / groups, params.kernel};
    Mat weights(3, &sz[0], CV_32F);
    randu(weights, -1.0f, 1.0f);

    LayerParams lp;
    lp.set("kernel_size", kernel);
    lp.set("pad", pad);
    // A non-empty pad_mode ("SAME"/"VALID") takes precedence over explicit pad.
    if (!padMode.empty())
        lp.set("pad_mode", padMode);
    lp.set("stride", stride);
    lp.set("dilation", dilation);
    lp.set("num_output", outChannels);
    lp.set("group", groups);
    lp.set("bias_term", hasBias);
    lp.type = "Convolution";
    lp.name = "testLayer";
    lp.blobs.push_back(weights);
    if (hasBias)
    {
        Mat bias(1, outChannels, CV_32F);
        randu(bias, -1.0f, 1.0f);
        lp.blobs.push_back(bias);
    }

    // Random input blob {1, C, W} matching the configured shape.
    int inpSz[] = {1, inChannels, inputShape[2]};
    Mat input(3, &inpSz[0], CV_32F);
    randu(input, -1.0f, 1.0f);

    Net net;
    net.addLayerToPrev(lp.name, lp.type, lp);
    net.setInput(input);
    net.setPreferableBackend(backendId);
    net.setPreferableTarget(targetId);

    // warmup — the first forward() triggers lazy initialization, so it
    // must happen before the timed TEST_CYCLE loop below.
    Mat output = net.forward();

    // Report memory and FLOPS for this configuration.
    MatShape netInputShape = shape(input);
    size_t weightsMemory = 0, blobsMemory = 0;
    net.getMemoryConsumption(netInputShape, weightsMemory, blobsMemory);
    int64 flops = net.getFLOPS(netInputShape);
    CV_Assert(flops > 0);

    std::cout
        << "IN=" << divUp(input.total() * input.elemSize(), 1u<<10) << " Kb " << netInputShape
        << " OUT=" << divUp(output.total() * output.elemSize(), 1u<<10) << " Kb " << shape(output)
        << " Weights(parameters): " << divUp(weightsMemory, 1u<<10) << " Kb"
        << " MFLOPS=" << flops * 1e-6 << std::endl;

    // Timed region: only the forward pass is measured.
    TEST_CYCLE()
    {
        Mat res = net.forward();
    }

    // Sanity check: computed FLOPS must match the declared value
    // (relative tolerance 1e-6).
    EXPECT_NEAR(flops, declared_flops, declared_flops * 1e-6);
    SANITY_CHECK_NOTHING();
}
// Instantiate Conv1D over every configuration x every (backend, target) pair.
INSTANTIATE_TEST_CASE_P(/**/, Conv1D, Combine(
    Conv1DParamID::all(),
    dnnBackendsAndTargets(false, false) // defined in ../test/test_common.hpp
));
  137. } // namespace