functions.hpp 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558
  1. //
  2. // Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
  3. //
  4. // Distributed under the Boost Software License, Version 1.0. (See
  5. // accompanying file LICENSE_1_0.txt or copy at
  6. // http://www.boost.org/LICENSE_1_0.txt)
  7. //
  8. // The authors gratefully acknowledge the support of
  9. // Fraunhofer IOSB, Ettlingen, Germany
  10. //
  11. #ifndef BOOST_UBLAS_TENSOR_FUNCTIONS_HPP
  12. #define BOOST_UBLAS_TENSOR_FUNCTIONS_HPP
#include <algorithm>
#include <cassert>
#include <cmath>
#include <complex>
#include <cstddef>
#include <numeric>
#include <stdexcept>
#include <vector>
#include "multiplication.hpp"
#include "algorithms.hpp"
#include "expression.hpp"
#include "expression_evaluation.hpp"
#include "storage_traits.hpp"
  22. namespace boost {
  23. namespace numeric {
  24. namespace ublas {
  25. template<class Value, class Format, class Allocator>
  26. class tensor;
  27. template<class Value, class Format, class Allocator>
  28. class matrix;
  29. template<class Value, class Allocator>
  30. class vector;
  31. /** @brief Computes the m-mode tensor-times-vector product
  32. *
  33. * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im]
  34. *
  35. * @note calls ublas::ttv
  36. *
  37. * @param[in] m contraction dimension with 1 <= m <= p
  38. * @param[in] a tensor object A with order p
  39. * @param[in] b vector object B
  40. *
  41. * @returns tensor object C with order p-1, the same storage format and allocator type as A
  42. */
  43. template<class V, class F, class A1, class A2>
  44. auto prod(tensor<V,F,A1> const& a, vector<V,A2> const& b, const std::size_t m)
  45. {
  46. using tensor_type = tensor<V,F,A1>;
  47. using extents_type = typename tensor_type::extents_type;
  48. using ebase_type = typename extents_type::base_type;
  49. using value_type = typename tensor_type::value_type;
  50. using size_type = typename extents_type::value_type;
  51. auto const p = std::size_t(a.rank());
  52. if( m == 0)
  53. throw std::length_error("error in boost::numeric::ublas::prod(ttv): contraction mode must be greater than zero.");
  54. if( p < m )
  55. throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the modus.");
  56. if( p == 0)
  57. throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than zero.");
  58. if( a.empty() )
  59. throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty.");
  60. if( b.size() == 0)
  61. throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty.");
  62. auto nc = ebase_type(std::max(p-1, size_type(2)) , size_type(1));
  63. auto nb = ebase_type{b.size(),1};
  64. for(auto i = 0u, j = 0u; i < p; ++i)
  65. if(i != m-1)
  66. nc[j++] = a.extents().at(i);
  67. auto c = tensor_type(extents_type(nc),value_type{});
  68. auto bb = &(b(0));
  69. ttv(m, p,
  70. c.data(), c.extents().data(), c.strides().data(),
  71. a.data(), a.extents().data(), a.strides().data(),
  72. bb, nb.data(), nb.data());
  73. return c;
  74. }
  75. /** @brief Computes the m-mode tensor-times-matrix product
  76. *
  77. * Implements C[i1,...,im-1,j,im+1,...,ip] = A[i1,i2,...,ip] * B[j,im]
  78. *
  79. * @note calls ublas::ttm
  80. *
  81. * @param[in] a tensor object A with order p
  82. * @param[in] b vector object B
  83. * @param[in] m contraction dimension with 1 <= m <= p
  84. *
  85. * @returns tensor object C with order p, the same storage format and allocator type as A
  86. */
  87. template<class V, class F, class A1, class A2>
  88. auto prod(tensor<V,F,A1> const& a, matrix<V,F,A2> const& b, const std::size_t m)
  89. {
  90. using tensor_type = tensor<V,F,A1>;
  91. using extents_type = typename tensor_type::extents_type;
  92. using strides_type = typename tensor_type::strides_type;
  93. using value_type = typename tensor_type::value_type;
  94. auto const p = a.rank();
  95. if( m == 0)
  96. throw std::length_error("error in boost::numeric::ublas::prod(ttm): contraction mode must be greater than zero.");
  97. if( p < m || m > a.extents().size())
  98. throw std::length_error("error in boost::numeric::ublas::prod(ttm): rank of the tensor must be greater equal the modus.");
  99. if( p == 0)
  100. throw std::length_error("error in boost::numeric::ublas::prod(ttm): rank of the tensor must be greater than zero.");
  101. if( a.empty() )
  102. throw std::length_error("error in boost::numeric::ublas::prod(ttm): first argument tensor should not be empty.");
  103. if( b.size1()*b.size2() == 0)
  104. throw std::length_error("error in boost::numeric::ublas::prod(ttm): second argument matrix should not be empty.");
  105. auto nc = a.extents().base();
  106. auto nb = extents_type {b.size1(),b.size2()};
  107. auto wb = strides_type (nb);
  108. nc[m-1] = nb[0];
  109. auto c = tensor_type(extents_type(nc),value_type{});
  110. auto bb = &(b(0,0));
  111. ttm(m, p,
  112. c.data(), c.extents().data(), c.strides().data(),
  113. a.data(), a.extents().data(), a.strides().data(),
  114. bb, nb.data(), wb.data());
  115. return c;
  116. }
/** @brief Computes the q-mode tensor-times-tensor product
 *
 * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] )
 *
 * @note calls ublas::ttt
 *
 * Precondition: na[phia[x]] = nb[phib[x]] for 1 <= x <= q, i.e. the extents
 * of the contracted modes must match pairwise.
 *
 * @param[in] phia one-based permutation tuple of length q for the first input tensor a
 * @param[in] phib one-based permutation tuple of length q for the second input tensor b
 * @param[in] a left-hand side tensor with order r+q
 * @param[in] b right-hand side tensor with order s+q
 * @result tensor with order r+s
 */
template<class V, class F, class A1, class A2>
auto prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b,
std::vector<std::size_t> const& phia, std::vector<std::size_t> const& phib)
{
    using tensor_type = tensor<V,F,A1>;
    using extents_type = typename tensor_type::extents_type;
    using value_type = typename tensor_type::value_type;
    using size_type = typename extents_type::value_type;

    auto const pa = a.rank();                 // order of A (= r+q)
    auto const pb = b.rank();                 // order of B (= s+q)
    auto const q = size_type(phia.size());    // number of contracted modes

    if(pa == 0ul)
        throw std::runtime_error("error in ublas::prod: order of left-hand side tensor must be greater than 0.");
    if(pb == 0ul)
        throw std::runtime_error("error in ublas::prod: order of right-hand side tensor must be greater than 0.");
    if(pa < q)
        throw std::runtime_error("error in ublas::prod: number of contraction dimensions cannot be greater than the order of the left-hand side tensor.");
    if(pb < q)
        throw std::runtime_error("error in ublas::prod: number of contraction dimensions cannot be greater than the order of the right-hand side tensor.");
    if(q != phib.size())
        throw std::runtime_error("error in ublas::prod: permutation tuples must have the same length.");
    if(pa < phia.size())
        throw std::runtime_error("error in ublas::prod: permutation tuple for the left-hand side tensor cannot be greater than the corresponding order.");
    if(pb < phib.size())
        throw std::runtime_error("error in ublas::prod: permutation tuple for the right-hand side tensor cannot be greater than the corresponding order.");

    auto const& na = a.extents();
    auto const& nb = b.extents();

    // Contracted extents must agree pairwise (phia/phib are one-based).
    for(auto i = 0ul; i < q; ++i)
        if( na.at(phia.at(i)-1) != nb.at(phib.at(i)-1))
            throw std::runtime_error("error in ublas::prod: permutations of the extents are not correct.");

    auto const r = pa - q;   // number of free (uncontracted) modes of A
    auto const s = pb - q;   // number of free (uncontracted) modes of B

    // phia1/phib1 start as the identity permutation 1..p; the loops below
    // reorder them so that the free modes come first (in their original
    // relative order) followed by the contracted modes.
    std::vector<std::size_t> phia1(pa), phib1(pb);
    std::iota(phia1.begin(), phia1.end(), 1ul);
    std::iota(phib1.begin(), phib1.end(), 1ul);

    // Result shape: free extents of A followed by free extents of B, padded
    // with ones so the result has at least rank 2.
    std::vector<std::size_t> nc( std::max ( r+s , size_type(2) ), size_type(1) );

    // std::remove shifts the surviving elements to the front and returns the
    // freed slot at the back, where the removed mode index is re-written —
    // this moves each contracted mode of A towards the end of phia1.
    for(auto i = 0ul; i < phia.size(); ++i)
        * std::remove(phia1.begin(), phia1.end(), phia.at(i)) = phia.at(i);
    //phia1.erase( std::remove(phia1.begin(), phia1.end(), phia.at(i)), phia1.end() ) ;

    assert(phia1.size() == pa);

    // The first r result extents come from the free modes of A.
    for(auto i = 0ul; i < r; ++i)
        nc[ i ] = na[ phia1[ i ] - 1 ];

    // Same reordering for B's permutation tuple.
    for(auto i = 0ul; i < phib.size(); ++i)
        * std::remove(phib1.begin(), phib1.end(), phib.at(i)) = phib.at(i) ;
    //phib1.erase( std::remove(phib1.begin(), phib1.end(), phia.at(i)), phib1.end() ) ;

    assert(phib1.size() == pb);

    // The remaining s result extents come from the free modes of B.
    for(auto i = 0ul; i < s; ++i)
        nc[ r + i ] = nb[ phib1[ i ] - 1 ];

    // std::copy( phib.begin(), phib.end(), phib1.end() );

    assert( phia1.size() == pa );
    assert( phib1.size() == pb );

    auto c = tensor_type(extents_type(nc), value_type{});

    ttt(pa, pb, q,
    phia1.data(), phib1.data(),
    c.data(), c.extents().data(), c.strides().data(),
    a.data(), a.extents().data(), a.strides().data(),
    b.data(), b.extents().data(), b.strides().data());

    return c;
}
  190. //template<class V, class F, class A1, class A2, std::size_t N, std::size_t M>
  191. //auto operator*( tensor_index<V,F,A1,N> const& lhs, tensor_index<V,F,A2,M> const& rhs)
  192. /** @brief Computes the q-mode tensor-times-tensor product
  193. *
  194. * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] )
  195. *
  196. * @note calls ublas::ttt
  197. *
  198. * na[phi[x]] = nb[phi[x]] for 1 <= x <= q
  199. *
  200. * @param[in] phi one-based permutation tuple of length q for bot input tensors
  201. * @param[in] a left-hand side tensor with order r+q
  202. * @param[in] b right-hand side tensor with order s+q
  203. * @result tensor with order r+s
  204. */
  205. template<class V, class F, class A1, class A2>
  206. auto prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b,
  207. std::vector<std::size_t> const& phi)
  208. {
  209. return prod(a, b, phi, phi);
  210. }
  211. /** @brief Computes the inner product of two tensors
  212. *
  213. * Implements c = sum(A[i1,i2,...,ip] * B[i1,i2,...,jp])
  214. *
  215. * @note calls inner function
  216. *
  217. * @param[in] a tensor object A
  218. * @param[in] b tensor object B
  219. *
  220. * @returns a value type.
  221. */
  222. template<class V, class F, class A1, class A2>
  223. auto inner_prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b)
  224. {
  225. using value_type = typename tensor<V,F,A1>::value_type;
  226. if( a.rank() != b.rank() )
  227. throw std::length_error("error in boost::numeric::ublas::inner_prod: Rank of both tensors must be the same.");
  228. if( a.empty() || b.empty())
  229. throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensors should not be empty.");
  230. if( a.extents() != b.extents())
  231. throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensor extents should be the same.");
  232. return inner(a.rank(), a.extents().data(),
  233. a.data(), a.strides().data(),
  234. b.data(), b.strides().data(), value_type{0});
  235. }
  236. /** @brief Computes the outer product of two tensors
  237. *
  238. * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq]
  239. *
  240. * @note calls outer function
  241. *
  242. * @param[in] a tensor object A
  243. * @param[in] b tensor object B
  244. *
  245. * @returns tensor object C with the same storage format F and allocator type A1
  246. */
  247. template<class V, class F, class A1, class A2>
  248. auto outer_prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b)
  249. {
  250. using tensor_type = tensor<V,F,A1>;
  251. using extents_type = typename tensor_type::extents_type;
  252. if( a.empty() || b.empty() )
  253. throw std::runtime_error("error in boost::numeric::ublas::outer_prod: tensors should not be empty.");
  254. auto nc = typename extents_type::base_type(a.rank() + b.rank());
  255. for(auto i = 0u; i < a.rank(); ++i)
  256. nc.at(i) = a.extents().at(i);
  257. for(auto i = 0u; i < b.rank(); ++i)
  258. nc.at(a.rank()+i) = b.extents().at(i);
  259. auto c = tensor_type(extents_type(nc));
  260. outer(c.data(), c.rank(), c.extents().data(), c.strides().data(),
  261. a.data(), a.rank(), a.extents().data(), a.strides().data(),
  262. b.data(), b.rank(), b.extents().data(), b.strides().data());
  263. return c;
  264. }
  265. /** @brief Transposes a tensor according to a permutation tuple
  266. *
  267. * Implements C[tau[i1],tau[i2]...,tau[ip]] = A[i1,i2,...,ip]
  268. *
  269. * @note calls trans function
  270. *
  271. * @param[in] a tensor object of rank p
  272. * @param[in] tau one-based permutation tuple of length p
  273. * @returns a transposed tensor object with the same storage format F and allocator type A
  274. */
  275. template<class V, class F, class A>
  276. auto trans(tensor<V,F,A> const& a, std::vector<std::size_t> const& tau)
  277. {
  278. using tensor_type = tensor<V,F,A>;
  279. using extents_type = typename tensor_type::extents_type;
  280. // using strides_type = typename tensor_type::strides_type;
  281. if( a.empty() )
  282. return tensor<V,F,A>{};
  283. auto const p = a.rank();
  284. auto const& na = a.extents();
  285. auto nc = typename extents_type::base_type (p);
  286. for(auto i = 0u; i < p; ++i)
  287. nc.at(tau.at(i)-1) = na.at(i);
  288. // auto wc = strides_type(extents_type(nc));
  289. auto c = tensor_type(extents_type(nc));
  290. trans( a.rank(), a.extents().data(), tau.data(),
  291. c.data(), c.strides().data(),
  292. a.data(), a.strides().data());
  293. // auto wc_pi = typename strides_type::base_type (p);
  294. // for(auto i = 0u; i < p; ++i)
  295. // wc_pi.at(tau.at(i)-1) = c.strides().at(i);
  296. //copy(a.rank(),
  297. // a.extents().data(),
  298. // c.data(), wc_pi.data(),
  299. // a.data(), a.strides().data() );
  300. return c;
  301. }
  302. /** @brief Computes the frobenius norm of a tensor expression
  303. *
  304. * @note evaluates the tensor expression and calls the accumulate function
  305. *
  306. *
  307. * Implements the two-norm with
  308. * k = sqrt( sum_(i1,...,ip) A(i1,...,ip)^2 )
  309. *
  310. * @param[in] a tensor object of rank p
  311. * @returns the frobenius norm of the tensor
  312. */
  313. //template<class V, class F, class A>
  314. //auto norm(tensor<V,F,A> const& a)
  315. template<class T, class D>
  316. auto norm(detail::tensor_expression<T,D> const& expr)
  317. {
  318. using tensor_type = typename detail::tensor_expression<T,D>::tensor_type;
  319. using value_type = typename tensor_type::value_type;
  320. auto a = tensor_type( expr );
  321. if( a.empty() )
  322. throw std::runtime_error("error in boost::numeric::ublas::norm: tensors should not be empty.");
  323. return std::sqrt( accumulate( a.order(), a.extents().data(), a.data(), a.strides().data(), value_type{},
  324. [](auto const& l, auto const& r){ return l + r*r; } ) ) ;
  325. }
  326. /** @brief Extract the real component of tensor elements within a tensor expression
  327. *
  328. * @param[in] lhs tensor expression
  329. * @returns unary tensor expression
  330. */
  331. template<class T, class D>
  332. auto real(detail::tensor_expression<T,D> const& expr) {
  333. return detail::make_unary_tensor_expression<T> (expr(), [] (auto const& l) { return std::real( l ); } );
  334. }
  335. /** @brief Extract the real component of tensor elements within a tensor expression
  336. *
  337. * @param[in] lhs tensor expression
  338. * @returns unary tensor expression
  339. */
  340. template<class V, class F, class A, class D>
  341. auto real(detail::tensor_expression<tensor<std::complex<V>,F,A>,D> const& expr)
  342. {
  343. using tensor_complex_type = tensor<std::complex<V>,F,A>;
  344. using tensor_type = tensor<V,F,typename storage_traits<A>::template rebind<V>>;
  345. if( detail::retrieve_extents( expr ).empty() )
  346. throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty.");
  347. auto a = tensor_complex_type( expr );
  348. auto c = tensor_type( a.extents() );
  349. std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::real(l) ; } );
  350. return c;
  351. }
  352. /** @brief Extract the imaginary component of tensor elements within a tensor expression
  353. *
  354. * @param[in] lhs tensor expression
  355. * @returns unary tensor expression
  356. */
  357. template<class T, class D>
  358. auto imag(detail::tensor_expression<T,D> const& lhs) {
  359. return detail::make_unary_tensor_expression<T> (lhs(), [] (auto const& l) { return std::imag( l ); } );
  360. }
  361. /** @brief Extract the imag component of tensor elements within a tensor expression
  362. *
  363. * @param[in] lhs tensor expression
  364. * @returns unary tensor expression
  365. */
  366. template<class V, class A, class F, class D>
  367. auto imag(detail::tensor_expression<tensor<std::complex<V>,F,A>,D> const& expr)
  368. {
  369. using tensor_complex_type = tensor<std::complex<V>,F,A>;
  370. using tensor_type = tensor<V,F,typename storage_traits<A>::template rebind<V>>;
  371. if( detail::retrieve_extents( expr ).empty() )
  372. throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty.");
  373. auto a = tensor_complex_type( expr );
  374. auto c = tensor_type( a.extents() );
  375. std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::imag(l) ; } );
  376. return c;
  377. }
  378. /** @brief Computes the complex conjugate component of tensor elements within a tensor expression
  379. *
  380. * @param[in] expr tensor expression
  381. * @returns complex tensor
  382. */
  383. template<class T, class D>
  384. auto conj(detail::tensor_expression<T,D> const& expr)
  385. {
  386. using tensor_type = T;
  387. using value_type = typename tensor_type::value_type;
  388. using layout_type = typename tensor_type::layout_type;
  389. using array_type = typename tensor_type::array_type;
  390. using new_value_type = std::complex<value_type>;
  391. using new_array_type = typename storage_traits<array_type>::template rebind<new_value_type>;
  392. using tensor_complex_type = tensor<new_value_type,layout_type, new_array_type>;
  393. if( detail::retrieve_extents( expr ).empty() )
  394. throw std::runtime_error("error in boost::numeric::ublas::conj: tensors should not be empty.");
  395. auto a = tensor_type( expr );
  396. auto c = tensor_complex_type( a.extents() );
  397. std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::conj(l) ; } );
  398. return c;
  399. }
  400. /** @brief Computes the complex conjugate component of tensor elements within a tensor expression
  401. *
  402. * @param[in] lhs tensor expression
  403. * @returns unary tensor expression
  404. */
  405. template<class V, class A, class F, class D>
  406. auto conj(detail::tensor_expression<tensor<std::complex<V>,F,A>,D> const& expr)
  407. {
  408. return detail::make_unary_tensor_expression<tensor<std::complex<V>,F,A>> (expr(), [] (auto const& l) { return std::conj( l ); } );
  409. }
  410. }
  411. }
  412. }
  413. #endif