diff --git a/dynare++/tl/cc/gs_tensor.cc b/dynare++/tl/cc/gs_tensor.cc
index a69e736799bcf57c03516b488f192f5c6c4b2f7a..851f7cf801507f0845e3ceb4e8d5153ee3ab9127 100644
--- a/dynare++/tl/cc/gs_tensor.cc
+++ b/dynare++/tl/cc/gs_tensor.cc
@@ -161,7 +161,7 @@ FGSTensor::FGSTensor(const UGSTensor &ut)
     }
 }
 
-// FGSTensor slicing from FSSparseTensor
+// FGSTensor slicing constructor from FSSparseTensor
 /* Here is the code of slicing constructor from the sparse tensor. We
    first calculate coordinates of first and last index of the slice
    within the sparse tensor (these are ‘lb’ and ‘ub’), and then we
diff --git a/dynare++/tl/cc/rfs_tensor.cc b/dynare++/tl/cc/rfs_tensor.cc
index 576e55198688371920fa4784a0d6eb9dd2582281..a79057b03621b13bd6b54c24a557d926a4303647 100644
--- a/dynare++/tl/cc/rfs_tensor.cc
+++ b/dynare++/tl/cc/rfs_tensor.cc
@@ -4,11 +4,10 @@
 #include "kron_prod.hh"
 #include "tl_exception.hh"
 
-// |FRTensor| conversion from unfolded
-/* The conversion from unfolded to folded sums up all data from
-   unfolded corresponding to one folded index. So we go through all the
-   rows in the unfolded tensor |ut|, make an index of the folded tensor
-   by sorting the coordinates, and add the row. */
+/* The conversion from unfolded to folded sums up all data from unfolded
+   corresponding to one folded index. So we go through all the rows in the
+   unfolded tensor ‘ut’, make an index of the folded tensor by sorting the
+   coordinates, and add the row. */
 FRTensor::FRTensor(const URTensor &ut)
   : FTensor(indor::along_row, IntSequence(ut.dimen(), ut.nvar()),
             FFSTensor::calcMaxOffset(ut.nvar(), ut.dimen()), ut.ncols(),
@@ -25,15 +24,13 @@ FRTensor::FRTensor(const URTensor &ut)
     }
 }
 
-/* Here just make a new instance and return the reference. */
-
 std::unique_ptr<UTensor>
 FRTensor::unfold() const
 {
   return std::make_unique<URTensor>(*this);
 }
 
-/* Incrementing is easy. The same as for |FFSTensor|. */
+/* Incrementing is easy. The same as for FFSTensor. */
 
 void
 FRTensor::increment(IntSequence &v) const
@@ -45,8 +42,6 @@ FRTensor::increment(IntSequence &v) const
   v.monotone();
 }
 
-/* Decrement calls static |FTensor::decrement|. */
-
 void
 FRTensor::decrement(IntSequence &v) const
 {
@@ -56,7 +51,6 @@ FRTensor::decrement(IntSequence &v) const
   FTensor::decrement(v, nv);
 }
 
-// |URTensor| conversion from folded
 /* Here we convert folded full symmetry tensor to unfolded. We copy all
    columns of folded tensor to unfolded and leave other columns
    (duplicates) zero. In this way, if the unfolded tensor is folded back,
@@ -75,16 +69,12 @@ URTensor::URTensor(const FRTensor &ft)
     }
 }
 
-/* Here we just return a reference to new instance of folded tensor. */
-
 std::unique_ptr<FTensor>
 URTensor::fold() const
 {
   return std::make_unique<FRTensor>(*this);
 }
 
-/* Here we just call |UTensor| respective static methods. */
-
 void
 URTensor::increment(IntSequence &v) const
 {
@@ -112,8 +102,8 @@ URTensor::getOffset(const IntSequence &v) const
   return UTensor::getOffset(v, nv);
 }
 
-/* Here we construct $v_1\otimes v_2\otimes\ldots\otimes v_n$, where
-   $v_1,v_2,\ldots,v_n$ are stored in |vector<ConstVector>|. */
+/* Here we construct v₁⊗v₂⊗…⊗vₙ, where v₁,v₂,…,vₙ are stored in a
+   std::vector<ConstVector>. */
 
 URSingleTensor::URSingleTensor(const std::vector<ConstVector> &cols)
   : URTensor(1, cols[0].length(), cols.size())
@@ -134,8 +124,7 @@ URSingleTensor::URSingleTensor(const std::vector<ConstVector> &cols)
   KronProd::kronMult(cols[0], ConstVector(*last), getData());
 }
 
-/* Here we construct $v\otimes\ldots\otimes v$, where the number of $v$
-   copies is |d|. */
+/* Here we construct v⊗…⊗v, where ‘d’ gives the number of copies of v. */
 
 URSingleTensor::URSingleTensor(const ConstVector &v, int d)
   : URTensor(1, v.length(), d)
@@ -156,19 +145,15 @@ URSingleTensor::URSingleTensor(const ConstVector &v, int d)
   KronProd::kronMult(v, ConstVector(*last), getData());
 }
 
-/* Here we construct |FRSingleTensor| from |URSingleTensor| and return
-   its reference. */
-
 std::unique_ptr<FTensor>
 URSingleTensor::fold() const
 {
   return std::make_unique<FRSingleTensor>(*this);
 }
 
-// |FRSingleTensor| conversion from unfolded
-/* The conversion from unfolded |URSingleTensor| to folded
-   |FRSingleTensor| is completely the same as conversion from |URTensor|
-   to |FRTensor|, only we do not copy rows but elements. */
+/* The conversion from unfolded URSingleTensor to folded FRSingleTensor is
+   exactly the same as the conversion from URTensor to FRTensor, except that we
+   do not copy rows but elements. */
 FRSingleTensor::FRSingleTensor(const URSingleTensor &ut)
   : FRTensor(1, ut.nvar(), ut.dimen())
 {
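
The rule documented in the new FRTensor comment (fold by sorting the coordinates, then sum every unfolded row that yields the same sorted index) can be illustrated without any of the dynare++ types. The sketch below uses a plain std::map keyed by coordinate tuples; the names and the storage layout are purely illustrative and are not the library's.

// Minimal stand-alone sketch of the fold-by-sorting idea described above
// FRTensor(const URTensor &): every unfolded entry is accumulated into the
// folded entry whose index is the sorted coordinate tuple.
// Illustrative code, not the dynare++ API.
#include <algorithm>
#include <iostream>
#include <map>
#include <vector>

using Index = std::vector<int>;

// Fold an unfolded tensor given as a map from coordinate tuples to values.
std::map<Index, double>
fold(const std::map<Index, double> &unfolded)
{
  std::map<Index, double> folded;
  for (const auto &[coor, val] : unfolded)
    {
      Index key = coor;
      std::sort(key.begin(), key.end()); // folded index = sorted coordinates
      folded[key] += val;                // sum all equivalent unfolded entries
    }
  return folded;
}

int
main()
{
  // A 2-dimensional unfolded tensor over 2 variables: 4 entries.
  std::map<Index, double> u = {
    {{0, 0}, 1.0}, {{0, 1}, 2.0}, {{1, 0}, 3.0}, {{1, 1}, 4.0}
  };
  for (const auto &[coor, val] : fold(u))
    std::cout << '(' << coor[0] << ',' << coor[1] << ") -> " << val << '\n';
  // Prints: (0,0) -> 1, (0,1) -> 5, (1,1) -> 4  (the (0,1)/(1,0) pair is summed)
  return 0;
}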
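
The two URSingleTensor constructors build the single column v₁⊗v₂⊗…⊗vₙ by repeated calls to KronProd::kronMult, with the last call multiplying cols[0] (or v) by an accumulated product of the trailing vectors. The following is a hedged sketch of such a recurrence with plain std::vector, assuming the usual row-major Kronecker ordering; kron() is a hypothetical helper, not the library routine.

// Stand-alone sketch of building the column v1 ⊗ v2 ⊗ … ⊗ vn:
// start from the last vector and keep Kronecker-multiplying from the left.
// Illustrative only; KronProd::kronMult and ConstVector are not used here.
#include <iostream>
#include <vector>

// Kronecker product of two column vectors: (a ⊗ b)[i*b.size()+j] = a[i]*b[j].
std::vector<double>
kron(const std::vector<double> &a, const std::vector<double> &b)
{
  std::vector<double> out;
  out.reserve(a.size()*b.size());
  for (double ai : a)
    for (double bj : b)
      out.push_back(ai*bj);
  return out;
}

int
main()
{
  std::vector<std::vector<double>> cols = {{1, 2}, {3, 4}, {5, 6}};
  // Accumulate from the right (cf. the ‘last’ vector in the constructors).
  std::vector<double> acc = cols.back();
  for (int i = static_cast<int>(cols.size()) - 2; i >= 0; i--)
    acc = kron(cols[i], acc);
  for (double x : acc)
    std::cout << x << ' ';
  std::cout << '\n'; // 2*2*2 = 8 entries of v1 ⊗ v2 ⊗ v3
  return 0;
}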
diff --git a/dynare++/tl/cc/rfs_tensor.hh b/dynare++/tl/cc/rfs_tensor.hh
index 7334565571aa5b5d5b6c00ed50756a1dcf865360..6e97127a02ace38edd649b5d7979e93c15a95734 100644
--- a/dynare++/tl/cc/rfs_tensor.hh
+++ b/dynare++/tl/cc/rfs_tensor.hh
@@ -2,36 +2,37 @@
 
 // Row-wise full symmetry tensor.
 
-/* Here we define classes for full symmetry tensors with the
-   multidimensional index identified with rows. The primary usage is for
-   storage of data coming from (or from a sum of)
-   $$\prod_{m=1}^l\left[g_{s^{\vert c_m\vert}}\right]^{\gamma_m}_{c_m(\alpha)}$$
-   where $\alpha$ coming from a multidimensional index go through some
-   set $S$ and $c$ is some equivalence. So we model a tensor of the form:
-   $$\left[\prod_{m=1}^l
-   \left[g_{s^{\vert c_m\vert}}\right]^{\gamma_m}_{c_m(\alpha)}
-   \right]_S^{\gamma_1\ldots\gamma_l}$$
-   Since all $\gamma_1,\ldots,\gamma_l$ correspond to the same variable,
-   the tensor is fully symmetric. The set of indices $S$ cannot be very
-   large and sometimes it is only one element. This case is handled in a
-   special subclass.
-
-   We provide both folded and unfolded versions. Their logic is perfectly
-   the same as in |UFSTensor| and |FFSTensor| with two exceptions. One
-   has been already mentioned, the multidimensional index is along the
-   rows. The second are conversions between the two types. Since this
-   kind of tensor is used to multiply (from the right) a tensor whose
-   multidimensional index is identified with columns, we will need a
-   different way of a conversion. If the multiplication of two folded
-   tensors is to be equivalent with multiplication of two unfolded, the
-   folding of the right tensor must sum all equivalent elements since
-   they are multiplied with the same number from the folded
-   tensor. (Equivalent here means all elements of unfolded tensor
+/* Here we define classes for full symmetry tensors with the multidimensional
+   index identified with rows. The primary usage is for storage of data coming
+   from (or from a sum of)
+    ₗ
+    ∏  [g_(s^|cₘ|)]_cₘ(α)^γₘ
+   ᵐ⁼¹
+   where α comes from a multidimensional index that goes through some set S,
+   and cₘ is some equivalence class. So we model a tensor of the form:
+
+   ⎡ ₗ                        ⎤
+   ⎢ ∏  [g_(s^|cₘ|)]_cₘ(α)^γₘ⎥
+   ⎣ᵐ⁼¹                       ⎦S^γ₁…γₗ
+
+   Since all γ₁…γₗ correspond to the same variable, the tensor is fully
+   symmetric. The set of indices S cannot be very large and sometimes it is
+   only one element. This case is handled in a special subclass.
+
+   We provide both folded and unfolded versions. Their logic is perfectly the
+   same as in UFSTensor and FFSTensor with two exceptions. One has already been
+   mentioned: the multidimensional index is along the rows. The second is the
+   conversion between the two types. Since this kind of tensor is used to
+   multiply (from the right) a tensor whose multidimensional index is
+   identified with columns, we will need a different way of conversion. If
+   the multiplication of two folded tensors is to be equivalent with
+   multiplication of two unfolded, the folding of the right tensor must sum all
+   equivalent elements since they are multiplied with the same number from the
+   folded tensor. (Equivalent here means all elements of unfolded tensor
    corresponding to one element in folded tensor.) For this reason, it is
    necessary to calculate a column number from the given sequence, so we
-   implement |getOffset|. Process of unfolding is not used, so we
-   implemented it so that unfolding and then folding a tensor would yield
-   the same data. */
+   implement getOffset(). The process of unfolding is not used, so we implemented
+   it so that unfolding and then folding a tensor would yield the same data. */
 
 #ifndef RFS_TENSOR_H
 #define RFS_TENSOR_H
@@ -40,7 +41,7 @@
 #include "fs_tensor.hh"
 #include "symmetry.hh"
 
-/* This is straightforward and very similar to |UFSTensor|. */
+/* This is straightforward and very similar to UFSTensor. */
 
 class FRTensor;
 class URTensor : public UTensor
@@ -75,7 +76,7 @@ public:
   }
 };
 
-/* This is straightforward and very similar to |FFSTensor|. */
+/* This is straightforward and very similar to FFSTensor. */
 
 class FRTensor : public FTensor
 {
@@ -113,12 +114,11 @@ public:
   }
 };
 
-/* The following class represents specialization of |URTensor| coming
-   from Kronecker multiplication of a few vectors. So the resulting
-   row-oriented tensor has one column. We provide two constructors,
-   one constructs the tensor from a few vectors stored as
-   |vector<ConstVector>|. The second makes the Kronecker power of one
-   given vector. */
+/* The following class represents a specialization of URTensor coming from
+   Kronecker multiplication of a few vectors. So the resulting row-oriented
+   tensor has one column. We provide two constructors: one constructs the
+   tensor from a few vectors stored as std::vector<ConstVector>. The second
+   makes the Kronecker power of one given vector. */
 
 class URSingleTensor : public URTensor
 {
@@ -135,11 +135,10 @@ public:
   std::unique_ptr<FTensor> fold() const override;
 };
 
-/* This class represents one column row-oriented tensor. The only way
-   how to construct it is from the |URSingleTensor| or from the
-   scratch. The folding algorithm is the same as folding of general
-   |URTensor|. Only its implementation is different, since we do not copy
-   rows, but only elements. */
+/* This class represents a one-column row-oriented tensor. The only way to
+   construct it is from URSingleTensor or from scratch. The folding algorithm
+   is the same as folding of general URTensor. Only its implementation is
+   different, since we do not copy rows, but only elements. */
 
 class FRSingleTensor : public FRTensor
 {
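
The key claim of the rewritten header comment, that folding the row-oriented (right) tensor has to sum all equivalent elements for folded-times-folded multiplication to agree with unfolded-times-unfolded, can be checked on the smallest nontrivial case. The program below is a self-contained numeric illustration for two variables and dimension two; it uses plain std::vector and none of the tensor classes.

// Tiny numeric check of why folding the row-oriented (right) tensor must
// *sum* equivalent elements: with 2 variables and dimension 2, the
// folded×folded product reproduces the unfolded×unfolded product.
// Hypothetical stand-alone sketch, not the dynare++ classes.
#include <cassert>
#include <iostream>
#include <vector>

int
main()
{
  // Unfolded column-oriented row g, columns ordered (0,0),(0,1),(1,0),(1,1).
  // Full symmetry means the (0,1) and (1,0) entries are equal.
  std::vector<double> g = {1.0, 2.5, 2.5, -4.0};
  // Unfolded row-oriented column x (same column ordering).
  std::vector<double> x = {0.5, 3.0, -1.0, 2.0};

  double unfolded = 0.0;
  for (std::size_t i = 0; i < g.size(); i++)
    unfolded += g[i]*x[i];

  // Folded g keeps one representative per sorted index: (0,0),(0,1),(1,1).
  std::vector<double> gf = {g[0], g[1], g[3]};
  // Folded x must sum the equivalent unfolded entries (0,1) and (1,0).
  std::vector<double> xf = {x[0], x[1] + x[2], x[3]};

  double folded = 0.0;
  for (std::size_t i = 0; i < gf.size(); i++)
    folded += gf[i]*xf[i];

  assert(folded == unfolded);
  std::cout << "unfolded = " << unfolded << ", folded = " << folded << '\n';
  return 0;
}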
diff --git a/dynare++/tl/cc/sparse_tensor.cc b/dynare++/tl/cc/sparse_tensor.cc
index 6594b790f46032fb95d66137572edbf0aa091fe9..a2669dfcc3480d8d2bceabbfccd78dd55e2757f5 100644
--- a/dynare++/tl/cc/sparse_tensor.cc
+++ b/dynare++/tl/cc/sparse_tensor.cc
@@ -9,7 +9,7 @@
 #include <cmath>
 
 /* This is straightforward. Before we insert anything, we do a few
-   checks. Then we reset |first_nz_row| and |last_nz_row| if necessary. */
+   checks. Then we reset ‘first_nz_row’ and ‘last_nz_row’ if necessary. */
 
 void
 SparseTensor::insert(IntSequence key, int r, double c)
@@ -23,7 +23,7 @@ SparseTensor::insert(IntSequence key, int r, double c)
 
   auto first_pos = m.lower_bound(key);
 
-  // check that pair |key| and |r| is unique
+  // check that pair ‘key’ and ‘r’ is unique
   auto last_pos = m.upper_bound(key);
   for (auto it = first_pos; it != last_pos; ++it)
     TL_RAISE_IF(it->second.first == r, "Duplicate <key, r> insertion in SparseTensor::insert");
@@ -35,7 +35,7 @@ SparseTensor::insert(IntSequence key, int r, double c)
     last_nz_row = r;
 }
 
-/* This returns true if all items are finite (not Nan nor Inf). */
+/* This returns true if all items are finite (not NaN nor ∞). */
 
 bool
 SparseTensor::isFinite() const
@@ -130,17 +130,17 @@ FSSparseTensor::insert(IntSequence key, int r, double c)
   SparseTensor::insert(std::move(key), r, c);
 }
 
-/* We go through the tensor |t| which is supposed to have single
-   column. If the item of |t| is nonzero, we make a key by sorting the
+/* We go through the tensor ‘t’ which is supposed to have a single
+   column. If the item of ‘t’ is nonzero, we make a key by sorting the
    index, and then we go through all items having the same key (it is
    its column), obtain the row number and the element, and do the
    multiplication.
 
-   The test for non-zero is |a != 0.0|, since there will be items which
+   The test for non-zero is ‘a != 0.0’, since there will be items which
    are exact zeros.
 
    I have also tried to make the loop through the sparse tensor outer, and
-   find index of tensor |t| within the loop. Surprisingly, it is little
+   find the index of tensor ‘t’ within the loop. Surprisingly, it is a little
    slower (for monomial tests with probability of zeros equal 0.3). But
    everything depends how filled is the sparse tensor. */
 
@@ -164,7 +164,7 @@ FSSparseTensor::multColumnAndAdd(const Tensor &t, Vector &v) const
 
           IntSequence key(it.getCoor());
           key.sort();
-          // check that |key| is within the range
+          // check that ‘key’ is within the range
           TL_RAISE_IF(key[0] < 0 || key[key.size()-1] >= nv,
                       "Wrong coordinates of index in FSSparseTensor::multColumnAndAdd");
 
@@ -187,16 +187,16 @@ FSSparseTensor::print() const
   SparseTensor::print();
 }
 
-// |GSSparseTensor| slicing constructor
-/* This is the same as |@<|FGSTensor| slicing from |FSSparseTensor|@>|. */
+// GSSparseTensor slicing constructor
+/* This is the same as FGSTensor slicing constructor from FSSparseTensor. */
 GSSparseTensor::GSSparseTensor(const FSSparseTensor &t, const IntSequence &ss,
                                const IntSequence &coor, TensorDimens td)
   : SparseTensor(td.dimen(), t.nrows(), td.calcFoldMaxOffset()),
     tdims(std::move(td))
 {
-  // set |lb| and |ub| to lower and upper bounds of slice indices
-  /* This is the same as |@<set |lb| and |ub| to lower and upper bounds
-     of indices@>| in {\tt gs\_tensor.cpp}, see that file for details. */
+  // set ‘lb’ and ‘ub’ to lower and upper bounds of slice indices
+  /* The same code is present in the FGSTensor slicing constructor; see it for
+     details. */
   IntSequence s_offsets(ss.size(), 0);
   for (int i = 1; i < ss.size(); i++)
     s_offsets[i] = s_offsets[i-1] + ss[i-1];
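
The storage and the multColumnAndAdd() loop described in the hunks above can be mimicked with standard containers. The sketch below is a toy stand-in, assuming a std::multimap from sorted coordinate keys to (row, value) pairs and a dense input represented as a map from coordinates to scalars; ToySparseTensor and its members are hypothetical names, not the SparseTensor interface.

// Toy mimic of the multimap layout and of the multColumnAndAdd() idea:
// for every nonzero input entry, sort its coordinates to get the key and
// accumulate value*input into the rows stored under that key.
// Hypothetical stand-in, not the dynare++ SparseTensor interface.
#include <algorithm>
#include <iostream>
#include <map>
#include <utility>
#include <vector>

using Key = std::vector<int>;

struct ToySparseTensor
{
  std::multimap<Key, std::pair<int, double>> m; // sorted key -> (row, value)
  int nrows;

  explicit ToySparseTensor(int nr)
    : nrows(nr)
  {
  }

  void
  insert(Key key, int r, double c)
  {
    std::sort(key.begin(), key.end());
    m.emplace(std::move(key), std::make_pair(r, c));
  }

  // v += (this tensor)·t, where t maps a coordinate tuple to a scalar
  // (a stand-in for the dense single-column tensor of the real code).
  void
  multColumnAndAdd(const std::map<Key, double> &t, std::vector<double> &v) const
  {
    for (const auto &[coor, a] : t)
      if (a != 0.0) // skip exact zeros, as the comment above suggests
        {
          Key key = coor;
          std::sort(key.begin(), key.end());
          auto range = m.equal_range(key);
          for (auto it = range.first; it != range.second; ++it)
            v[it->second.first] += it->second.second*a;
        }
  }
};

int
main()
{
  ToySparseTensor s(2);
  s.insert({1, 0}, 0, 3.0); // stored under the sorted key (0,1)
  s.insert({0, 1}, 1, -2.0);

  std::map<Key, double> t = {{{0, 1}, 2.0}, {{1, 0}, 0.0}, {{1, 1}, 5.0}};
  std::vector<double> v(s.nrows, 0.0);
  s.multColumnAndAdd(t, v);

  std::cout << v[0] << ' ' << v[1] << '\n'; // prints: 6 -4
  return 0;
}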
diff --git a/dynare++/tl/cc/sparse_tensor.hh b/dynare++/tl/cc/sparse_tensor.hh
index 6281eb17450e5dbececa20dca3dfc8443233e4f3..c620f069e0162e622533ab0b4be0307b5aa58add 100644
--- a/dynare++/tl/cc/sparse_tensor.hh
+++ b/dynare++/tl/cc/sparse_tensor.hh
@@ -3,19 +3,19 @@
 // Sparse tensor.
 
 /* Here we declare a sparse full and general symmetry tensors with the
-   multidimensional index along columns. We implement them as a |multimap|
-   associating to each sequence of coordinates |IntSequence| a set of
-   pairs (row, number). This is very convenient but not optimal in terms
-   of memory consumption. So the implementation can be changed.
+   multidimensional index along columns. We implement them as a std::multimap
+   associating to each sequence of coordinates IntSequence a set of pairs (row,
+   number). This is very convenient but not optimal in terms of memory
+   consumption. So the implementation can be changed.
 
-   The current |multimap| implementation allows insertions. Another
+   The current std::multimap implementation allows insertions. Another
    advantage of this approach is that we do not need to calculate column
-   numbers from the |IntSequence|, since the column is accessed directly
-   via the key which is |IntSequence|.
+   numbers from the IntSequence, since the column is accessed directly via the
+   key which is IntSequence.
 
    The only operation we need to do with the full symmetry sparse tensor
    is a left multiplication of a row oriented single column tensor. The
-   result of such operation is a column of the same size as the sparse
+   result of such an operation is a column of the same size as the sparse
    tensor. Other important operations are slicing operations. We need to
    do sparse and dense slices of full symmetry sparse tensors. In fact,
    the only constructor of general symmetry sparse tensor is slicing from
@@ -31,7 +31,6 @@
 
 #include <map>
 
-// |ltseq| predicate
 struct ltseq
 {
   bool
@@ -41,9 +40,9 @@ struct ltseq
   }
 };
 
-/* This is a super class of both full symmetry and general symmetry
-   sparse tensors. It contains a |multimap| and implements insertions. It
-   tracks maximum and minimum row, for which there is an item. */
+/* This is a super class of both full symmetry and general symmetry sparse
+   tensors. It contains a std::multimap and implements insertions. It tracks
+   maximum and minimum row, for which there is an item. */
 
 class SparseTensor
 {
@@ -109,9 +108,9 @@ public:
   bool isFinite() const;
 };
 
-/* This is a full symmetry sparse tensor. It implements
-   |multColumnAndAdd| and in addition to |sparseTensor|, it has |nv|
-   (number of variables), and symmetry (basically it is a dimension). */
+/* This is a full symmetry sparse tensor. It implements multColumnAndAdd() and,
+   in addition to SparseTensor, it has ‘nv’ (number of variables) and symmetry
+   (basically it is a dimension). */
 
 class FSSparseTensor : public SparseTensor
 {
@@ -135,10 +134,10 @@ public:
   void print() const;
 };
 
-/* This is a general symmetry sparse tensor. It has |TensorDimens| and
-   can be constructed as a slice of the full symmetry sparse tensor. The
-   slicing constructor takes the same form as the slicing |FGSTensor|
-   constructor from full symmetry sparse tensor. */
+/* This is a general symmetry sparse tensor. It has TensorDimens and can be
+   constructed as a slice of the full symmetry sparse tensor. The slicing
+   constructor takes the same form as the slicing FGSTensor constructor from
+   full symmetry sparse tensor. */
 
 class GSSparseTensor : public SparseTensor
 {
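
Both slicing constructors touched by this patch (FGSTensor from FSSparseTensor, and GSSparseTensor above) start from the per-group sizes ‘ss’ and the selected groups ‘coor’ and turn them into per-coordinate bounds ‘lb’ and ‘ub’. The following stand-alone sketch shows one plausible version of that computation together with the resulting selection of sparse entries; it is an illustration under the stated assumptions, not code taken from gs_tensor.cc or sparse_tensor.cc.

// Hedged sketch of the slice-selection idea: compute, for every dimension
// of the slice, the first and last variable of the group it refers to,
// then keep the sparse entries whose coordinates fall inside those bounds.
// The names ss, coor, lb, ub follow the comments in the diff; the code
// itself is illustrative only.
#include <iostream>
#include <map>
#include <utility>
#include <vector>

int
main()
{
  // Three variable groups (e.g. y, u, sigma) of sizes 3, 2, 1.
  std::vector<int> ss = {3, 2, 1};
  // A two-dimensional slice picking group 0 for the first coordinate and
  // group 1 for the second.
  std::vector<int> coor = {0, 1};

  // Offsets of the groups within the full variable range 0..5.
  std::vector<int> s_offsets(ss.size(), 0);
  for (std::size_t i = 1; i < ss.size(); i++)
    s_offsets[i] = s_offsets[i-1] + ss[i-1];

  // Per-coordinate bounds of the slice (assumed meaning of lb/ub).
  std::vector<int> lb(coor.size()), ub(coor.size());
  for (std::size_t i = 0; i < coor.size(); i++)
    {
      lb[i] = s_offsets[coor[i]];
      ub[i] = lb[i] + ss[coor[i]] - 1;
    }

  // A toy full-symmetry sparse tensor: sorted keys -> (row, value).
  std::multimap<std::vector<int>, std::pair<int, double>> m = {
    {{0, 3}, {0, 1.5}}, // inside the slice: 0 in group 0, 3 in group 1
    {{1, 2}, {1, 2.0}}, // outside: 2 still belongs to group 0
    {{3, 4}, {0, -1.0}} // outside: 3 is not in group 0
  };

  for (const auto &[key, rowval] : m)
    {
      bool in = true;
      for (std::size_t i = 0; i < key.size(); i++)
        if (key[i] < lb[i] || key[i] > ub[i])
          in = false;
      if (in)
        std::cout << "kept entry in row " << rowval.first
                  << " with value " << rowval.second << '\n';
    }
  return 0;
}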