GetFEM  5.4.3
getfem_assembling_tensors.cc
1 /*===========================================================================
2 
3  Copyright (C) 2003-2020 Julien Pommier
4 
5  This file is a part of GetFEM
6 
7  GetFEM is free software; you can redistribute it and/or modify it
8  under the terms of the GNU Lesser General Public License as published
9  by the Free Software Foundation; either version 3 of the License, or
10  (at your option) any later version along with the GCC Runtime Library
11  Exception either version 3.1 or (at your option) any later version.
12  This program is distributed in the hope that it will be useful, but
13  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14  or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
15  License and GCC Runtime Library Exception for more details.
16  You should have received a copy of the GNU Lesser General Public License
17  along with this program; if not, write to the Free Software Foundation,
18  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
19 
20 ===========================================================================*/
21 
22 #include "gmm/gmm_blas_interface.h"
23 #include "getfem/getfem_assembling_tensors.h"
24 #include "getfem/getfem_locale.h"
25 #include "getfem/getfem_mat_elem.h"
26 
27 namespace getfem {
28  size_type vdim_specif_list::nb_mf() const {
29  return std::count_if(begin(), end(),
30  std::mem_fn(&vdim_specif::is_mf_ref));
31  }
32  size_type vdim_specif_list::nbelt() const {
33  size_type sz = 1;
34  for (const_iterator it = begin(); it != end(); ++it) sz *= (*it).dim;
35  return sz;
36  }
37  void vdim_specif_list::build_strides_for_cv
38  (size_type cv, tensor_ranges& r, std::vector<tensor_strides >& str) const {
39  stride_type s = 1, cnt = 0;
40  str.resize(size());
41  r.resize(size());
42  for (const_iterator it = begin(); it != end(); ++it, ++cnt) {
43  if ((*it).is_mf_ref()) {
44  r[cnt] = unsigned((*it).pmf->nb_basic_dof_of_element(cv));
45  //mesh_fem::ind_dof_ct::const_iterator ii
46  // = (*it).pmf->ind_basic_dof_of_element(cv).begin();
47  str[cnt].resize(r[cnt]);
48  for (index_type j=0; j < r[cnt]; ++j) {
49  str[cnt][j] = int((*it).pmf->ind_basic_dof_of_element(cv)[j]*s);
50  }
51  } else {
52  r[cnt] = int((*it).dim);
53  str[cnt].resize(r[cnt]);
54  for (index_type j=0; j < (*it).dim; ++j) {
55  str[cnt][j] = j*s;
56  }
57  }
58  s *= stride_type((*it).dim);
59  }
60  }
61 
62  void ATN::update_childs_required_shape() {
63  for (dim_type i=0; i < nchilds(); ++i) {
64  child(i).merge_required_shape(tensor_shape(child(i).ranges()));
65  }
66  }
67  void ATN::set_number(unsigned &gcnt) {
68  if (number_ == unsigned(-1)) {
69  for (unsigned i=0; i < nchilds(); ++i) child(i).set_number(gcnt);
70  number_ = ++gcnt;
71  }
72  }
73 
74  bool ATN::is_zero_size() {
75  return child(0).is_zero_size();
76  }
77 
78  /*
79  general class for tensors which store their data
80  */
81  class ATN_tensor_w_data : public ATN_tensor {
82  TDIter data_base;
83  protected:
84  std::vector<scalar_type> data;
85  void reinit_();
86  void reinit0()
87  { ATN_tensor_w_data::reinit_(); std::fill(data.begin(), data.end(),0); }
88  };
89 
90  /* note that the data is NOT filled with zeros */
91  void ATN_tensor_w_data::reinit_() {
92  tr.assign_shape(req_shape);
93  tr.init_strides();
94  if (tr.card() > 10000000) {
95  cerr << "warning, a tensor of size " << tr.card()
96  << " will be created, it needs "
97  << tr.card()*sizeof(scalar_type) << " bytes of memory\n";
98  }
99  if (tr.card() == 0) {
100  cerr << "WARNING: tensor " << name()
101  << " will be created with a size of "
102  << ranges() << " and 0 non-null elements!" << endl;
103  }
104  data.resize(tr.card());
105  data_base = &data[0];
106  tr.set_base(data_base);
107  }
108 
109 
110  /*
111  general class for the computation of a reduced product of tensors
112  (templated by the number of product tensors)
113 
114  should be very efficient.
115  */
116  typedef std::vector<std::pair<ATN_tensor*,std::string> >
117  reduced_tensor_arg_type;
118 
119  class ATN_reduced_tensor : public ATN_tensor_w_data {
120  /* used for specification of tensors and reduction indices, see below */
121  reduced_tensor_arg_type red;
122  bgeot::tensor_reduction tred;
123  public:
124  void check_shape_update(size_type , dim_type) {
125  shape_updated_ = false;
126  for (dim_type i=0; i < nchilds(); ++i) {
127  if (child(i).is_shape_updated()) {
128  shape_updated_ = true;
129  }
130  }
131  if (shape_updated_) {
132  r_.resize(0);
133  for (dim_type i=0; i < nchilds(); ++i) {
134  std::string s = red_n(i);
135  if (s.size() != child(i).ranges().size()) {
136  ASM_THROW_TENSOR_ERROR("wrong number of indexes for the "
137  << int(i+1)
138  << "th argument of the reduction "
139  << name()
140  << " (ranges=" << child(i).ranges() << ")");
141  }
142  for (size_type j=0; j < s.length(); ++j) {
143  if (s[j] == ' ') r_.push_back(child(i).ranges()[j]);
144  }
145  }
146  }
147  }
148  void update_childs_required_shape() {
149  /* could be better, cf. the comments of the function
150  tensor_reduction::required_shape */
151  for (dim_type n=0; n < nchilds(); ++n) {
152  tensor_shape ts(child(n).ranges());
153  tensor_ranges rn(child(n).ranges());
154  const std::string &s = red[n].second;
155  GMM_ASSERT1(rn.size() == s.size(), "Wrong size !");
156  for (unsigned i=0; i < rn.size(); ++i) {
157  if (s[i] != ' ') {
158  size_type p = s.find(s[i]);
159  if (p != size_type(-1) && p < i && rn[p] != rn[i])
160  ASM_THROW_TENSOR_ERROR("can't reduce the diagonal of a tensor "
161  "of size " << rn << " with '"
162  << s << "'");
163  }
164  }
165  bgeot::tensor_reduction::diag_shape(ts, red[n].second);
166  child(n).merge_required_shape(ts);
167  }
168  }
169 
170  /*
171  r is a container of pair<vtensor&,std::string>
172  where the strings specify the reduction indices:
173 
174  if a_{ik}b_{kjl} is reduced against k and l, then the strings are
175  " k" and "k l"
176  */
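    /* Illustrative sketch of the index-string convention (restating the
       comment above, not additional behaviour): for an expression such as
       "t1(:,i).t2(i,:)" the parser builds the pairs (t1, " i") and
       (t2, "i "); a blank keeps the corresponding dimension in the result,
       while a letter marks it for reduction against every other dimension
       carrying the same letter. */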
177  ATN_reduced_tensor(reduced_tensor_arg_type& r) : red(r) {
178  for (size_type i=0; i < r.size(); ++i) add_child(*red[i].first);
179  }
180 
181  std::string red_n(size_type n) {
182  std::string s = red[n].second;
183  if (s.length() == 0)
184  s.append(red[n].first->ranges().size(), ' ');
185  return s;
186  }
187 
188  private:
189  void reinit_() {
190  tred.clear();
191  for (dim_type i=0; i < red.size(); ++i) {
192  // cerr << "ATN_reduced_tensor::reinit : insertion of r(" << red_n(i)
193  // << "), tr[" << red[i].first->ranges() << "\n"
194  // << red[i].first->tensor() << endl;*/
195  // if (red[i].first->ranges().size() != red_n(i).length()) {
196  // ASM_THROW_TENSOR_ERROR("wrong number of indexes for the "
197  // << int(i+1)
198  // << "th argument of the reduction " << name()
199  // << " (ranges=" << red[i].first->ranges()
200  // << ")");
201  // }
202  tred.insert(red[i].first->tensor(), red_n(i));
203  }
204  /* reserve the memory for the output
205  the memory is set to zero since the reduction may only affect a
206  subset of this tensor hence a part of it would not be initialized
207  */
208  ATN_tensor_w_data::reinit0();
209  /* we provide our own tensor to store the results */
210  tred.prepare(&tensor());
211  }
212 
213  void exec_(size_type , dim_type ) {
214  std::fill(data.begin(), data.end(), 0.); /* do_reduction cannot do it */
215  /* itself since it does not own the */
216  /* output tensor. */
217  tred.do_reduction();
218  }
219  };
220 
221 
222  /* slice tensor:
223  no need for temporary storage for the slice, since direct access
224  can be provided via strides.
225  */
226  class ATN_sliced_tensor : public ATN_tensor {
227  dim_type slice_dim;
228  size_type slice_idx;
229  public:
230  ATN_sliced_tensor(ATN_tensor& a, dim_type slice_dim_,
231  size_type slice_idx_) :
232  slice_dim(slice_dim_), slice_idx(slice_idx_) { add_child(a); }
233  void check_shape_update(size_type , dim_type) {
234  if ((shape_updated_ = child(0).is_shape_updated())) {
235  if (slice_dim >= child(0).ranges().size() ||
236  slice_idx >= child(0).ranges()[slice_dim]) {
237  ASM_THROW_TENSOR_ERROR("can't slice tensor " << child(0).ranges()
238  << " at index " << int(slice_idx)
239  << " of dimension " << int(slice_dim));
240  }
241  r_ = child(0).ranges(); r_.erase(r_.begin()+slice_dim);
242  }
243  }
244  void update_childs_required_shape() {
245  tensor_shape ts = req_shape;
246  ts.set_ndim_noclean(dim_type(ts.ndim()+1));
247  ts.shift_dim_num_ge(slice_dim,+1);
248  ts.push_mask(tensor_mask(child(0).ranges()[slice_dim],
249  tensor_mask::Slice(slice_dim, index_type(slice_idx))));
250  child(0).merge_required_shape(ts);
251  }
252  private:
253  void reinit_() {
254  tensor() = tensor_ref(child(0).tensor(),
255  tensor_mask::Slice(slice_dim, index_type(slice_idx)));
256  }
257  void exec_(size_type, dim_type) {}
258  };
259 
260  /* tensor with reordered indices:
261  t{i,j,k} -> t{j,i,k}
262  reorder = 0 1 2 -> 1 0 2
263  */
264  class ATN_permuted_tensor : public ATN_tensor {
265  std::vector<dim_type> reorder;
266  public:
267  /* warning: we do not check that reorder is a permutation */
268  ATN_permuted_tensor(ATN_tensor& a, const std::vector<dim_type>& reorder_) :
269  reorder(reorder_) { add_child(a); }
270  private:
271  void check_shape_update(size_type , dim_type) {
272  if ((shape_updated_ = child(0).is_shape_updated())) {
273  if (reorder.size() != child(0).ranges().size())
274  ASM_THROW_TENSOR_ERROR("can't reorder tensor '" << name()
275  << "' of dimensions " << child(0).ranges()
276  << " with this permutation: " << vref(reorder));
277  r_.resize(reorder.size());
278  std::fill(r_.begin(), r_.end(), dim_type(-1));
279 
280  /*
281  --- TODO: TO BE CHECKED !!!!! ---
282  */
283  for (size_type i=0; i < reorder.size(); ++i)
284  r_[i] = child(0).ranges()[reorder[i]];
285  }
286  }
287  void update_childs_required_shape() {
288  tensor_shape ts = req_shape;
289  ts.permute(reorder, true);
290  child(0).merge_required_shape(ts);
291  }
292  void reinit_() {
293  tensor() = child(0).tensor();
294  tensor().permute(reorder);
295  }
296  void exec_(size_type, dim_type) {}
297  };
298 
299  /* diagonal tensor: take the "diagonal" of a tensor
300  (i.e. diag(t(i,j,k), {i,k}) == t(i,j,i))
301 
302  /!\ the number of dimensions DOES NOT change
303  */
304  class ATN_diagonal_tensor : public ATN_tensor {
305  dim_type i1, i2;
306  public:
307  ATN_diagonal_tensor(ATN_tensor& a, dim_type i1_, dim_type i2_) :
308  i1(i1_), i2(i2_) { add_child(a); }
309  private:
310  void check_shape_update(size_type , dim_type) {
311  if ((shape_updated_ = child(0).is_shape_updated())) {
312  if (i1 >= child(0).ranges().size() || i2 >= child(0).ranges().size() ||
313  i1 == i2 || child(0).ranges()[i1] != child(0).ranges()[i2])
314  ASM_THROW_TENSOR_ERROR("can't take the diagonal of a tensor of "
315  "sizes " << child(0).ranges() <<
316  " at indexes " << int(i1) << " and "
317  << int(i2));
318  r_ = child(0).ranges();
319  }
320  }
321  void update_childs_required_shape() {
322  tensor_shape ts = req_shape.diag_shape(tensor_mask::Diagonal(i1,i2));
323  child(0).merge_required_shape(ts);
324  }
325  void reinit_() {
326  tensor() = tensor_ref(child(0).tensor(), tensor_mask::Diagonal(i1,i2));
327  }
328  void exec_(size_type, dim_type) {}
329  };
330 
331  /* called (if possible, i.e. if not an exact integration) for each
332  integration point during mat_elem->compute() */
333  struct computed_tensor_integration_callback
334  : public mat_elem_integration_callback {
335  bgeot::tensor_reduction red;
336  bool was_called;
337  std::vector<TDIter> tensor_bases; /* each tref of 'red' has a */
338  /* reference into this vector */
339  virtual void exec(bgeot::base_tensor &t, bool first, scalar_type c) {
340  if (first) {
341  resize_t(t);
342  std::fill(t.begin(), t.end(), 0.);
343  was_called = true;
344  }
345  assert(t.size());
346  for (unsigned k=0; k!=eltm.size(); ++k) { /* put in the 'if (first)' ? */
347  tensor_bases[k] = const_cast<TDIter>(&(*eltm[k]->begin()));
348  }
349  red.do_reduction();
350 #if defined(GMM_USES_BLAS)
351  BLAS_INT one = BLAS_INT(1), n = BLAS_INT(red.out_data.size());
352  assert(n);
353  gmm::daxpy_(&n, &c, const_cast<double*>(&(red.out_data[0])),
354  &one, (double*)&(t[0]), &one);
355 #else
356  size_type n = red.out_data.size();
357  assert(n);
358  for (size_type k=0; k < n; ++k)
359  t[k] += c * red.out_data[k];
360  // gmm::add(gmm::scaled(red.out_data, c), t.as_vector())
361 #endif
362  }
363  void resize_t(bgeot::base_tensor &t) {
364  bgeot::multi_index r;
365  if (red.reduced_range.size())
366  r.assign(red.reduced_range.begin(), red.reduced_range.end());
367  else { r.resize(1); r[0]=1; }
368  t.adjust_sizes(r);
369  }
370  };
371 
372  /*
373  ATN_computed_tensor , the largest of all
374 
375  This object has become quite complex. It is the glue with the
376  mat_elem_*. It is able to perform an inline reduction (i.e. a
377  reduction applied during the mat_elem->compute()) when it is
378  allowed (i.e. no exact integration), or do the same reduction
379  after the mat_elem->compute().
380  The reduction may also involve other ATN_tensors.
381  */
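  /* Illustration (restating the above, not additional behaviour): for
     "comp(Grad(#1)(:,i).Grad(#1)(:,i))" the sum over i is performed inside
     mat_elem->compute() at each integration point when the integration
     method is approximate; with an exact integration method the full
     elementary tensor is built first and the same reduction is applied
     afterwards through the fallback path implemented below. */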
382 
383  struct mf_comp_vect;
384 
385  struct mf_comp {
386  pnonlinear_elem_term nlt;
387  const mesh_fem* pmf; /* always defined except when op_type == NORMAL */
388  mf_comp_vect *owner;
389 
390  ATN_tensor *data;
391  std::vector<const mesh_fem*> auxmf; /* used only by nonlinear terms */
392  typedef enum { BASE=1, GRAD=2, HESS=3, NORMAL=4, GRADGT=5, GRADGTINV=6,
393  NONLIN=7, DATA=8 } op_type;
394  typedef enum { PLAIN_SHAPE = 0, VECTORIZED_SHAPE = 1,
395  MATRIXIZED_SHAPE = 2 } field_shape_type;
396  op_type op; /* the numerical values indicate the number
397  of dimensions in the tensor */
398  field_shape_type vshape; /* VECTORIZED_SHAPE if vectorization was required
399  (adds an additional dimension to the tensor
400  which represents the component number).
401  MATRIXIZED_SHAPE for "matricization" of the
402  field.
403  */
404  std::string reduction;
405  /*
406  vectorization of non-vector FEM:
407 
408  phi1 0 0
409  0 phi1 0
410  0 0 phi1
411  phi2 0 0
412  0 phi2 0
413  0 0 phi2
414  ...
415  */
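  /* Illustration: with a scalar FEM on a mesh_fem of qdim = 3, vBase(#1)
     uses the expansion above, i.e. each scalar shape function phi_k is
     repeated once per component, and the elementary tensor gains an extra
     dimension of size 3 holding the component number. */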
416  mf_comp(mf_comp_vect *ow, const mesh_fem* pmf_, op_type op_,
417  field_shape_type fst) :
418  nlt(0), pmf(pmf_), owner(ow), data(0), op(op_), vshape(fst) { }
419  mf_comp(mf_comp_vect *ow, const std::vector<const mesh_fem*> vmf,
420  pnonlinear_elem_term nlt_) :
421  nlt(nlt_), pmf(vmf[0]), owner(ow), data(0),
422  auxmf(vmf.begin()+1, vmf.end()), op(NONLIN),
423  vshape(PLAIN_SHAPE) { }
424  mf_comp(mf_comp_vect *ow, ATN_tensor *t) :
425  nlt(0), pmf(0), owner(ow), data(t), op(DATA), vshape(PLAIN_SHAPE) {}
426  void push_back_dimensions(size_type cv, tensor_ranges &rng,
427  bool only_reduced=false) const;
428  bool reduced(unsigned i) const {
429  if (i >= reduction.size()) return false;
430  else return reduction[i] != ' ';
431  }
432  };
433 
434  struct mf_comp_vect : public std::vector<mf_comp> {
435  const mesh_im *main_im;
436  public:
437  mf_comp_vect() : std::vector<mf_comp>(), main_im(0) { }
438  mf_comp_vect(const mf_comp_vect &other)
439  : std::vector<mf_comp>(other), main_im(other.main_im) {
440  for (size_type i=0; i < size(); ++i) (*this)[i].owner = this;
441  }
442  void set_im(const mesh_im &mim) { main_im = &mim; }
443  const mesh_im& get_im() const { return *main_im; }
444  private:
445  mf_comp_vect& operator=(const mf_comp_vect &other);
446  };
447 
448  void mf_comp::push_back_dimensions(size_type cv, tensor_ranges &rng,
449  bool only_reduced) const {
450  switch (op) {
451  case NONLIN:
452  {
453  const bgeot::multi_index &sizes = nlt->sizes(cv);
454  for (unsigned j=0; j < sizes.size(); ++j)
455  if (!only_reduced || !reduced(j))
456  rng.push_back(short_type(sizes[j]));
457  }
458  break;
459  case DATA:
460  for (unsigned i=0; i < data->ranges().size(); ++i)
461  if (!only_reduced || !reduced(i))
462  rng.push_back(data->ranges()[i]);
463  break;
464  case NORMAL:
465  assert(pmf==0);
466  assert(&owner->get_im());
467  assert(owner->get_im().linked_mesh().dim() != dim_type(-1));
468  rng.push_back(owner->get_im().linked_mesh().dim());
469  break;
470  case GRADGT:
471  case GRADGTINV:
472  assert(pmf==0);
473  assert(&owner->get_im());
474  rng.push_back(owner->get_im().linked_mesh().dim()); // N
475  rng.push_back(owner->get_im().linked_mesh().structure_of_convex(cv)->dim()); // P
476  break;
477  default:
478  unsigned d = 0;
479  if (!only_reduced || !reduced(d))
480  rng.push_back(unsigned(pmf->nb_basic_dof_of_element(cv)));
481  ++d;
482  if (vshape == mf_comp::VECTORIZED_SHAPE) {
483  if (!only_reduced || !reduced(d)) rng.push_back(pmf->get_qdim());
484  ++d;
485  }
486  if (vshape == mf_comp::MATRIXIZED_SHAPE) {
487  if (!only_reduced || !reduced(d)) {
488  GMM_ASSERT1(pmf->get_qdims().size() == 2, "Non matrix field");
489  rng.push_back(dim_type(pmf->get_qdims()[0]));
490  }
491  ++d;
492  if (!only_reduced || !reduced(d)) rng.push_back(dim_type(pmf->get_qdims()[1]));
493  ++d;
494  }
495 
496  if (op == GRAD || op == HESS) {
497  if (!only_reduced || !reduced(d))
498  rng.push_back(pmf->linked_mesh().dim());
499  ++d;
500  }
501  if (op == HESS) {
502  if (!only_reduced || !reduced(d))
503  rng.push_back(pmf->linked_mesh().dim());
504  ++d;
505  }
506  break;
507  }
508  }
509 
510 
511  class ATN_computed_tensor : public ATN_tensor {
512  mf_comp_vect mfcomp;
513  mat_elem_pool mep;
514  pmat_elem_computation pmec;
515  pmat_elem_type pme;
516  pintegration_method pim;
517  bgeot::pgeometric_trans pgt;
518  base_tensor t;
519  std::vector<scalar_type> data;
520  TDIter data_base;
521  stride_type tsize;
522  dal::bit_vector req_bv; /* bit_vector of values the mat_elem has to compute
523  (useful when only a subset is required from the
524  possibly very large elementary tensor) */
525  bool has_inline_reduction; /* true if used with reductions inside the comp, for example:
526  "comp(Grad(#1)(:,i).Grad(#2)(:,i))" */
527  computed_tensor_integration_callback icb; /* callback for inline reductions */
528 
529  /* if inline reductions are to be done, but were not possible (i.e. if exact
530  integration was used) then a fallback is used: apply the reduction
531  afterward, on the large expanded tensor */
532  bgeot::tensor_reduction fallback_red;
533  bool fallback_red_uptodate;
534  TDIter fallback_base;
535 
536  size_type cv_shape_update;
537  //mat_elem_inline_reduction inline_red;
538  public:
539  ATN_computed_tensor(const mf_comp_vect &mfcomp_) :
540  mfcomp(mfcomp_), pmec(0), pme(0), pim(0), pgt(0), data_base(0) {
541  has_inline_reduction = false;
542  bool in_data = false;
543  for (size_type i=0; i < mfcomp.size(); ++i) {
544  if (mfcomp[i].reduction.size() || mfcomp[i].op == mf_comp::DATA) {
545  has_inline_reduction = true;
546  if (mfcomp[i].op == mf_comp::DATA) { add_child(*mfcomp[i].data); in_data = true; }
547  }
548  if (mfcomp[i].op != mf_comp::DATA && in_data) {
549  /* constraint of fallback 'do_post_reduction' */
550  ASM_THROW_ERROR("data tensors inside comp() cannot be intermixed with Grad() and Base() etc., they must appear LAST");
551  }
552  }
553  }
554 
555  private:
556  /* mostly for non-linear terms, such as a 3x3x3x3 tensor which may have
556  many symmetries or many null elements. In that case, it is preferable
558  for getfem_mat_elem to handle only a sufficient subset of the tensor,
559  and build back the full tensor via adequate strides and masks */
560 
561  /* append a dimension (full) to tref */
562  stride_type add_dim(const tensor_ranges& rng, dim_type d, stride_type s, tensor_ref &tref) {
563  assert(d < rng.size());
564  tensor_strides v;
565  index_type r = rng[d];
566  tensor_mask m; m.set_full(d, r);
567  v.resize(r);
568  for (index_type i=0; i < r; ++i) v[i] = s*i;
569  assert(tref.masks().size() == tref.strides().size());
570  tref.set_ndim_noclean(dim_type(tref.ndim()+1));
571  tref.push_mask(m);
572  tref.strides().push_back(v);
573  return s*r;
574  }
575 
576  /* append a vectorized dimension to tref -- works also for cases
577  where target_dim > 1
578  */
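    /* Sketch of the layout produced below (illustrative values: scalar FEM
       with 3 dof, target_dim = 1, qdim = 2, hence r = 6 and qmult = 2);
       rows are the vectorized dofs, columns the components, the numbers are
       offsets (in units of the stride s) into the underlying data and '.'
       marks a masked entry:

           0 .
           . 0
           1 .
           . 1
           2 .
           . 2
    */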
579  stride_type add_vdim(const tensor_ranges& rng, dim_type d,
580  index_type target_dim, stride_type s,
581  tensor_ref &tref) {
582  assert(d < rng.size()-1);
583  index_type r = rng[d], q=rng[d+1];
584  index_type qmult = q/target_dim;
585  assert(r%qmult == 0); assert(q%qmult==0);
586 
587  tensor_strides v;
588  tensor_ranges trng(2); trng[0] = q; trng[1] = r;
589  index_set ti(2); ti[0] = dim_type(d+1); ti[1] = d;
590  tensor_mask m(trng,ti);
591  v.resize(r*target_dim);
592  tensor_ranges cnt(2);
593  for (index_type i=0; i < r; ++i) {
594  // the value in cnt[1] is not directly used as the loop variable
595  // as this makes the INTEL 2019 compiler wrongly optimize the loop check,
596  // making the outer loop go one more than it needs to;
597  // creating SEH exceptions
598  cnt[1] = i;
599  for (index_type k=0; k < target_dim; ++k) {
600  cnt[0] = k*qmult + (cnt[1]%qmult); //(cnt[1] % qmult)*target_dim + k;
601  m.set_mask_val(m.lpos(cnt), true);
602  v[cnt[1]*target_dim+k] = s*( k * r/qmult + cnt[1]/qmult); //s*((cnt[1]/qmult)*target_dim + k);
603  }
604  }
605  assert(tref.masks().size() == tref.strides().size());
606  tref.set_ndim_noclean(dim_type(tref.ndim()+2));
607  tref.push_mask(m);
608  tref.strides().push_back(v);
609  return s*(r/qmult)*target_dim;
610  }
611 
612  /* append a matrixized dimension to tref -- works also for cases
613  where target_dim > 1 (in that case the rows are "vectorized")
614 
615  for example, the Base(RT0) in 2D (3 dof, target_dim=2) is:
616  [0 1 2;
617  3 4 5]
618 
619 
620  if we set it in a mesh_fem of qdim = 3x2 , we produce the sparse
621  elementary tensor 9x3x2 =
622 
623  x x x y y y
624  0 . . 3 . . <- phi1
625  . 0 . . 3 . <- phi2
626  . . 0 . . 3 <- phi3
627  1 . . 4 . . <- phi4
628  . 1 . . 4 . <- phi5
629  . . 1 . . 4 <- phi6
630  2 . . 5 . . <- phi7
631  . 2 . . 5 . <- phi8
632  . . 2 . . 5 <- phi9
633 
634  */
635  stride_type add_mdim(const tensor_ranges& rng, dim_type d,
636  index_type target_dim, stride_type s, tensor_ref &tref) {
637  assert(d < rng.size()-2);
638 
639  /* r = nb_dof, q = nb_rows, p = nb_cols */
640  index_type r = rng[d], q=rng[d+1], p=rng[d+2];
641  index_type qmult = (q*p)/target_dim;
642 
643  assert(r % q == 0);
644  assert(p % target_dim == 0);
645  assert(r % (p/target_dim) == 0);
646 
647  tensor_strides v;
648  tensor_ranges trng(3); trng[0] = q; trng[1] = p; trng[2] = r;
649  index_set ti(3); ti[0] = dim_type(d+1); ti[1] = dim_type(d+2); ti[2] = d;
650  tensor_mask m(trng,ti);
651  v.resize(r*target_dim);
652  tensor_ranges cnt(3);
652  for (cnt[2]=0; cnt[2] < r; cnt[2]++) { // loop over virtual dof number
654  for (index_type k=0; k < target_dim; ++k) {
655  unsigned pos = (cnt[2]*target_dim+k) % (q*p);
656  //unsigned ii = (pos%q), jj = (pos/q);
657  unsigned ii = (pos/p), jj = (pos%p);
658  cnt[0] = ii; cnt[1] = jj;
659  //cerr << " set_mask_val(lpos(" << cnt[0] << "," << cnt[1] << "," << cnt[2] << ") = " << m.lpos(cnt) << ")\n";
660  m.set_mask_val(m.lpos(cnt), true);
661  v[cnt[2]*target_dim+k] = s*(k * r/qmult + cnt[2]/qmult); //s*((cnt[2]/qmult)*target_dim + k);
662  }
663  }
664  assert(tref.masks().size() == tref.strides().size());
665  tref.set_ndim_noclean(dim_type(tref.ndim()+3));
666  tref.push_mask(m);
667  // cerr << "rng = " << rng << "\nr=" << r << ", q=" << q << ", p="
668  // << p << ", qmult =" << qmult << ", target_dim= " << target_dim
669  // << "\n" << "m = " << m << "\nv=" << v << "\n";
670  tref.strides().push_back(v);
671  return s*(r/qmult)*target_dim;
672  }
673 
674 
675  /* called when the FEM has changed */
676  void update_pmat_elem(size_type cv) {
677  pme = 0;
678  for (size_type i=0; i < mfcomp.size(); ++i) {
679  if (mfcomp[i].op == mf_comp::DATA) continue;
680  pfem fem = (mfcomp[i].pmf ? mfcomp[i].pmf->fem_of_element(cv)
681  : NULL);
682  pmat_elem_type pme2 = 0;
683  switch (mfcomp[i].op) {
684  case mf_comp::BASE: pme2 = mat_elem_base(fem); break;
685  case mf_comp::GRAD: pme2 = mat_elem_grad(fem); break;
686  case mf_comp::HESS: pme2 = mat_elem_hessian(fem); break;
687  case mf_comp::NORMAL: pme2 = mat_elem_unit_normal(); break;
688  case mf_comp::GRADGT:
689  case mf_comp::GRADGTINV:
690  pme2 = mat_elem_grad_geotrans(mfcomp[i].op == mf_comp::GRADGTINV);
691  break;
692  case mf_comp::NONLIN: {
693  std::vector<pfem> ftab(1+mfcomp[i].auxmf.size());
694  ftab[0] = fem;
695  for (unsigned k=0; k < mfcomp[i].auxmf.size(); ++k)
696  ftab[k+1] = mfcomp[i].auxmf[k]->fem_of_element(cv);
697  pme2 = mat_elem_nonlinear(mfcomp[i].nlt, ftab);
698  } break;
699  case mf_comp::DATA: /*ignore*/;
700  }
701  if (pme == 0) pme = pme2;
702  else pme = mat_elem_product(pme, pme2);
703  }
704 
705  if (pme == 0) pme = mat_elem_empty();
706  //ASM_THROW_ERROR("no Base() or Grad() or etc!");
707  }
708 
709 
710 
711  size_type
712  push_back_mfcomp_dimensions(size_type cv, const mf_comp& mc,
713  unsigned &d, const bgeot::tensor_ranges &rng,
714  bgeot::tensor_ref &tref, size_type tsz=1) {
715  if (mc.op == mf_comp::NONLIN) {
716  for (size_type j=0; j < mc.nlt->sizes(cv).size(); ++j)
717  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
718  } else if (mc.op == mf_comp::DATA) {
719  assert(tsz == 1);
720  tref = mc.data->tensor();
721  tsz *= tref.card();
722  d += tref.ndim();
723  } else if (mc.op == mf_comp::NORMAL) {
724  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
725  } else if (mc.op == mf_comp::GRADGT ||
726  mc.op == mf_comp::GRADGTINV) {
727  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
728  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
729  } else {
730  size_type target_dim = mc.pmf->fem_of_element(cv)->target_dim();
731  size_type qdim = mc.pmf->get_qdim();
732 
733  //cerr << "target_dim = " << target_dim << ", qdim = " << qdim << ", vectorize=" << mc.vectorize << ", rng=" << rng << " d=" << d << ", tsz=" << tsz << "\n";
734  if (mc.vshape == mf_comp::VECTORIZED_SHAPE) {
735  if (target_dim == qdim) {
736  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
737  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
738  } else {
739  tsz = add_vdim(rng,dim_type(d),index_type(target_dim),
740  stride_type(tsz), tref);
741  d += 2;
742  }
743  } else if (mc.vshape == mf_comp::MATRIXIZED_SHAPE) {
744  if (target_dim == qdim) {
745  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
746  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
747  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
748  } else {
749  tsz = add_mdim(rng, dim_type(d), index_type(target_dim),
750  stride_type(tsz), tref);
751  d += 3;
752  }
753  } else tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
754  if (mc.op == mf_comp::GRAD || mc.op == mf_comp::HESS) {
755  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
756  }
757  if (mc.op == mf_comp::HESS) {
758  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
759  }
760  }
761  return tsz;
762  }
763 
764  void update_shape_with_inline_reduction(size_type cv) {
765  fallback_red_uptodate = false;
766  icb.tensor_bases.resize(mfcomp.size()); /* todo : resize(nb_mfcomp_not_data) */
767  icb.red.clear();
768  for (size_type i=0; i < mfcomp.size(); ++i) {
769  tensor_ref tref;
770  tensor_ranges rng;
771  unsigned d = 0;
772  mfcomp[i].push_back_dimensions(cv,rng);
773  push_back_mfcomp_dimensions(cv,mfcomp[i], d, rng, tref);
774  assert(tref.ndim() == rng.size() && d == rng.size());
775  if (mfcomp[i].reduction.size() == 0)
776  mfcomp[i].reduction.insert(size_type(0), tref.ndim(), ' ');
777  if (mfcomp[i].op != mf_comp::DATA) /* should already have the correct base */
778  tref.set_base(icb.tensor_bases[i]);
779  tref.update_idx2mask();
780  if (mfcomp[i].reduction.size() != tref.ndim()) {
781  ASM_THROW_TENSOR_ERROR("wrong number of indices for the "<< int(i+1)
782  << "th argument of the reduction "<< name()
783  << " (expected " << int(tref.ndim())
784  << " indexes, got "
785  << mfcomp[i].reduction.size());
786  }
787  icb.red.insert(tref, mfcomp[i].reduction);
788  }
789  icb.red.prepare();
790  icb.red.result(tensor());
791  r_.resize(tensor().ndim());
792  for (dim_type i=0; i < tensor().ndim(); ++i) r_[i] = tensor().dim(i);
793  tsize = tensor().card();
794  //cerr << "update_shape_with_inline_reduction: tensor=" << tensor()
795  // << "\nr_=" << r_ << ", tsize=" << tsize << "\n";
796  }
797 
798  void update_shape_with_expanded_tensor(size_type cv) {
799  icb.red.clear();
800  unsigned d = 0;
801  for (size_type i=0; i < mfcomp.size(); ++i) {
802  tsize = stride_type(push_back_mfcomp_dimensions(cv, mfcomp[i], d, r_, tensor(), tsize));
803  }
804  assert(d == r_.size());
805  tensor().update_idx2mask();
806  }
807 
808  void check_shape_update(size_type cv, dim_type) {
809  const mesh_im& mi = mfcomp.get_im();
810  pintegration_method pim2;
811  bgeot::pgeometric_trans pgt2;
812  bool fem_changed = false;
813  pgt2 = mi.linked_mesh().trans_of_convex(cv);
814  pim2 = mi.int_method_of_element(cv);
815  // cerr << "computed tensor cv=" << cv << " f=" << int(face) << "\n";
816  /* shape is considered for update only if the FEM changes,
817  changes of pgt or integration method do not affect the shape
818  (only the mat_elem) */
819  cv_shape_update = cv;
820  shape_updated_ = (pme == 0); //false;
821  for (size_type i=0; i < nchilds(); ++i)
822  shape_updated_ = shape_updated_ || child(i).is_shape_updated();
823  for (size_type i=0; shape_updated_ == false && i < mfcomp.size(); ++i) {
824  if (mfcomp[i].pmf == 0) continue;
825  if (current_cv == size_type(-1)) {
826  shape_updated_ = true; fem_changed = true;
827  } else {
828  fem_changed = fem_changed ||
829  (mfcomp[i].pmf->fem_of_element(current_cv) !=
830  mfcomp[i].pmf->fem_of_element(cv));
831  /* for FEM with non-constant nb_dof.. */
832  shape_updated_ = shape_updated_ ||
833  (mfcomp[i].pmf->nb_basic_dof_of_element(current_cv) !=
834  mfcomp[i].pmf->nb_basic_dof_of_element(cv));
835  }
836  }
837  if (shape_updated_) {
838  r_.resize(0);
839  /* get the new ranges */
840  for (unsigned i=0; i < mfcomp.size(); ++i)
841  mfcomp[i].push_back_dimensions(cv, r_, true);
842  }
843  if (fem_changed || shape_updated_) {
844  /* build the new mat_elem structure */
845  update_pmat_elem(cv);
846  }
847  if (shape_updated_ || fem_changed || pgt != pgt2 || pim != pim2) {
848  pgt = pgt2; pim = pim2;
849  pmec = mep(pme, pim, pgt, has_inline_reduction);
850  }
851  }
852 
853  void reinit_() {
854  if (!shape_updated_) return;
855  tensor().clear();
856  tsize = 1;
857  if (has_inline_reduction) {
858  update_shape_with_inline_reduction(cv_shape_update);
859  } else {
860  update_shape_with_expanded_tensor(cv_shape_update);
861  }
862  data_base = 0;
863  tensor().set_base(data_base);
864  }
865  void update_childs_required_shape() {
866  }
867 
868  /* fallback when inline reduction is not possible:
869  do the reduction after evaluation of the mat_elem */
870  void do_post_reduction(size_type cv) {
871  if (!fallback_red_uptodate) {
872  fallback_red_uptodate = true;
873  std::string s;
874  size_type sz = 1;
875  tensor_ref tref; tensor_ranges r;
876  unsigned cnt, d=0;
877  /* insert the tensorial product of Grad() etc */
878  for (cnt=0; cnt < mfcomp.size() && mfcomp[cnt].op != mf_comp::DATA; ++cnt) {
879  mfcomp[cnt].push_back_dimensions(cv,r);
880  sz = push_back_mfcomp_dimensions(cv, mfcomp[cnt], d, r, tref, sz);
881  s += mfcomp[cnt].reduction;
882  }
883  fallback_red.clear();
884  tref.set_base(fallback_base);
885  fallback_red.insert(tref, s);
886  /* insert the optional data tensors */
887  for ( ; cnt < mfcomp.size(); ++cnt) {
888  assert(mfcomp[cnt].op == mf_comp::DATA);
889  fallback_red.insert(mfcomp[cnt].data->tensor(), mfcomp[cnt].reduction);
890  }
891  fallback_red.prepare();
892  fallback_red.result(tensor()); /* this SHOULD NOT, IN ANY CASE change the shape or strides of tensor() .. */
893  assert(icb.red.reduced_range == fallback_red.reduced_range);
894  }
895  icb.resize_t(t);
896  fallback_base = &(*t.begin());
897  fallback_red.do_reduction();
898  }
899 
900  void exec_(size_type cv, dim_type face) {
901  const mesh_im& mim = mfcomp.get_im();
902  for (unsigned i=0; i < mfcomp.size(); ++i) {
903  if (mfcomp[i].op == mf_comp::DATA) {
904  size_type fullsz = 1;
905  for (unsigned j=0; j < mfcomp[i].data->ranges().size(); ++j)
906  fullsz *= mfcomp[i].data->ranges()[j];
907  if (fullsz != size_type(mfcomp[i].data->tensor().card()))
908  ASM_THROW_TENSOR_ERROR("aaarg inline reduction will explode with non-full tensors. "
909  "Complain to the author, I was too lazy to do that properly");
910  }
911  }
912  icb.was_called = false;
913  if (face == dim_type(-1)) {
914  pmec->gen_compute(t, mim.linked_mesh().points_of_convex(cv), cv,
915  has_inline_reduction ? &icb : 0);
916  } else {
917  pmec->gen_compute_on_face(t, mim.linked_mesh().points_of_convex(cv), face, cv,
918  has_inline_reduction ? &icb : 0);
919  }
920 
921  if (has_inline_reduction && icb.was_called == false) {
922  do_post_reduction(cv);
923  data_base = &fallback_red.out_data[0];
924  } else data_base = &(*t.begin());
925  GMM_ASSERT1(t.size() == size_type(tsize),
926  "Internal error: bad size " << t.size() << " should be " << tsize);
927  }
928  };
929 
930 
931  /* extract data for each dof of the convex */
932  class ATN_tensor_from_dofs_data : public ATN_tensor_w_data {
933  const base_asm_data *basm; //scalar_type* global_array;
934  vdim_specif_list vdim;
935  multi_tensor_iterator mti;
936  tensor_ranges e_r;
937  std::vector< tensor_strides > e_str;
938  public:
939  ATN_tensor_from_dofs_data(const base_asm_data *basm_,
940  const vdim_specif_list& d) :
941  basm(basm_), vdim(d) {
942  }
943  void check_shape_update(size_type cv, dim_type) {
944  shape_updated_ = false;
945  r_.resize(vdim.size());
946  for (dim_type i=0; i < vdim.size(); ++i) {
947  if (vdim[i].is_mf_ref()) {
948  size_type nbde = vdim[i].pmf->nb_basic_dof_of_element(cv);
949  if (nbde != ranges()[i])
950  { r_[i] = unsigned(nbde); shape_updated_ = true; }
951  } else if (vdim[i].dim != ranges()[i]) {
952  r_[i] = unsigned(vdim[i].dim);
953  shape_updated_ = true;
954  }
955  }
956  }
957  virtual void init_required_shape() { req_shape = tensor_shape(ranges()); }
958 
959  private:
960  void reinit_() {
961  ATN_tensor_w_data::reinit_();
962  mti.assign(tensor(), true);
963  }
964  void exec_(size_type cv, dim_type ) {
965  vdim.build_strides_for_cv(cv, e_r, e_str);
966  assert(e_r == ranges());
967  mti.rewind();
968  basm->copy_with_mti(e_str, mti, (vdim.nb_mf() >= 1) ? vdim[0].pmf : 0);
969  }
970  };
971 
972  /* enforce symmetry of a 2D tensor
973  (requiring only the upper-triangle of its child and
974  duplicating it) */
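  /* Illustration: for a square 2D tensor t, "sym(t)" restricts the required
     shape of its child to the upper triangle (i <= j) and, at execution
     time, copies each available entry to both (i,j) and (j,i) of the
     result, as done in exec_() below. */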
975  class ATN_symmetrized_tensor : public ATN_tensor_w_data {
976  multi_tensor_iterator mti;
977  public:
978  ATN_symmetrized_tensor(ATN_tensor& a) { add_child(a); }
979  void check_shape_update(size_type , dim_type) {
980  if ((shape_updated_ = child(0).is_shape_updated())) {
981  if (child(0).ranges().size() != 2 ||
982  child(0).ranges()[0] != child(0).ranges()[1])
983  ASM_THROW_TENSOR_ERROR("can't symmetrize a non-square tensor "
984  "of sizes " << child(0).ranges());
985  r_ = child(0).ranges();
986  }
987  }
988  void update_childs_required_shape() {
989  tensor_shape ts = req_shape;
990  tensor_shape ts2 = req_shape;
991  index_set perm(2); perm[0] = 1; perm[1] = 0; ts2.permute(perm);
992  ts.merge(ts2, false);
993  tensor_mask dm; dm.set_triangular(ranges()[0],0,1);
994  tensor_shape tsdm(2); tsdm.push_mask(dm);
995  ts.merge(tsdm, true);
996  child(0).merge_required_shape(ts);
997  }
998 
999  private:
1000  void reinit_() {
1001  req_shape.set_full(ranges()); // it's simpler this way
1002  ATN_tensor_w_data::reinit0();
1003  mti.assign(child(0).tensor(),true);
1004  }
1005  void exec_(size_type, dim_type) {
1006  std::fill(data.begin(), data.end(), 0.);
1007  mti.rewind();
1008  index_type n = ranges()[0];
1009  do {
1010  index_type i=mti.index(0), j=mti.index(1);
1011  data[i*n+j]=data[j*n+i]=mti.p(0);
1012  } while (mti.qnext1());
1013  }
1014  };
1015 
1016 
1017  template<class UnaryOp> class ATN_unary_op_tensor
1018  : public ATN_tensor_w_data {
1019  multi_tensor_iterator mti;
1020  public:
1021  ATN_unary_op_tensor(ATN_tensor& a) { add_child(a); }
1022  void check_shape_update(size_type , dim_type) {
1023  if ((shape_updated_ = (ranges() != child(0).ranges())))
1024  r_ = child(0).ranges();
1025  }
1026  private:
1027  void reinit_() {
1028  ATN_tensor_w_data::reinit0();
1029  mti.assign(tensor(), child(0).tensor(),false);
1030  }
1031  void update_cv_(size_type, dim_type) {
1032  mti.rewind();
1033  do {
1034  mti.p(0) = UnaryOp()(mti.p(1));
1035  } while (mti.qnext2());
1036  }
1037  };
1038 
1039  /* sum AND scalar scaling */
1040  class ATN_tensors_sum_scaled : public ATN_tensor_w_data {
1041  std::vector<multi_tensor_iterator> mti;
1042  std::vector<scalar_type> scales; /* useful for "scaled" sums such as 0.5*t1 + 0.5*t2 */
1043  public:
1044  ATN_tensors_sum_scaled(ATN_tensor& t1, scalar_type s1) {
1045  add_child(t1);
1046  scales.resize(1); scales[0]=s1;
1047  }
1048  void push_scaled_tensor(ATN_tensor& t, scalar_type s) {
1049  add_child(t); scales.push_back(s);
1050  }
1051  void check_shape_update(size_type , dim_type) {
1052  if ((shape_updated_ = child(0).is_shape_updated()))
1053  r_ = child(0).ranges();
1054  for (size_type i=1; i < nchilds(); ++i)
1055  if (ranges() != child(i).ranges())
1056  ASM_THROW_TENSOR_ERROR("can't add two tensors of sizes " <<
1057  ranges() << " and " << child(i).ranges());
1058  }
1059  void apply_scale(scalar_type s) {
1060  for (size_type i=0; i < scales.size(); ++i) scales[i] *= s;
1061  }
1062  ATN_tensors_sum_scaled* is_tensors_sum_scaled() { return this; }
1063  private:
1064  void reinit_() {
1065  ATN_tensor_w_data::reinit0();
1066  mti.resize(nchilds());
1067  for (size_type i=0; i < nchilds(); ++i)
1068  mti[i].assign(tensor(), child(i).tensor(),false);
1069  }
1070  void exec_(size_type, dim_type) {
1071  //if (cv == 0) {
1072  // cerr << "ATN_tensors_sum["<< name() << "] req_shape="
1073  // << req_shape << endl;
1074  //}
1075  std::fill(data.begin(), data.end(), 0.);
1076  mti[0].rewind();
1077  do {
1078  mti[0].p(0) = mti[0].p(1)*scales[0];
1079  } while (mti[0].qnext2());
1080  for (size_type i=1; i < nchilds(); ++i) {
1081  mti[i].rewind();
1082  do {
1083  mti[i].p(0) = mti[i].p(0)+mti[i].p(1)*scales[i];
1084  } while (mti[i].qnext2());
1085  }
1086  }
1087  };
1088 
1089  class ATN_tensor_scalar_add : public ATN_tensor_w_data {
1090  scalar_type v;
1091  multi_tensor_iterator mti;
1092  int sgn; /* v+t or v-t ? */
1093  public:
1094  ATN_tensor_scalar_add(ATN_tensor& a, scalar_type v_, int sgn_) :
1095  v(v_), sgn(sgn_) { add_child(a); }
1096  void check_shape_update(size_type , dim_type) {
1097  if ((shape_updated_ = child(0).is_shape_updated()))
1098  r_ = child(0).ranges();
1099  }
1100  private:
1101  void reinit_() {
1102  ATN_tensor_w_data::reinit_();
1103  mti.assign(tensor(), child(0).tensor(),false);
1104  }
1105  void exec_(size_type, dim_type) {
1106  std::fill(data.begin(), data.end(), v);
1107  mti.rewind();
1108  do {
1109  if (sgn > 0)
1110  mti.p(0) += mti.p(1);
1111  else mti.p(0) -= mti.p(1);
1112  } while (mti.qnext2());
1113  }
1114  };
1115 
1116  class ATN_print_tensor : public ATN {
1117  std::string name;
1118  public:
1119  ATN_print_tensor(ATN_tensor& a, std::string n_)
1120  : name(n_) { add_child(a); }
1121  private:
1122  void reinit_() {}
1123  void exec_(size_type cv, dim_type face) {
1124  multi_tensor_iterator mti(child(0).tensor(), true);
1125  cout << "------- > evaluation of " << name << ", at" << endl;
1126  cout << "convex " << cv;
1127  if (face != dim_type(-1)) cout << ", face " << int(face);
1128  cout << endl;
1129  cout << " size = " << child(0).ranges() << endl;
1130  mti.rewind();
1131  do {
1132  cout << " @[";
1133  for (size_type i=0; i < child(0).ranges().size(); ++i)
1134  cout <<(i>0 ? "," : "") << mti.index(dim_type(i));
1135  cout << "] = " << mti.p(0) << endl;
1136  } while (mti.qnext1());
1137  }
1138  };
1139 
1140 
1141  /*
1142  -------------------
1143  analysis of the supplied string
1144  -----------------
1145  */
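  /* A minimal usage sketch of the assembly language handled below (assuming
     a mesh_im 'mim', a scalar mesh_fem 'mf' and a writable sparse matrix 'K'
     built elsewhere); it assembles a stiffness matrix:

       getfem::generic_assembly assem;
       assem.set("M(#1,#1) += sym(comp(Grad(#1).Grad(#1))(:,i,:,i))");
       assem.push_mi(mim);
       assem.push_mf(mf);
       assem.push_mat(K);
       assem.assembly();
  */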
1146 
1147  std::string asm_tokenizer::syntax_err_print() {
1148  std::string s;
1149  if (tok_pos - err_msg_mark > 80) err_msg_mark = tok_pos - 40;
1150  if (str.length() - err_msg_mark < 80) s = tok_substr(err_msg_mark, str.length());
1151  else { s = tok_substr(err_msg_mark,err_msg_mark+70); s.append(" ... (truncated)"); }
1152  s += "\n" + std::string(std::max(int(tok_pos - err_msg_mark),0), '-') + "^^";
1153  return s;
1154  }
1155 
1156  void asm_tokenizer::get_tok() {
1157  standard_locale sl;
1158  curr_tok_ival = -1;
1159  while (tok_pos < str.length() && isspace(str[tok_pos])) ++tok_pos;
1160  if (tok_pos == str.length()) {
1161  curr_tok_type = END; tok_len = 0;
1162  } else if (strchr("{}(),;:=-.*/+", str[tok_pos])) {
1163  curr_tok_type = tok_type_enum(str[tok_pos]); tok_len = 1;
1164  } else if (str[tok_pos] == '$' || str[tok_pos] == '#' || str[tok_pos] == '%') {
1165  curr_tok_type = str[tok_pos] == '$' ? ARGNUM_SELECTOR :
1166  (str[tok_pos] == '#' ? MFREF : IMREF);
1167  tok_len = 1;
1168  curr_tok_ival = 0;
1169  while (isdigit(str[tok_pos+tok_len])) {
1170  curr_tok_ival*=10;
1171  curr_tok_ival += str[tok_pos+tok_len] - '0';
1172  ++tok_len;
1173  }
1174  curr_tok_ival--;
1175  } else if (isalpha(str[tok_pos])) {
1176  curr_tok_type = IDENT;
1177  tok_len = 0;
1178  while (isalnum(str[tok_pos+tok_len]) || str[tok_pos+tok_len] == '_') ++tok_len;
1179  } else if (isdigit(str[tok_pos])) {
1180  curr_tok_type = NUMBER;
1181  char *p;
1182  curr_tok_dval = strtod(&str[0]+tok_pos, &p);
1183  tok_len = p - &str[0] - tok_pos;
1184  }
1185  if (tok_pos < str.length())
1186  curr_tok = str.substr(tok_pos, tok_len);
1187  else
1188  curr_tok.clear();
1189  }
1190 
1191  const mesh_fem& generic_assembly::do_mf_arg_basic() {
1192  if (tok_type() != MFREF) ASM_THROW_PARSE_ERROR("expecting mesh_fem reference");
1193  if (tok_mfref_num() >= mftab.size())
1194  ASM_THROW_PARSE_ERROR("reference to a non-existant mesh_fem #" << tok_mfref_num()+1);
1195  const mesh_fem& mf_ = *mftab[tok_mfref_num()]; advance();
1196  return mf_;
1197  }
1198 
1199  const mesh_fem& generic_assembly::do_mf_arg(std::vector<const mesh_fem*> * multimf) {
1200  if (!multimf) advance(); // special hack for NonLin$i(#a,#b,..)
1201  accept(OPEN_PAR,"expecting '('");
1202  const mesh_fem &mf_ = do_mf_arg_basic();
1203  if (multimf) {
1204  multimf->resize(1); (*multimf)[0] = &mf_;
1205  while (advance_if(COMMA)) {
1206  if (tok_type() != MFREF) ASM_THROW_PARSE_ERROR("expecting mesh_fem reference");
1207  if (tok_mfref_num() >= mftab.size())
1208  ASM_THROW_PARSE_ERROR("reference to a non-existant mesh_fem #" << tok_mfref_num()+1);
1209  multimf->push_back(mftab[tok_mfref_num()]); advance();
1210  }
1211  }
1212  accept(CLOSE_PAR, "expecting ')'");
1213  return mf_;
1214  }
1215 
1216  /* "inline" reduction operations inside comp(..) */
1217  std::string generic_assembly::do_comp_red_ops() {
1218  std::string s;
1219  if (advance_if(OPEN_PAR)) {
1220  size_type j = 0;
1221  do {
1222  if (tok_type() == COLON) {
1223  s.push_back(' '); advance(); j++;
1224  } else if (tok_type() == IDENT) {
1225  if ((tok().length()==1 && isalpha(tok()[0])) || islower(tok()[0])) {
1226  s.push_back(tok()[0]); advance(); j++;
1227  } else ASM_THROW_PARSE_ERROR("invalid reduction index '" << tok() <<
1228  "', only lower case characters allowed");
1229  }
1230  } while (advance_if(COMMA));
1231  accept(CLOSE_PAR, "expecting ')'");
1232  }
1233  return s;
1234  }
1235 
1236  static mf_comp::field_shape_type get_shape(const std::string &s) {
1237  if (s[0] == 'v') return mf_comp::VECTORIZED_SHAPE;
1238  else if (s[0] == 'm') return mf_comp::MATRIXIZED_SHAPE;
1239  else return mf_comp::PLAIN_SHAPE;
1240  }
1241 
1242  ATN_tensor* generic_assembly::do_comp() {
1243  accept(OPEN_PAR, "expecting '('");
1244  mf_comp_vect what;
1245  bool in_data = false;
1246  /* the first optional argument is the "main" mesh_im, i.e. the one
1247  whose integration methods are used, (and whose linked_mesh is
1248  used for mf_comp::NORMAL, mf_comp::GRADGT etc computations). If
1249  not given, then the first mesh_im pushed is used (then expect
1250  problems when assembling simultaneously on two different
1251  meshes).
1252  */
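    /* e.g. (illustrative) "comp(%2, Base(#1).Base(#1))" would select the
       second mesh_im pushed into the assembly instead of the first one. */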
1253  if (tok_type() == IMREF) {
1254  if (tok_imref_num() >= imtab.size())
1255  ASM_THROW_PARSE_ERROR("reference to a non-existant mesh_im %" << tok_imref_num()+1);
1256  what.set_im(*imtab[tok_imref_num()]); advance();
1257  accept(COMMA, "expecting ','");
1258  } else {
1259  what.set_im(*imtab[0]);
1260  }
1261  do {
1262  if (tok_type() == CLOSE_PAR) break;
1263  if (tok_type() != IDENT) ASM_THROW_PARSE_ERROR("expecting Base or Grad or Hess, Normal, etc..");
1264  std::string f = tok();
1265  const mesh_fem *pmf = 0;
1266  if (f.compare("Base")==0 || f.compare("vBase")==0 || f.compare("mBase")==0) {
1267  pmf = &do_mf_arg();
1268  what.push_back(mf_comp(&what, pmf, mf_comp::BASE, get_shape(f)));
1269  } else if (f.compare("Grad")==0 || f.compare("vGrad")==0 || f.compare("mGrad")==0) {
1270  pmf = &do_mf_arg();
1271  what.push_back(mf_comp(&what, pmf, mf_comp::GRAD, get_shape(f)));
1272  } else if (f.compare("Hess")==0 || f.compare("vHess")==0 || f.compare("mHess")==0) {
1273  pmf = &do_mf_arg();
1274  what.push_back(mf_comp(&what, pmf, mf_comp::HESS, get_shape(f)));
1275  } else if (f.compare("NonLin")==0) {
1276  size_type num = 0; /* default value */
1277  advance();
1278  if (tok_type() == ARGNUM_SELECTOR) { num = tok_argnum(); advance(); }
1279  if (num >= innonlin.size()) ASM_THROW_PARSE_ERROR("NonLin$" << num << " does not exist");
1280  std::vector<const mesh_fem*> allmf;
1281  pmf = &do_mf_arg(&allmf); what.push_back(mf_comp(&what, allmf, innonlin[num]));
1282  } else if (f.compare("Normal") == 0) {
1283  advance();
1284  accept(OPEN_PAR,"expecting '('"); accept(CLOSE_PAR,"expecting ')'");
1285  pmf = 0; what.push_back(mf_comp(&what, pmf, mf_comp::NORMAL, mf_comp::PLAIN_SHAPE));
1286  } else if (f.compare("GradGT") == 0 ||
1287  f.compare("GradGTInv") == 0) {
1288  advance();
1289  accept(OPEN_PAR,"expecting '('"); accept(CLOSE_PAR,"expecting ')'");
1290  pmf = 0;
1291  what.push_back(mf_comp(&what, pmf,
1292  f.compare("GradGT") == 0 ?
1293  mf_comp::GRADGT :
1294  mf_comp::GRADGTINV, mf_comp::PLAIN_SHAPE));
1295  } else {
1296  if (vars.find(f) != vars.end()) {
1297  what.push_back(mf_comp(&what, vars[f]));
1298  in_data = true;
1299  advance();
1300  } else {
1301  ASM_THROW_PARSE_ERROR("expecting Base, Grad, vBase, NonLin ...");
1302  }
1303  }
1304 
1305  if (!in_data && f[0] != 'v' && f[0] != 'm' && pmf && pmf->get_qdim() != 1 && f.compare("NonLin")!=0) {
1306  ASM_THROW_PARSE_ERROR("Attempt to use a vector mesh_fem as a scalar mesh_fem");
1307  }
1308  what.back().reduction = do_comp_red_ops();
1309  } while (advance_if(PRODUCT));
1310  accept(CLOSE_PAR, "expecting ')'");
1311 
1312  return record(std::make_unique<ATN_computed_tensor>(what));
1313  }
1314 
1315  void generic_assembly::do_dim_spec(vdim_specif_list& lst) {
1316  lst.resize(0);
1317  accept(OPEN_PAR, "expecting '('");
1318  while (true) {
1319  if (tok_type() == IDENT) {
1320  if (tok().compare("mdim")==0) lst.push_back(vdim_specif(do_mf_arg().linked_mesh().dim()));
1321  else if (tok().compare("qdim")==0) lst.push_back(vdim_specif(do_mf_arg().get_qdim()));
1322  else ASM_THROW_PARSE_ERROR("expecting mdim(#mf) or qdim(#mf) or a number or a mesh_fem #id");
1323  } else if (tok_type() == NUMBER) {
1324  lst.push_back(vdim_specif(tok_number_ival()+1));
1325  advance();
1326  } else if (tok_type() == MFREF) {
1327  lst.push_back(vdim_specif(&do_mf_arg_basic()));
1328  } else if (tok_type() != CLOSE_PAR) ASM_THROW_PARSE_ERROR("expecting mdim(#mf) or qdim(#mf) or a number or a mesh_fem #id");
1329  /* if (mfcnt && !lst.back().is_mf_ref())
1330  ASM_THROW_PARSE_ERROR("#mf argument must be given after numeric dimensions");*/
1331  if (advance_if(CLOSE_PAR)) break;
1332  accept(COMMA,"expecting ',' or ')'");
1333  }
1334  }
1335 
1336 
1337  ATN_tensor* generic_assembly::do_data() {
1338  // ATN_tensor *t;
1339  size_type datanum = 0; /* by default */
1340  if (tok_type() != OPEN_PAR) { /* the dataset number may be omitted */
1341  if (tok_type() != ARGNUM_SELECTOR)
1342  ASM_THROW_PARSE_ERROR("expecting dataset number");
1343  datanum = tok_argnum();
1344  advance();
1345  }
1346  if (datanum >= indata.size())
1347  ASM_THROW_PARSE_ERROR("wrong dataset number: " << datanum);
1348 
1349  vdim_specif_list sz;
1350  do_dim_spec(sz);
1351 
1352  if (sz.nbelt() != indata[datanum]->vect_size())
1353  ASM_THROW_PARSE_ERROR("invalid size for data argument " << datanum+1 <<
1354  " real size is " << indata[datanum]->vect_size()
1355  << " expected size is " << sz.nbelt());
1356  return record(std::make_unique<ATN_tensor_from_dofs_data>(indata[datanum].get(), sz));
1357  }
1358 
1359  std::pair<ATN_tensor*, std::string>
1360  generic_assembly::do_red_ops(ATN_tensor* t) {
1361  std::string s;
1362 
1363  if (advance_if(OPEN_PAR)) {
1364  size_type j = 0;
1365  do {
1366  if (tok_type() == COLON) {
1367  s.push_back(' '); advance(); j++;
1368  } else if (tok_type() == NUMBER) {
1369  t = record(std::make_unique<ATN_sliced_tensor>(*t, dim_type(j),
1370  tok_number_ival())); advance();
1371  } else if (tok_type() == IDENT) {
1372  if ((tok().length()==1 && isalpha(tok()[0])) || islower(tok()[0])) {
1373  s.push_back(tok()[0]); advance(); j++;
1374  } else ASM_THROW_PARSE_ERROR("invalid reduction index '" << tok() <<
1375  "', only lower case chars allowed");
1376  }
1377  } while (advance_if(COMMA));
1378  accept(CLOSE_PAR, "expecting ')'");
1379  }
1380  return std::pair<ATN_tensor*,std::string>(t,s);
1381  }
1382 
1383  /*
1384  ( expr )
1385  variable
1386  comp(..)
1387  data(data)
1388  */
1389  tnode generic_assembly::do_tens() {
1390  tnode t;
1391  push_mark();
1392  if (advance_if(OPEN_PAR)) {
1393  t = do_expr();
1394  accept(CLOSE_PAR, "expecting ')'");
1395  } else if (tok_type() == NUMBER) {
1396  t.assign(tok_number_dval()); advance();
1397  } else if (tok_type() == IDENT) {
1398  if (vars.find(tok()) != vars.end()) {
1399  t.assign(vars[tok()]); advance();
1400  } else if (tok().compare("comp")==0) {
1401  advance(); t.assign(do_comp());
1402  } else if (tok().compare("data")==0) {
1403  advance(); t.assign(do_data());
1404  } else if (tok().compare("sym")==0) {
1405  advance();
1406  tnode t2 = do_expr();
1407  if (t2.type() != tnode::TNTENSOR)
1408  ASM_THROW_PARSE_ERROR("can't symmetrise a scalar!");
1409  t.assign(record(std::make_unique<ATN_symmetrized_tensor>(*t2.tensor())));
1410  } else ASM_THROW_PARSE_ERROR("unknown identifier: " << tok());
1411  } else ASM_THROW_PARSE_ERROR("unexpected token: " << tok());
1412  pop_mark();
1413  return t;
1414  }
1415 
1416  /*
1417  handle tensorial product/reduction
1418 
1419  a(:,i).b(j,i).(c)(1,:,i)
1420  */
1421  tnode generic_assembly::do_prod() {
1422  reduced_tensor_arg_type ttab;
1423 
1424  do {
1425  tnode t = do_tens();
1426  if (t.type() == tnode::TNCONST) {
1427  if (ttab.size() == 0) return t;
1428  else ASM_THROW_PARSE_ERROR("can't mix tensor and scalar into a "
1429  "reduction expression");
1430  }
1431  ttab.push_back(do_red_ops(t.tensor()));
1432  } while (advance_if(PRODUCT));
1433  if (ttab.size() == 1 && ttab[0].second.length() == 0) {
1434  return tnode(ttab[0].first);
1435  } else {
1436  return tnode(record(std::make_unique<ATN_reduced_tensor>(ttab)));
1437  }
1438  }
1439 
1440  /* calls do_prod() once,
1441  and handles successive reordering/diagonal transformations */
1442  tnode generic_assembly::do_prod_trans() {
1443  tnode t = do_prod();
1444  while (advance_if(OPEN_BRACE)) {
1445  index_set reorder;
1446  size_type j = 0;
1447  dal::bit_vector check_permut;
1448  if (t.type() != tnode::TNTENSOR)
1449  ASM_THROW_PARSE_ERROR("can't use reorder braces on a constant!");
1450  for (;; ++j) {
1451  size_type i;
1452  if (tok_type() == COLON) i = j;
1453  else if (tok_type() == NUMBER) i = tok_number_ival(1000);
1454  else ASM_THROW_PARSE_ERROR("only numbers or colons allowed here");
1455  if (check_permut.is_in(i)) { /* take the diagonal of the tensor */
1456  t = tnode(record(std::make_unique<ATN_diagonal_tensor>(*t.tensor(), dim_type(i),
1457  dim_type(j))));
1458  check_permut.add(j);
1459  reorder.push_back(dim_type(j));
1460  } else {
1461  check_permut.add(i);
1462  reorder.push_back(dim_type(i));
1463  }
1464  advance();
1465  if (advance_if(CLOSE_BRACE)) break;
1466  accept(COMMA, "expecting ','");
1467  }
1468  if (check_permut.first_false() != reorder.size()) {
1469  cerr << check_permut << endl;
1470  cerr << vref(reorder) << endl;
1471  ASM_THROW_PARSE_ERROR("you did not give a real permutation:"
1472  << vref(reorder));
1473  }
1474  t = tnode(record(std::make_unique<ATN_permuted_tensor>(*t.tensor(), reorder)));
1475  }
1476  return t;
1477  }
1478 
1479  /*
1480  term := prod_trans*prod_trans/prod_trans ...
1481  */
1482  tnode generic_assembly::do_term() {
1483  push_mark();
1484  err_set_mark();
1485  tnode t = do_prod_trans();
1486  while (true) {
1487  bool mult;
1488  if (advance_if(MULTIPLY)) mult = true;
1489  else if (advance_if(DIVIDE)) mult = false;
1490  else break;
1491  tnode t2 = do_prod();
1492  if (mult == false && t.type() == tnode::TNCONST
1493  && t2.type() == tnode::TNTENSOR)
1494  ASM_THROW_PARSE_ERROR("can't divide a constant by a tensor");
1495  if (t.type() == tnode::TNTENSOR && t2.type() == tnode::TNTENSOR) {
1496  ASM_THROW_PARSE_ERROR("tensor term-by-term productor division not "
1497  "implemented yet! are you sure you need it ?");
1498  } else if (t.type() == tnode::TNCONST && t2.type() == tnode::TNCONST) {
1499  if (mult)
1500  t.assign(t.xval()*t2.xval());
1501  else {
1502  t2.check0();
1503  t.assign(t.xval()/t2.xval());
1504  }
1505  } else {
1506  if (t.type() != tnode::TNTENSOR) std::swap(t,t2);
1507  scalar_type v = t2.xval();
1508  if (!mult) {
1509  if (v == 0.) { ASM_THROW_PARSE_ERROR("can't divide by zero"); }
1510  else v = 1./v;
1511  }
1512  if (t.tensor()->is_tensors_sum_scaled() && !t.tensor()->is_frozen()) {
1513  t.tensor()->is_tensors_sum_scaled()->apply_scale(v);
1514  } else {
1515  t.assign(record(std::make_unique<ATN_tensors_sum_scaled>(*t.tensor(), v)));
1516  }
1517  }
1518  }
1519  pop_mark();
1520  return t;
1521  }
1522 
1523  /*
1524  expr := term + term - term + ...
1525  suboptimal for things like t1+1-2-1 (which gives (((t1+1)-2)-1) )
1526  ... could be fixed but no one needs that, I guess
1527  */
1528  tnode generic_assembly::do_expr() {
1529  bool negt=false;
1530  push_mark();
1531  if (advance_if(MINUS)) negt = true;
1532  tnode t = do_term();
1533  if (negt) {
1534  if (t.type() == tnode::TNCONST) t.assign(-t.xval());
1535  else t.assign(record(std::make_unique<ATN_tensor_scalar_add>(*t.tensor(), 0., -1)));
1536  }
1537  while (true) {
1538  int plus;
1539  if (advance_if(PLUS)) plus = +1;
1540  else if (advance_if(MINUS)) plus = -1;
1541  else break;
1542  tnode t2 = do_term();
1543  if (t.type() == tnode::TNTENSOR && t2.type() == tnode::TNTENSOR) {
1544  if (!t.tensor()->is_tensors_sum_scaled() || t.tensor()->is_frozen()) {
1545  t.assign(record(std::make_unique<ATN_tensors_sum_scaled>(*t.tensor(), +1)));
1546  }
1547  t.tensor()->is_tensors_sum_scaled()
1548  ->push_scaled_tensor(*t2.tensor(), scalar_type(plus));
1549  } else if (t.type() == tnode::TNCONST && t2.type() == tnode::TNCONST) {
1550  t.assign(t.xval()+t2.xval()*plus);
1551  } else {
1552  int tsgn = 1;
1553  if (t.type() != tnode::TNTENSOR)
1554  { std::swap(t,t2); if (plus<0) tsgn = -1; }
1555  else if (plus<0) t2.assign(-t2.xval());
1556  t.assign(record(std::make_unique<ATN_tensor_scalar_add>(*t.tensor(), t2.xval(),
1557  tsgn)));
1558  }
1559  }
1560  pop_mark();
1561  return t;
1562  }
1563 
1564  /* instr := ident '=' expr |
1565  print expr |
1566  M(#mf,#mf) '+=' expr |
1567  V(#mf) '+=' expr */
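  /* Illustrative instances of the grammar above (assuming the referenced
     mesh_fem, matrix and vector arguments have been pushed):
       "t=comp(Base(#1).Base(#1));"      aliases an elementary tensor
       "M(#1,#1)+=sym(t(:,:));"          accumulates into output matrix $1
       "V(#1)+=comp(Base(#1))(:);"       accumulates into output vector $1
       "print comp(Grad(#1));"           prints the elementary tensor
  */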
1568  void generic_assembly::do_instr() {
1569  enum { wALIAS, wOUTPUT_ARRAY, wOUTPUT_MATRIX, wPRINT, wERROR }
1570  what = wERROR;
1571  std::string ident;
1572 
1573  /* get the lhs identifier */
1574  if (tok_type() != IDENT) ASM_THROW_PARSE_ERROR("expecting identifier");
1575  if (vars.find(tok()) != vars.end())
1576  ASM_THROW_PARSE_ERROR("redefinition of identifier " << tok());
1577 
1578  push_mark();
1579  ident = tok();
1580  advance();
1581 
1582  size_type print_mark = 0;
1583  size_type arg_num = size_type(-1);
1584 
1585  vdim_specif_list vds;
1586 
1587  if (ident.compare("print") == 0) {
1588  print_mark = tok_mark();
1589  what = wPRINT;
1590  } else if (tok_type() == ARGNUM_SELECTOR ||
1591  tok_type() == OPEN_PAR) {
1592  if (tok_type() == ARGNUM_SELECTOR) {
1593  arg_num = tok_argnum();
1594  advance();
1595  } else { arg_num = 0; }
1596 
1597  do_dim_spec(vds);
1598 
1599  /* check the validity of the output statement */
1600  if (ident.compare("V")==0) {
1601  what = wOUTPUT_ARRAY;
1602  if (arg_num >= outvec.size())
1603  { outvec.resize(arg_num+1); outvec[arg_num] = 0; }
1604  /* if we are allowed to dynamically create vectors */
1605  if (outvec[arg_num] == 0) {
1606  if (vec_fact != 0) {
1607  tensor_ranges r(vds.size());
1608  for (size_type i=0; i < vds.size(); ++i)
1609  r[i] = unsigned(vds[i].dim);
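  /* note: the aliasing constructor with an empty owner builds a non-owning
     shared_ptr around the raw pointer returned by the factory, which
     presumably keeps responsibility for its lifetime; the matrix branch
     below uses the same idiom */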
1610  outvec[arg_num] = std::shared_ptr<base_asm_vec>(std::shared_ptr<base_asm_vec>(), vec_fact->create_vec(r));
1611  }
1612  else ASM_THROW_PARSE_ERROR("output vector $" << arg_num+1
1613  << " does not exist");
1614  }
1615  } else if (vds.nb_mf()==2 && vds.size() == 2 && ident.compare("M")==0) {
1616  what = wOUTPUT_MATRIX;
1617  if (arg_num >= outmat.size())
1618  { outmat.resize(arg_num+1); outmat[arg_num] = 0; }
1619  /* if we are allowed to dynamically create matrices */
1620  if (outmat[arg_num] == 0) {
1621  if (mat_fact != 0)
1622  outmat[arg_num] = std::shared_ptr<base_asm_mat>
1623  (std::shared_ptr<base_asm_mat>(),
1624  mat_fact->create_mat(vds[0].pmf->nb_dof(),
1625  vds[1].pmf->nb_dof()));
1626  else ASM_THROW_PARSE_ERROR("output matrix $" << arg_num+1
1627  << " does not exist");
1628  }
1629  } else ASM_THROW_PARSE_ERROR("not a valid output statement");
1630 
1631  accept(PLUS);
1632  accept(EQUAL);
1633  } else if (advance_if(EQUAL)) {
1634  what = wALIAS;
1635  } else ASM_THROW_PARSE_ERROR("missing '=' or '+='");
1636 
1637  tnode t = do_expr();
1638  if (t.type() != tnode::TNTENSOR)
1639  ASM_THROW_PARSE_ERROR("left hand side is a constant, not a tensor!");
1640 
1641  switch (what) {
1642  case wPRINT: {
1643  record_out(std::make_unique<ATN_print_tensor>(*t.tensor(), tok_substr(print_mark,
1644  tok_mark())));
1645  } break;
1646  case wOUTPUT_ARRAY: {
1647  record_out(outvec[arg_num]->build_output_tensor(*t.tensor(), vds));
1648  } break;
1649  case wOUTPUT_MATRIX: {
1650  record_out(outmat[arg_num]->build_output_tensor(*t.tensor(),
1651  *vds[0].pmf,
1652  *vds[1].pmf));
1653  } break;
1654  case wALIAS: {
1655  vars[ident] = t.tensor(); t.tensor()->freeze();
1656  } break;
1657  default: GMM_ASSERT3(false, ""); break;
1658  }
1659  pop_mark();
1660  }
1661 
1662  struct atn_number_compare {
1663  bool operator()(const std::unique_ptr<ATN_tensor> &a,
1664  const std::unique_ptr<ATN_tensor> &b) {
1665  assert(a.get() && b.get());
1666  return (a->number() < b->number());
1667  }
1668  };
1669 
1670  struct outvar_compare {
1671  bool operator()(const std::unique_ptr<ATN> &a,
1672  const std::unique_ptr<ATN> &b) {
1673  assert(a.get() && b.get());
1674  return (a->number() < b->number());
1675  }
1676  };
1677 
1678  void generic_assembly::parse() {
1679  if (parse_done) return;
1680  do {
1681  if (tok_type() == END) break;
1682  do_instr();
1683  } while (advance_if(SEMICOLON));
1684  if (tok_type() != END) ASM_THROW_PARSE_ERROR("unexpected token: '"
1685  << tok() << "'");
1686  if (outvars.size() == 0) cerr << "warning: assembly without output\n";
1687 
1688  /* reordering of atn_tensors and outvars */
1689  unsigned gcnt = 0;
1690  for (size_type i=0; i < outvars.size(); ++i)
1691  outvars[i]->set_number(gcnt);
1692 
1693  std::sort(atn_tensors.begin(), atn_tensors.end(), atn_number_compare());
1694  std::sort(outvars.begin(), outvars.end(), outvar_compare());
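  /* note: ATN::set_number gives children smaller numbers than their parents,
     so sorting by number() guarantees that exec() evaluates every tensor
     after the tensors it depends on */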
1695 
1696  /* remove non-numbered (ie unused) atn_tensors */
1697  while (atn_tensors.size()
1698  && atn_tensors.back()->number() == unsigned(-1)) {
1699  cerr << "warning: the expression " << atn_tensors.back()->name()
1700  << " won't be evaluated since it is not used!\n";
1701  atn_tensors.pop_back();
1702  }
1703  parse_done = true;
1704  }
1705 
1706  /* caution: the order of the loops is really important */
1707  void generic_assembly::exec(size_type cv, dim_type face) {
1708  bool update_shapes = false;
1709  for (size_type i=0; i < atn_tensors.size(); ++i) {
1710  atn_tensors[i]->check_shape_update(cv,face);
1711  update_shapes = (update_shapes || atn_tensors[i]->is_shape_updated());
1712  /* if (atn_tensors[i]->is_shape_updated()) {
1713  cerr << "[cv=" << cv << ",f=" << int(face) << "], shape_updated: "
1714  << typeid(*atn_tensors[i]).name()
1715  << " [" << atn_tensors[i]->name()
1716  << "]\n -> r=" << atn_tensors[i]->ranges() << "\n ";
1717  }
1718  */
1719  }
1720 
1721  if (update_shapes) {
1722 
1723  /*cerr << "updated shapes: cv=" << cv << " face=" << int(face) << ": ";
1724  for (size_type k=0; k < mftab.size(); ++k)
1725  cerr << mftab[k]->nb_basic_dof_of_element(cv) << " "; cerr << "\n";
1726  */
1727 
1728  for (auto &&t : atn_tensors)
1729  t->init_required_shape();
1730 
1731  for (auto &&v : outvars)
1732  v->update_childs_required_shape();
1733 
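  /* visit the tensors from the highest number (closest to the outputs) down
     to the leaves, so each node has received the requirements of all its
     parents before propagating a required shape to its own children */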
1734  for (size_type i=atn_tensors.size()-1; i!=size_type(-1); --i)
1735  atn_tensors[i]->update_childs_required_shape();
1736 
1737  for (auto &&t : atn_tensors)
1738  t->reinit();
1739 
1740  for (auto &&v : outvars)
1741  v->reinit();
1742  }
1743  for (auto &&t : atn_tensors)
1744  t->exec(cv,face);
1745  for (auto &&v : outvars)
1746  v->exec(cv, face);
1747  }
1748 
1749  struct cv_fem_compare {
1750  const std::vector<const mesh_fem *> &mf;
1751  cv_fem_compare(const std::vector<const mesh_fem *>& mf_) : mf(mf_) {}
1752  bool operator()(size_type a, size_type b) const {
1753  for (size_type i=0; i < mf.size(); ++i) {
1754  pfem pfa(mf[i]->fem_of_element(a));
1755  pfem pfb(mf[i]->fem_of_element(b));
1756  /* sort by nb_dof and then by fem */
1757  unsigned nba = unsigned(pfa->nb_dof(a));
1758  unsigned nbb = unsigned(pfb->nb_dof(b));
1759  if (nba < nbb) {
1760  return true;
1761  } else if (nba > nbb) {
1762  return false;
1763  } else if (pfa != pfb) {
1764  return pfa < pfb; /* tie-break on the fem pointer (keeps a strict weak ordering) */
1765  }
1766  }
1767  return false;
1768  }
1769  };
1770 
1771  /* select the convexes suitable for assembly; the sort that would reorder
1772  them to minimize the number of shape modifications during assembly
1773  (which can be very expensive) is currently commented out below */
1774  static void get_convex_order(const dal::bit_vector& cvlst,
1775  const std::vector<const mesh_im *>& imtab,
1776  const std::vector<const mesh_fem *>& mftab,
1777  const dal::bit_vector& candidates,
1778  std::vector<size_type>& cvorder) {
1779  cvorder.reserve(candidates.card()); cvorder.resize(0);
1780 
1781  for (dal::bv_visitor cv(candidates); !cv.finished(); ++cv) {
1782  if (cvlst.is_in(cv) &&
1783  imtab[0]->int_method_of_element(cv) != im_none()) {
1784  bool ok = true;
1785  for (size_type i=0; i < mftab.size(); ++i) {
1786  if (!mftab[i]->convex_index().is_in(cv)) {
1787  ok = false;
1788  // ASM_THROW_ERROR("the convex " << cv << " has no FEM for the #"
1789  // << i+1 << " mesh_fem");
1790  }
1791  }
1792  if (ok) {
1793  cvorder.push_back(cv);
1794  }
1795  } else if (!imtab[0]->linked_mesh().convex_index().is_in(cv)) {
1796  ASM_THROW_ERROR("the convex " << cv << " is not part of the mesh");
1797  } else {
1798  /* skip convexes without integration method */
1799  }
1800  }
1801  //std::sort(cvorder.begin(), cvorder.end(), cv_fem_compare(mftab));
1802  }
1803 
1804  void generic_assembly::consistency_check() {
1805  //if (mftab.size() == 0) ASM_THROW_ERROR("no mesh_fem for assembly!");
1806  if (imtab.size() == 0)
1807  ASM_THROW_ERROR("no mesh_im (integration methods) given for assembly!");
1808  const mesh& m = imtab[0]->linked_mesh();
1809  for (unsigned i=0; i < mftab.size(); ++i) {
1810  if (&mftab[i]->linked_mesh() != &m)
1811  ASM_THROW_ERROR("the mesh_fem/mesh_im live on different meshes!");
1812  }
1813  for (unsigned i=0; i < imtab.size(); ++i) {
1814  if (&imtab[i]->linked_mesh() != &m)
1815  ASM_THROW_ERROR("the mesh_fem/mesh_im live on different meshes!");
1816  }
1817  if (imtab.size() == 0)
1818  ASM_THROW_ERROR("no integration method !");
1819  }
1820 
1821  void generic_assembly::assembly(const mesh_region &r) {
1822  std::vector<size_type> cv;
1823  r.from_mesh(imtab.at(0)->linked_mesh());
1824  r.error_if_not_homogeneous();
1825 
1826 
1827  consistency_check();
1828  get_convex_order(imtab.at(0)->convex_index(), imtab, mftab, r.index(), cv);
1829  parse();
1830 
1831  for (size_type i=0; i < cv.size(); ++i) {
1832  mesh_region::face_bitset nf = r[cv[i]];
1833  dim_type f = dim_type(-1);
1834  while (nf.any())
1835  {
1836  if (nf[0]) exec(cv[i],f);
1837  nf >>= 1;
1838  f++;
1839  }
1840  }
1841  }
1842 } /* end of namespace */
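
  /* Usage sketch, for illustration only: typical driving code for the
     assembly defined above, assuming the public interface declared in
     getfem_assembling_tensors.h (push_mi/push_mf/push_mat/set/assembly);
     mim, mf and M are placeholder objects and the assembly string is the
     classic mass-matrix example.

       getfem::generic_assembly assem;
       assem.push_mi(mim);   // mesh_im supplying the integration methods
       assem.push_mf(mf);    // mesh_fem #1
       assem.push_mat(M);    // output matrix $1 (a gmm-compatible sparse matrix)
       assem.set("M(#1,#1)+=comp(Base(#1).Base(#1))");
       assem.assembly();     // runs consistency_check(), parse() and exec()

     assembly() also accepts a mesh_region to restrict the computation to a
     boundary or to a subset of convexes. */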