Matrix r4655
subscript.c
1#include "Mdefines.h"
2#include "subscript.h"
3
4#define F_X( _X_) (_X_)
5#define F_ND(_X_) ((_X_) ? 1 : 0)
6#define F_NS(_X_) 1
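/* Value maps applied to entries taken from the 'x' slot when filling a
   result: F_X is the identity, F_ND maps any nonzero value to 1 and zero
   to 0, and F_NS maps every value to 1.  They let the same subscript
   loops serve both pattern ("n") and data-carrying matrices. */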
7
8#define AR21_UP(i, j, m) i + j + ( j * ( j - 1)) / 2
9#define AR21_LO(i, j, m) i + (j * m + j * (m - j - 1)) / 2
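/* Worked example, m = 4, indices 0-based: AR21_UP maps (i, j) with i <= j
   to its position in column-major packed storage of the upper triangle,
   e.g. AR21_UP(1, 2, 4) = 1 + 2 + (2 * 1) / 2 = 4 (column 2 starts at
   offset 3 = 1 + 2, after columns 0 and 1).  AR21_LO does the same for
   the lower triangle, e.g. AR21_LO(2, 1, 4) = 2 + (1 * 4 + 1 * 2) / 2 = 5
   (column 1 starts at offset 4, after the 4 entries of column 0). */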
10
11static
12SEXP unpackedMatrix_subscript_1ary(SEXP x, SEXP w, const char *cl)
13{
14
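/* The SUB1_* macros defined here are reused by all *_subscript_1ary
   methods below: SUB1_START allocates the result and fetches the
   dimensions, SUB1_START_EXTRA additionally reads 'uplo' and 'diag',
   SUB1_CASES dispatches on the first letter of the class (n, l, i, d, z),
   SUB1_N walks an integer or double index vector while mapping NA (and,
   where possible, out-of-range) indices to NA, SUB1_X does the same after
   fetching the 'x' slot, and each method supplies its own SUB1_LOOP for
   its storage format. */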
15#define SUB1_START(_SEXPTYPE_) \
16 SEXPTYPE typ = _SEXPTYPE_; \
17 R_xlen_t l = 0, len = XLENGTH(w); \
18 SEXP res = allocVector(typ, len); \
19 if (len == 0) \
20 return res; \
21 PROTECT(res); \
22 \
23 SEXP dim = PROTECT(GET_SLOT(x, Matrix_DimSym)); \
24 int *pdim = INTEGER(dim), m = pdim[0], n = pdim[1]; \
25 Matrix_int_fast64_t mn64 = (Matrix_int_fast64_t) m * n; \
26 UNPROTECT(1);
27
28#define SUB1_START_EXTRA(_SEXPTYPE_) \
29 SUB1_START(_SEXPTYPE_); \
30 \
31 int ge = cl[1] == 'g', tr = cl[1] == 't', upper = 1, nonunit = 1; \
32 if (!ge) { \
33 SEXP uplo = PROTECT(GET_SLOT(x, Matrix_uploSym)); \
34 upper = *CHAR(STRING_ELT(uplo, 0)) == 'U'; \
35 UNPROTECT(1); \
36 if (tr) { \
37 SEXP diag = PROTECT(GET_SLOT(x, Matrix_diagSym)); \
38 nonunit = *CHAR(STRING_ELT(diag, 0)) == 'N'; \
39 UNPROTECT(1); \
40 } \
41 }
42
44
45#define SUB1_CASES(_SUB1_N_, _SUB1_X_, _F_N_, _F_X_) \
46 do { \
47 switch (cl[0]) { \
48 case 'n': \
49 _SUB1_N_(int, LOGICAL, NA_LOGICAL, 0, 1, _F_N_); \
50 break; \
51 case 'l': \
52 _SUB1_X_(int, LOGICAL, NA_LOGICAL, 0, 1, _F_X_); \
53 break; \
54 case 'i': \
55 _SUB1_X_(int, INTEGER, NA_INTEGER, 0, 1, _F_X_); \
56 break; \
57 case 'd': \
58 _SUB1_X_(double, REAL, NA_REAL, 0.0, 1.0, _F_X_); \
59 break; \
60 case 'z': \
61 _SUB1_X_(Rcomplex, COMPLEX, \
62 Matrix_zna, Matrix_zzero, Matrix_zone, _F_X_); \
63 break; \
64 default: \
65 break; \
66 } \
67 } while (0)
68
69#define SUB1_N(_CTYPE_, _PTR_, _NA_, _ZERO_, _ONE_, _F_) \
70 do { \
71 _CTYPE_ *pres = _PTR_(res); \
72 if (TYPEOF(w) == INTSXP) { \
73 int *pw = INTEGER(w); \
74 if (mn64 >= INT_MAX) { \
75 /* index is never out of bounds */ \
76 SUB1_LOOP((pw[l] == NA_INTEGER), \
77 _NA_, _ZERO_, _ONE_, _F_, Matrix_int_fast64_t); \
78 } else { \
79 int mn = m * n; \
80 SUB1_LOOP((pw[l] == NA_INTEGER || pw[l] > mn), \
81 _NA_, _ZERO_, _ONE_, _F_, int); \
82 } \
83 } else { \
84 double *pw = REAL(w); \
85 if (mn64 >= 0x1.0p+53) \
86 /* m*n may not be exactly representable as double */ \
87 /* but it does not exceed INT_MAX * INT_MAX */ \
88 SUB1_LOOP((ISNAN(pw[l]) || pw[l] >= 0x1.0p+62 || \
89 (Matrix_int_fast64_t) pw[l] > mn64), \
90 _NA_, _ZERO_, _ONE_, _F_, Matrix_int_fast64_t); \
91 else { \
92 double mn1a = (double) m * n + 1.0; \
93 SUB1_LOOP((ISNAN(pw[l]) || pw[l] >= mn1a), \
94 _NA_, _ZERO_, _ONE_, _F_, Matrix_int_fast64_t); \
95 } \
96 } \
97 } while (0)
98
99#define SUB1_X(_CTYPE_, _PTR_, _NA_, _ZERO_, _ONE_, _F_) \
100 do { \
101 PROTECT(x = GET_SLOT(x, Matrix_xSym)); \
102 _CTYPE_ *px = _PTR_(x); \
103 SUB1_N(_CTYPE_, _PTR_, _NA_, _ZERO_, _ONE_, _F_); \
104 UNPROTECT(1); \
105 } while (0)
106
107#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_, _INT_) \
108 do { \
109 _INT_ index, i_, j_; \
110 for (l = 0; l < len; ++l) { \
111 if (_NA_SUBSCRIPT_) \
112 pres[l] = _NA_; \
113 else { \
114 index = (_INT_) pw[l] - 1; \
115 if (ge) \
116 pres[l] = _F_(px[index]); \
117 else { \
118 i_ = index % m; \
119 j_ = index / m; \
120 if (tr) { \
121 if ((upper) ? i_ > j_ : i_ < j_) \
122 pres[l] = _ZERO_; \
123 else if (!nonunit && i_ == j_) \
124 pres[l] = _ONE_; \
125 else \
126 pres[l] = _F_(px[index]); \
127 } else { \
128 if ((upper) ? i_ > j_ : i_ < j_) \
129 pres[l] = _F_(px[i_ * m + j_]); \
130 else \
131 pres[l] = _F_(px[index]); \
132 } \
133 } \
134 } \
135 } \
136 } while (0)
137
139
140#undef SUB1_LOOP
141
142 UNPROTECT(1);
143 return res;
144}
145
146static
147SEXP packedMatrix_subscript_1ary(SEXP x, SEXP w, const char *cl)
148{
150
151#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_, _INT_) \
152 do { \
153 _INT_ index, i_, j_; \
154 for (l = 0; l < len; ++l) { \
155 if (_NA_SUBSCRIPT_) \
156 pres[l] = _NA_; \
157 else { \
158 index = (_INT_) pw[l] - 1; \
159 i_ = index % m; \
160 j_ = index / m; \
161 if (tr) { \
162 if (upper) { \
163 if (i_ > j_) \
164 pres[l] = _ZERO_; \
165 else if (!nonunit && i_ == j_) \
166 pres[l] = _ONE_; \
167 else \
168 pres[l] = _F_(px[AR21_UP(i_, j_, m)]); \
169 } else { \
170 if (i_ < j_) \
171 pres[l] = _ZERO_; \
172 else if (!nonunit && i_ == j_) \
173 pres[l] = _ONE_; \
174 else \
175 pres[l] = _F_(px[AR21_LO(i_, j_, m)]); \
176 } \
177 } else { \
178 if (upper) { \
179 if (i_ > j_) \
180 pres[l] = _F_(px[AR21_UP(j_, i_, m)]); \
181 else \
182 pres[l] = _F_(px[AR21_UP(i_, j_, m)]); \
183 } else { \
184 if (i_ < j_) \
185 pres[l] = _F_(px[AR21_LO(j_, i_, m)]); \
186 else \
187 pres[l] = _F_(px[AR21_LO(i_, j_, m)]); \
188 } \
189 } \
190 } \
191 } \
192 } while (0)
193
195
196#undef SUB1_LOOP
197
198 UNPROTECT(1);
199 return res;
200}
201
202static
203SEXP CsparseMatrix_subscript_1ary(SEXP x, SEXP w, const char *cl)
204{
206
207 SEXP p = PROTECT(GET_SLOT(x, Matrix_pSym)),
208 i = PROTECT(GET_SLOT(x, Matrix_iSym));
209 int *pp = INTEGER(p), *pi = INTEGER(i), i_, j_, j, k = 0, kend;
210
211#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_, _INT_) \
212 do { \
213 _INT_ index; \
214 if (_NA_SUBSCRIPT_) \
215 j = -1; \
216 else { \
217 index = (_INT_) pw[l] - 1; \
218 i_ = (int) (index % m); \
219 j_ = j = (int) (index / m); \
220 } \
221 while (j >= 0) { \
222 k = pp[j]; \
223 kend = pp[j + 1]; \
224 while (k < kend && j_ == j) { \
225 if (pi[k] < i_) \
226 ++k; \
227 else { \
228 if (pi[k] > i_) \
229 pres[l] = _ZERO_; \
230 else \
231 pres[l] = _F_(px[k]); \
232 ++l; \
233 if (l == len || _NA_SUBSCRIPT_) \
234 j_ = -1; \
235 else { \
236 index = (_INT_) pw[l] - 1; \
237 i_ = (int) (index % m); \
238 j_ = (int) (index / m); \
239 } \
240 } \
241 } \
242 while (j_ == j) { \
243 pres[l] = _ZERO_; \
244 ++l; \
245 if (l == len || _NA_SUBSCRIPT_) \
246 j_ = -1; \
247 else { \
248 index = (_INT_) pw[l] - 1; \
249 i_ = (int) (index % m); \
250 j_ = (int) (index / m); \
251 } \
252 } \
253 j = j_; \
254 } \
255 while (l < len) { \
256 pres[l] = _NA_; \
257 ++l; \
258 } \
259 } while (0)
260
262
263#undef SUB1_LOOP
264
265 UNPROTECT(3);
266 return res;
267}
268
269static
270SEXP RsparseMatrix_subscript_1ary(SEXP x, SEXP w, const char *cl)
271{
273
274 SEXP p = PROTECT(GET_SLOT(x, Matrix_pSym)),
275 j = PROTECT(GET_SLOT(x, Matrix_jSym));
276 int *pp = INTEGER(p), *pj = INTEGER(j), i, i_, j_, k = 0, kend;
277
278#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_, _INT_) \
279 do { \
280 _INT_ index; \
281 if (_NA_SUBSCRIPT_) \
282 i = -1; \
283 else { \
284 index = (_INT_) pw[l] - 1; \
285 i_ = i = (int) (index % m); \
286 j_ = (int) (index / m); \
287 } \
288 while (i >= 0) { \
289 k = pp[i]; \
290 kend = pp[i + 1]; \
291 while (k < kend && i_ == i) { \
292 if (pj[k] < j_) \
293 ++k; \
294 else { \
295 if (pj[k] > j_) \
296 pres[l] = _ZERO_; \
297 else \
298 pres[l] = _F_(px[k]); \
299 ++l; \
300 if (l == len || _NA_SUBSCRIPT_) \
301 i_ = -1; \
302 else { \
303 index = (_INT_) pw[l] - 1; \
304 i_ = (int) (index % m); \
305 j_ = (int) (index / m); \
306 } \
307 } \
308 } \
309 while (i_ == i) { \
310 pres[l] = _ZERO_; \
311 ++l; \
312 if (l == len || _NA_SUBSCRIPT_) \
313 i_ = -1; \
314 else { \
315 index = (_INT_) pw[l] - 1; \
316 i_ = (int) (index % m); \
317 j_ = (int) (index / m); \
318 } \
319 } \
320 i = i_; \
321 } \
322 while (l < len) { \
323 pres[l] = _NA_; \
324 ++l; \
325 } \
326 } while (0)
327
329
330#undef SUB1_LOOP
331
332 UNPROTECT(3);
333 return res;
334}
335
336static
337SEXP diagonalMatrix_subscript_1ary(SEXP x, SEXP w, const char *cl)
338{
340
341 SEXP diag = PROTECT(GET_SLOT(x, Matrix_diagSym));
342 int nonunit = *CHAR(STRING_ELT(diag, 0)) == 'N';
343 UNPROTECT(1);
344
345 Matrix_int_fast64_t index, n1a = (Matrix_int_fast64_t) n + 1;
346
347#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_, _INT_) \
348 do { \
349 for (l = 0; l < len; ++l) { \
350 if (_NA_SUBSCRIPT_) \
351 pres[l] = _NA_; \
352 else if ((index = (Matrix_int_fast64_t) pw[l] - 1) % n1a != 0) \
353 pres[l] = _ZERO_; \
354 else if (!nonunit) \
355 pres[l] = _ONE_; \
356 else \
357 pres[l] = _F_(px[index / n1a]); \
358 } \
359 } while (0)
360
362
363#undef SUB1_LOOP
364
365 UNPROTECT(1);
366 return res;
367}
368
369static
370SEXP indMatrix_subscript_1ary(SEXP x, SEXP w)
371{
372 SUB1_START(LGLSXP);
373
374 SEXP perm = PROTECT(GET_SLOT(x, Matrix_permSym));
375 int *pperm = INTEGER(perm), i_, j_;
376
377 SEXP margin = PROTECT(GET_SLOT(x, Matrix_marginSym));
378 int mg = INTEGER(margin)[0] - 1;
379 UNPROTECT(1);
380
381#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_, _INT_) \
382 do { \
383 _INT_ index; \
384 for (l = 0; l < len; ++l) { \
385 if (_NA_SUBSCRIPT_) \
386 pres[l] = _NA_; \
387 else { \
388 index = (_INT_) pw[l] - 1; \
389 i_ = (int) (index % m); \
390 j_ = (int) (index / m); \
391 if (!mg) \
392 pres[l] = j_ == pperm[i_] - 1; \
393 else \
394 pres[l] = i_ == pperm[j_] - 1; \
395 } \
396 } \
397 } while (0)
398
399 SUB1_N(int, LOGICAL, NA_LOGICAL, 0, 1, );
400
401#undef SUB1_LOOP
402#undef SUB1_N
403#undef SUB1_START
404#undef SUB1_START_EXTRA
405
406 UNPROTECT(2);
407 return res;
408}
409
410/* x[i] with 'i' of type "integer" or "double" {i >= 1 or NA} */
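/* Equivalently, for a 1-based linear index w the entry returned lies at
   row (w - 1) % m and column (w - 1) / m (0-based) of the m-by-n matrix,
   matching R's column-major interpretation of x[i]; NA indices yield NA
   in the result. */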
411SEXP R_subscript_1ary(SEXP x, SEXP i)
412{
413 static const char *valid[] = { VALID_NONVIRTUAL_MATRIX, "" };
414 int ivalid = R_check_class_etc(x, valid);
415 if (ivalid < 0)
416 ERROR_INVALID_CLASS(x, __func__);
417 ivalid += VALID_NONVIRTUAL_SHIFT(ivalid, 1);
418 const char *cl = valid[ivalid];
419 validObject(x, cl);
420
421 switch (cl[2]) {
422 case 'e':
423 case 'y':
424 case 'r':
425 return unpackedMatrix_subscript_1ary(x, i, cl);
426 case 'p':
427 return packedMatrix_subscript_1ary(x, i, cl);
428
429 /* NB: for [CRT], the caller must preprocess 'x' and 'i';
430 symmetric and unit triangular 'x' are not handled specially,
431 and it is assumed for speed that 'i' is sorted by row [R]
432 or column [CT] with NA last
433 */
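/* For a CsparseMatrix this holds, e.g., when the linear indices in 'i'
   are increasing (column-major order), such as i = c(1, 5, 9) for the
   diagonal of a 3-by-3 matrix; each column's nonzero entries are then
   scanned at most once. */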
434
435 case 'C':
436 return CsparseMatrix_subscript_1ary(x, i, cl);
437 case 'R':
438 return RsparseMatrix_subscript_1ary(x, i, cl);
439 case 'T':
440 {
441 char cl_[] = "..CMatrix";
442 cl_[0] = cl[0];
443 cl_[1] = cl[1];
444
445 /* defined in ./coerce.c : */
446 SEXP sparse_as_Csparse(SEXP, const char *);
447
448 x = sparse_as_Csparse(x, cl);
449 PROTECT(x);
450 x = CsparseMatrix_subscript_1ary(x, i, cl_);
451 UNPROTECT(1);
452 return x;
453 }
454 case 'i':
455 return diagonalMatrix_subscript_1ary(x, i, cl);
456 default:
457 return indMatrix_subscript_1ary(x, i);
458 }
459}
460
461static
462SEXP unpackedMatrix_subscript_1ary_mat(SEXP x, SEXP w, const char *cl)
463{
464
465#define SUB1_START(_SEXPTYPE_) \
466 SEXPTYPE typ = _SEXPTYPE_; \
467 int l = 0, len = (int) (XLENGTH(w) / 2); \
468 SEXP res = allocVector(typ, len); \
469 if (len == 0) \
470 return res; \
471 PROTECT(res); \
472 int *pw0 = INTEGER(w), *pw1 = pw0 + len;
473
474#define SUB1_START_EXTRA(_SEXPTYPE_) \
475 SUB1_START(_SEXPTYPE_); \
476 \
477 SEXP dim = PROTECT(GET_SLOT(x, Matrix_DimSym)); \
478 int m = INTEGER(dim)[0]; \
479 UNPROTECT(1); \
480 \
481 int ge = cl[1] == 'g', tr = cl[1] == 't', upper = 1, nonunit = 1; \
482 if (!ge) { \
483 SEXP uplo = PROTECT(GET_SLOT(x, Matrix_uploSym)); \
484 upper = *CHAR(STRING_ELT(uplo, 0)) == 'U'; \
485 UNPROTECT(1); \
486 if (tr) { \
487 SEXP diag = PROTECT(GET_SLOT(x, Matrix_diagSym)); \
488 nonunit = *CHAR(STRING_ELT(diag, 0)) == 'N'; \
489 UNPROTECT(1); \
490 } \
491 }
492
494
495 Matrix_int_fast64_t i_, j_;
496
497#define SUB1_N(_CTYPE_, _PTR_, _NA_, _ZERO_, _ONE_, _F_) \
498 do { \
499 _CTYPE_ *pres = _PTR_(res); \
500 SUB1_LOOP((pw0[l] == NA_INTEGER || pw1[l] == NA_INTEGER), \
501 _NA_, _ZERO_, _ONE_, _F_); \
502 } while (0)
503
504#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_) \
505 do { \
506 for (l = 0; l < len; ++l) { \
507 if (_NA_SUBSCRIPT_) \
508 pres[l] = _NA_; \
509 else { \
510 i_ = pw0[l] - 1; \
511 j_ = pw1[l] - 1; \
512 if (ge) \
513 pres[l] = _F_(px[j_ * m + i_]); \
514 else if (tr) { \
515 if ((upper) ? i_ > j_ : i_ < j_) \
516 pres[l] = _ZERO_; \
517 else if (!nonunit && i_ == j_) \
518 pres[l] = _ONE_; \
519 else \
520 pres[l] = _F_(px[j_ * m + i_]); \
521 } else { \
522 if ((upper) ? i_ > j_ : i_ < j_) \
523 pres[l] = _F_(px[i_ * m + j_]); \
524 else \
525 pres[l] = _F_(px[j_ * m + i_]); \
526 } \
527 } \
528 } \
529 } while (0)
530
532
533#undef SUB1_LOOP
534
535 UNPROTECT(1);
536 return res;
537}
538
539static
540SEXP packedMatrix_subscript_1ary_mat(SEXP x, SEXP w, const char *cl)
541{
543
544 Matrix_int_fast64_t i_, j_;
545
546#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_) \
547 do { \
548 for (l = 0; l < len; ++l) { \
549 if (_NA_SUBSCRIPT_) \
550 pres[l] = _NA_; \
551 else { \
552 i_ = pw0[l] - 1; \
553 j_ = pw1[l] - 1; \
554 if (tr) { \
555 if (upper) { \
556 if (i_ > j_) \
557 pres[l] = _ZERO_; \
558 else if (!nonunit && i_ == j_) \
559 pres[l] = _ONE_; \
560 else \
561 pres[l] = _F_(px[AR21_UP(i_, j_, m)]); \
562 } else { \
563 if (i_ < j_) \
564 pres[l] = _ZERO_; \
565 else if (!nonunit && i_ == j_) \
566 pres[l] = _ONE_; \
567 else \
568 pres[l] = _F_(px[AR21_LO(i_, j_, m)]); \
569 } \
570 } else { \
571 if (upper) { \
572 if (i_ > j_) \
573 pres[l] = _F_(px[AR21_UP(j_, i_, m)]); \
574 else \
575 pres[l] = _F_(px[AR21_UP(i_, j_, m)]); \
576 } else { \
577 if (i_ < j_) \
578 pres[l] = _F_(px[AR21_LO(j_, i_, m)]); \
579 else \
580 pres[l] = _F_(px[AR21_LO(i_, j_, m)]); \
581 } \
582 } \
583 } \
584 } \
585 } while (0)
586
588
589#undef SUB1_LOOP
590
591 UNPROTECT(1);
592 return res;
593}
594
595static
596SEXP CsparseMatrix_subscript_1ary_mat(SEXP x, SEXP w, const char *cl)
597{
599
600 SEXP p = PROTECT(GET_SLOT(x, Matrix_pSym)),
601 i = PROTECT(GET_SLOT(x, Matrix_iSym));
602 int *pp = INTEGER(p), *pi = INTEGER(i), i_, j_, j, k = 0, kend;
603
604#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_) \
605 do { \
606 if (_NA_SUBSCRIPT_) \
607 j = -1; \
608 else { \
609 i_ = pw0[l] - 1; \
610 j_ = j = pw1[l] - 1; \
611 } \
612 while (j >= 0) { \
613 k = pp[j]; \
614 kend = pp[j + 1]; \
615 while (k < kend && j_ == j) { \
616 if (pi[k] < i_) \
617 ++k; \
618 else { \
619 if (pi[k] > i_) \
620 pres[l] = _ZERO_; \
621 else \
622 pres[l] = _F_(px[k]); \
623 ++l; \
624 if (l == len || _NA_SUBSCRIPT_) \
625 j_ = -1; \
626 else { \
627 i_ = pw0[l] - 1; \
628 j_ = pw1[l] - 1; \
629 } \
630 } \
631 } \
632 while (j_ == j) { \
633 pres[l] = _ZERO_; \
634 ++l; \
635 if (l == len || _NA_SUBSCRIPT_) \
636 j_ = -1; \
637 else { \
638 i_ = pw0[l] - 1; \
639 j_ = pw1[l] - 1; \
640 } \
641 } \
642 j = j_; \
643 } \
644 while (l < len) { \
645 pres[l] = _NA_; \
646 ++l; \
647 } \
648 } while (0)
649
651
652#undef SUB1_LOOP
653
654 UNPROTECT(3);
655 return res;
656}
657
658static
659SEXP RsparseMatrix_subscript_1ary_mat(SEXP x, SEXP w, const char *cl)
660{
662
663 SEXP p = PROTECT(GET_SLOT(x, Matrix_pSym)),
664 j = PROTECT(GET_SLOT(x, Matrix_jSym));
665 int *pp = INTEGER(p), *pj = INTEGER(j), i, i_, j_, k = 0, kend;
666
667#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_) \
668 do { \
669 if (_NA_SUBSCRIPT_) \
670 i = -1; \
671 else { \
672 i_ = i = pw0[l] - 1; \
673 j_ = pw1[l] - 1; \
674 } \
675 while (i >= 0) { \
676 k = pp[i]; \
677 kend = pp[i + 1]; \
678 while (k < kend && i_ == i) { \
679 if (pj[k] < j_) \
680 ++k; \
681 else { \
682 if (pj[k] > j_) \
683 pres[l] = _ZERO_; \
684 else \
685 pres[l] = _F_(px[k]); \
686 ++l; \
687 if (l == len || _NA_SUBSCRIPT_) \
688 i_ = -1; \
689 else { \
690 i_ = pw0[l] - 1; \
691 j_ = pw1[l] - 1; \
692 } \
693 } \
694 } \
695 while (i_ == i) { \
696 pres[l] = _ZERO_; \
697 ++l; \
698 if (l == len || _NA_SUBSCRIPT_) \
699 i_ = -1; \
700 else { \
701 i_ = pw0[l] - 1; \
702 j_ = pw1[l] - 1; \
703 } \
704 } \
705 i = i_; \
706 } \
707 while (l < len) { \
708 pres[l] = _NA_; \
709 ++l; \
710 } \
711 } while (0)
712
714
715#undef SUB1_LOOP
716
717 UNPROTECT(3);
718 return res;
719}
720
721static
722SEXP diagonalMatrix_subscript_1ary_mat(SEXP x, SEXP w, const char *cl)
723{
725
726 SEXP diag = PROTECT(GET_SLOT(x, Matrix_diagSym));
727 int nonunit = *CHAR(STRING_ELT(diag, 0)) == 'N';
728 UNPROTECT(1);
729
730#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_) \
731 do { \
732 for (l = 0; l < len; ++l) { \
733 if (_NA_SUBSCRIPT_) \
734 pres[l] = _NA_; \
735 else if (pw0[l] != pw1[l]) \
736 pres[l] = _ZERO_; \
737 else if (!nonunit) \
738 pres[l] = _ONE_; \
739 else \
740 pres[l] = _F_(px[pw0[l] - 1]); \
741 } \
742 } while (0)
743
745
746#undef SUB1_LOOP
747
748 UNPROTECT(1);
749 return res;
750}
751
752static
753SEXP indMatrix_subscript_1ary_mat(SEXP x, SEXP w)
754{
755 SUB1_START(LGLSXP);
756
757 SEXP perm = PROTECT(GET_SLOT(x, Matrix_permSym));
758 int *pperm = INTEGER(perm);
759
760 SEXP margin = PROTECT(GET_SLOT(x, Matrix_marginSym));
761 int mg = INTEGER(margin)[0] - 1;
762 UNPROTECT(1);
763
764#define SUB1_LOOP(_NA_SUBSCRIPT_, _NA_, _ZERO_, _ONE_, _F_) \
765 do { \
766 for (l = 0; l < len; ++l) { \
767 if (_NA_SUBSCRIPT_) \
768 pres[l] = _NA_; \
769 else if (!mg) \
770 pres[l] = pw1[l] == pperm[pw0[l] - 1]; \
771 else \
772 pres[l] = pw0[l] == pperm[pw1[l] - 1]; \
773 } \
774 } while (0)
775
776 SUB1_N(int, LOGICAL, NA_LOGICAL, 0, 1, );
777
778#undef SUB1_LOOP
779#undef SUB1_N
780#undef SUB1_X
781#undef SUB1_CASES
782#undef SUB1_START
783#undef SUB1_START_EXTRA
784
785 UNPROTECT(2);
786 return res;
787}
788
789/* x[i] with 'i' of type "integer" and dimensions c(.,2)
790 {i[,1] in 1:m or NA, i[,2] in 1:n or NA}
791*/
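/* The two-column index matrix is received as a single INTEGER vector in
   column-major order, so its first nrow(i) elements are the row indices
   and the next nrow(i) are the column indices; the methods below recover
   these as pw0 and pw1. */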
792SEXP R_subscript_1ary_mat(SEXP x, SEXP i)
793{
794 static const char *valid[] = { VALID_NONVIRTUAL_MATRIX, "" };
795 int ivalid = R_check_class_etc(x, valid);
796 if (ivalid < 0)
797 ERROR_INVALID_CLASS(x, __func__);
798 ivalid += VALID_NONVIRTUAL_SHIFT(ivalid, 1);
799 const char *cl = valid[ivalid];
800 validObject(x, cl);
801
802 switch (cl[2]) {
803 case 'e':
804 case 'y':
805 case 'r':
806 return unpackedMatrix_subscript_1ary_mat(x, i, cl);
807 case 'p':
808 return packedMatrix_subscript_1ary_mat(x, i, cl);
809
810 /* NB: for [CRT], the caller must preprocess 'x' and 'i';
811 symmetric and unit triangular 'x' are not handled specially,
812 and it is assumed for speed that 'i' is sorted by row [R]
813 or column [CT] with NA (incl. out-of-bounds indices) last
814 */
815
816 case 'C':
817 return CsparseMatrix_subscript_1ary_mat(x, i, cl);
818 case 'R':
819 return RsparseMatrix_subscript_1ary_mat(x, i, cl);
820 case 'T':
821 {
822 char cl_[] = "..CMatrix";
823 cl_[0] = cl[0];
824 cl_[1] = cl[1];
825
826 /* defined in ./coerce.c : */
827 SEXP sparse_as_Csparse(SEXP, const char *);
828
829 x = sparse_as_Csparse(x, cl);
830 PROTECT(x);
831 x = CsparseMatrix_subscript_1ary_mat(x, i, cl_);
832 UNPROTECT(1);
833 return x;
834 }
835 case 'i':
836 return diagonalMatrix_subscript_1ary_mat(x, i, cl);
837 default:
838 return indMatrix_subscript_1ary_mat(x, i);
839 }
840}
841
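/* keep_tr, keep_sy, and keep_di test whether x[i,j], with length(i) ==
   length(j), preserves structure.  keep_tr returns 0 when the result is
   no longer triangular and otherwise a value of magnitude 1 or 2 (2 if a
   unit diagonal is preserved as well), its sign telling whether the
   result is upper (+) or lower (-) triangular; keep_sy and keep_di encode
   symmetric and diagonal results analogously. */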
842static
843int keep_tr(int *pi, int *pj, int n, int upper, int nonunit, int checkNA)
844{
845 int k, ident = memcmp(pi, pj, n * sizeof(int)) == 0;
846 if (checkNA) {
847 if (ident) {
848 for (k = 0; k < n; ++k)
849 if (pi[k] == NA_INTEGER)
850 return 0;
851 } else {
852 for (k = 0; k < n; ++k)
853 if (pi[k] == NA_INTEGER || pj[k] == NA_INTEGER)
854 return 0;
855 }
856 }
857 int r = (upper) ? 1 : -1;
858 if (ident) {
859 /* triangular iff monotone; unit diagonal is preserved */
860 if (n >= 2) {
861 if (pi[0] == pi[1])
862 return 0;
863 else if (pi[0] < pi[1]) {
864 for (k = 2; k < n; ++k)
865 if (pi[k - 1] >= pi[k])
866 return 0;
867 /* up->up, lo->lo */
868 } else {
869 for (k = 2; k < n; ++k)
870 if (pi[k - 1] <= pi[k])
871 return 0;
872 /* up->lo, lo->up */
873 r = -r;
874 }
875 }
876 if (!nonunit)
877 r *= 2;
878 return r;
879 } else {
880 /* brute force ... */
881 int ki, kj, j_;
882 if (upper) {
883 for (kj = 0; kj < n; ++kj)
884 for (ki = kj + 1, j_ = pj[kj]; ki < n; ++ki)
885 if (pi[ki] <= j_)
886 goto LO;
887 /* up->up */
888 return r;
889 LO:
890 for (kj = 0; kj < n; ++kj)
891 for (ki = 0, j_ = pj[kj]; ki < kj; ++ki)
892 if (pi[ki] <= j_)
893 return 0;
894 /* up->lo */
895 return -r;
896 } else {
897 for (kj = 0; kj < n; ++kj)
898 for (ki = 0, j_ = pj[kj]; ki < kj; ++ki)
899 if (pi[ki] >= j_)
900 goto UP;
901 /* lo->lo */
902 return r;
903 UP:
904 for (kj = 0; kj < n; ++kj)
905 for (ki = kj + 1, j_ = pj[kj]; ki < n; ++ki)
906 if (pi[ki] >= j_)
907 return 0;
908 /* lo->up */
909 return -r;
910 }
911 }
912}
913
914static
915int keep_sy(int *pi, int *pj, int n, int upper, int checkNA)
916{
917 if (memcmp(pi, pj, n * sizeof(int)) != 0)
918 return 0;
919 int k, r = (upper) ? 1 : -1;
920 if (checkNA) {
921 for (k = 0; k < n; ++k)
922 if (pi[k] == NA_INTEGER)
923 return r;
924 }
925 if (n >= 2) {
926 /* triangular iff monotone */
927 if (pi[0] == pi[1])
928 return r;
929 else if (pi[0] < pi[1]) {
930 for (k = 2; k < n; ++k)
931 if (pi[k - 1] >= pi[k])
932 return r;
933 /* up->up, lo->lo */
934 } else {
935 for (k = 2; k < n; ++k)
936 if (pi[k - 1] <= pi[k])
937 return r;
938 /* up->lo, lo->up */
939 r = -r;
940 }
941 }
942 return 2 * r;
943}
944
945static
946int keep_di(int *pi, int *pj, int n, int nonunit, int checkNA, int lwork)
947{
948 int k, ident = memcmp(pi, pj, n * sizeof(int)) == 0;
949 if (checkNA) {
950 if (ident) {
951 for (k = 0; k < n; ++k)
952 if (pi[k] == NA_INTEGER)
953 return 0;
954 } else {
955 for (k = 0; k < n; ++k)
956 if (pi[k] == NA_INTEGER || pj[k] == NA_INTEGER)
957 return 0;
958 }
959 }
960 if (ident) {
961 /* diagonal iff no duplicates; unit diagonal is preserved */
962 char *work;
963 Matrix_Calloc(work, lwork, char);
964 --work;
965 for (k = 0; k < n; ++k) {
966 if (work[pi[k]])
967 return 0;
968 work[pi[k]] = 1;
969 }
970 ++work;
971 Matrix_Free(work, lwork);
972 return (nonunit) ? 1 : 2;
973 } else {
974 /* brute force ... */
975 int ki, kj, j_;
976 for (kj = 0; kj < n; ++kj) {
977 j_ = pj[kj];
978 for (ki = 0; ki < kj; ++ki)
979 if (pi[ki] == j_)
980 return 0;
981 for (ki = kj + 1; ki < n; ++ki)
982 if (pi[ki] == j_)
983 return 0;
984 }
985 return 1;
986 }
987}
988
989static
990void sort_cr(SEXP obj, const char *cl)
991{
992 SEXP dim = PROTECT(GET_SLOT(obj, Matrix_DimSym));
993 int *pdim = INTEGER(dim),
994 m = pdim[(cl[2] == 'C') ? 0 : 1],
995 n = pdim[(cl[2] == 'C') ? 1 : 0],
996 r = (m < n) ? n : m;
997 UNPROTECT(1); /* dim */
998
999 SEXP iSym = (cl[2] == 'C') ? Matrix_iSym : Matrix_jSym,
1000 p = PROTECT(GET_SLOT(obj, Matrix_pSym)),
1001 i = PROTECT(GET_SLOT(obj, iSym));
1002 int *pp = INTEGER(p), *pi = INTEGER(i);
1003
1004 int i_, j_, k, kend, nnz = pp[n], *workA, *workB, *workC;
1005 size_t lwork = (size_t) m + 1 + r + nnz;
1006 Matrix_Calloc(workA, lwork, int);
1007 workB = workA + m + 1;
1008 workC = workB + r;
1009
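/* Row (or, for R-sparse storage, column) indices within each column are
   sorted by two counting-sort passes: the first distributes the entries
   into row-major order (workA holds per-row counts turned into pointers,
   workC records each entry's column in that order), and the second
   distributes them back into column-major order with indices ascending
   within every column; workD carries the 'x' values along when present. */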
1010#define SORT_LOOP(_MASK_) \
1011 do { \
1012 ++workA; \
1013 for (k = 0; k < nnz; ++k) \
1014 ++workA[pi[k]]; \
1015 --workA; \
1016 \
1017 for (i_ = 1; i_ < m; ++i_) \
1018 workA[i_] = workB[i_] = workB[i_ - 1] + workA[i_]; \
1019 workA[m] = nnz; \
1020 \
1021 ++pp; \
1022 k = 0; \
1023 for (j_ = 0; j_ < n; ++j_) { \
1024 kend = pp[j_]; \
1025 while (k < kend) { \
1026 i_ = pi[k]; \
1027 workC[workB[i_]] = j_; \
1028 _MASK_(workD[workB[i_]] = px[k]); \
1029 ++workB[i_]; \
1030 ++k; \
1031 } \
1032 } \
1033 --pp; \
1034 \
1035 for (j_ = 0; j_ < n; ++j_) \
1036 workB[j_] = pp[j_]; \
1037 \
1038 ++workA; \
1039 k = 0; \
1040 for (i_ = 0; i_ < m; ++i_) { \
1041 kend = workA[i_]; \
1042 while (k < kend) { \
1043 j_ = workC[k]; \
1044 pi[workB[j_]] = i_; \
1045 _MASK_(px[workB[j_]] = workD[k]); \
1046 ++workB[j_]; \
1047 ++k; \
1048 } \
1049 } \
1050 --workA; \
1051 } while (0)
1052
1053#define SORT(_CTYPE_, _PTR_) \
1054 do { \
1055 _CTYPE_ *px = _PTR_(x), *workD; \
1056 Matrix_Calloc(workD, nnz, _CTYPE_); \
1057 SORT_LOOP(SHOW); \
1058 Matrix_Free(workD, nnz); \
1059 } while (0)
1060
1061 if (cl[0] == 'n')
1062 SORT_LOOP(HIDE);
1063 else {
1064 SEXP x = PROTECT(GET_SLOT(obj, Matrix_xSym));
1065 switch (cl[0]) {
1066 case 'l':
1067 SORT(int, LOGICAL);
1068 break;
1069 case 'i':
1070 SORT(int, INTEGER);
1071 break;
1072 case 'd':
1073 SORT(double, REAL);
1074 break;
1075 case 'z':
1076 SORT(Rcomplex, COMPLEX);
1077 break;
1078 default:
1079 break;
1080 }
1081 UNPROTECT(1); /* x */
1082 }
1083
1084#undef SORT_LOOP
1085#undef SORT
1086
1087 Matrix_Free(workA, lwork);
1088 UNPROTECT(2); /* i, p */
1089 return;
1090}
1091
1092#define XIJ_GE( _X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1093 *(_X_ + _J_ * _M_ + _I_)
1094
1095#define XIJ_TR_U_N(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1096 ((_I_ <= _J_) \
1097 ? XIJ_GE(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1098 : _ZERO_)
1099
1100#define XIJ_TR_U_U(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1101 ((_I_ < _J_) \
1102 ? XIJ_GE(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1103 : ((_I_ == _J_) ? _ONE_ : _ZERO_))
1104
1105#define XIJ_TR_L_N(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1106 ((_I_ >= _J_) \
1107 ? XIJ_GE(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1108 : _ZERO_)
1109
1110#define XIJ_TR_L_U(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1111 ((_I_ > _J_) \
1112 ? XIJ_GE(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1113 : ((_I_ == _J_) ? _ONE_ : _ZERO_))
1114
1115#define XIJ_TP_U_N(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1116 ((_I_ <= _J_) \
1117 ? *(_X_ + AR21_UP(_I_, _J_, _M_)) \
1118 : _ZERO_)
1119
1120#define XIJ_TP_U_U(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1121 ((_I_ < _J_) \
1122 ? *(_X_ + AR21_UP(_I_, _J_, _M_)) \
1123 : ((_I_ == _J_) ? _ONE_ : _ZERO_))
1124
1125#define XIJ_TP_L_N(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1126 ((_I_ >= _J_) \
1127 ? *(_X_ + AR21_LO(_I_, _J_, _M_)) \
1128 : _ZERO_)
1129
1130#define XIJ_TP_L_U(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1131 ((_I_ > _J_) \
1132 ? *(_X_ + AR21_LO(_I_, _J_, _M_)) \
1133 : ((_I_ == _J_) ? _ONE_ : _ZERO_))
1134
1135#define XIJ_SY_U( _X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1136 ((_I_ <= _J_) \
1137 ? XIJ_GE(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1138 : XIJ_GE(_X_, _J_, _I_, _M_, _ZERO_, _ONE_))
1139
1140#define XIJ_SY_L( _X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1141 ((_I_ >= _J_) \
1142 ? XIJ_GE(_X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1143 : XIJ_GE(_X_, _J_, _I_, _M_, _ZERO_, _ONE_))
1144
1145#define XIJ_SP_U( _X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1146 ((_I_ <= _J_) \
1147 ? *(_X_ + AR21_UP(_I_, _J_, _M_)) \
1148 : *(_X_ + AR21_UP(_J_, _I_, _M_)))
1149
1150#define XIJ_SP_L( _X_, _I_, _J_, _M_, _ZERO_, _ONE_) \
1151 ((_I_ >= _J_) \
1152 ? *(_X_ + AR21_LO(_I_, _J_, _M_)) \
1153 : *(_X_ + AR21_LO(_J_, _I_, _M_)))
1154
1155static
1156SEXP unpackedMatrix_subscript_2ary(SEXP x, SEXP i, SEXP j, const char *cl)
1157{
1158
1159#define SUB2_START \
1160 SEXP dim = PROTECT(GET_SLOT(x, Matrix_DimSym)); \
1161 int *pdim = INTEGER(dim), m = pdim[0], n = pdim[1]; \
1162 UNPROTECT(1); /* dim */ \
1163 \
1164 int ki, kj, \
1165 mi = i == R_NilValue, \
1166 mj = j == R_NilValue, \
1167 ni = (mi) ? m : LENGTH(i), \
1168 nj = (mj) ? n : LENGTH(j), \
1169 *pi = (mi) ? NULL : INTEGER(i), \
1170 *pj = (mj) ? NULL : INTEGER(j);
1171
1172#define SUB2_START_EXTRA(_E_, _R_, _Y_, _DENSE_) \
1173 SUB2_START; \
1174 \
1175 int upper = 1, nonunit = 1, keep = 0; \
1176 SEXP uplo, diag; \
1177 if (cl[1] != 'g') { \
1178 PROTECT(uplo = GET_SLOT(x, Matrix_uploSym)); \
1179 upper = *CHAR(STRING_ELT(uplo, 0)) == 'U'; \
1180 UNPROTECT(1); /* uplo */ \
1181 if (cl[1] == 't') { \
1182 PROTECT(diag = GET_SLOT(x, Matrix_diagSym)); \
1183 nonunit = *CHAR(STRING_ELT(diag, 0)) == 'N'; \
1184 UNPROTECT(1); /* diag */ \
1185 } \
1186 } \
1187 \
1188 char cl_[] = "...Matrix"; \
1189 cl_[0] = cl[0]; \
1190 cl_[1] = 'g'; \
1191 cl_[2] = _E_; \
1192 if (cl[1] != 'g' && !(mi || mj) && ni == nj) { \
1193 if (cl[1] == 't') { \
1194 keep = keep_tr(pi, pj, ni, upper, nonunit, _DENSE_); \
1195 if (keep != 0) { \
1196 cl_[1] = 't'; \
1197 cl_[2] = _R_; \
1198 } \
1199 } else { \
1200 keep = keep_sy(pi, pj, ni, upper, 0); \
1201 if ((_DENSE_) ? keep != 0 : keep < -1 || keep > 1) { \
1202 cl_[1] = 's'; \
1203 cl_[2] = _Y_; \
1204 } \
1205 } \
1206 } \
1207 SEXP res = PROTECT(newObject(cl_)); \
1208 \
1209 PROTECT(dim = GET_SLOT(res, Matrix_DimSym)); \
1210 pdim = INTEGER(dim); \
1211 pdim[0] = ni; \
1212 pdim[1] = nj; \
1213 UNPROTECT(1); /* dim */ \
1214 \
1215 if ((cl[1] != 's') ? keep < 0 : keep < -1) { \
1216 PROTECT(uplo = GET_SLOT(res, Matrix_uploSym)); \
1217 SEXP uplo_ = PROTECT(mkChar("L")); \
1218 SET_STRING_ELT(uplo, 0, uplo_); \
1219 UNPROTECT(2); /* uplo_, uplo */ \
1220 } \
1221 if (cl[1] == 't' && (keep < -1 || keep > 1)) { \
1222 PROTECT(diag = GET_SLOT(res, Matrix_diagSym)); \
1223 SEXP diag_ = PROTECT(mkChar("U")); \
1224 SET_STRING_ELT(diag, 0, diag_); \
1225 UNPROTECT(2); /* diag_, diag */ \
1226 }
1227
1228 SUB2_START_EXTRA('e', 'r', 'y', 1);
1229
1230 double ninj = (double) ni * nj;
1231 if (ninj > R_XLEN_T_MAX)
1232 error(_("attempt to allocate vector of length exceeding %s"),
1233 "R_XLEN_T_MAX");
1234
1235 SEXP x0 = PROTECT(GET_SLOT(x, Matrix_xSym)),
1236 x1 = PROTECT(allocVector(TYPEOF(x0), (R_xlen_t) ninj));
1237
1238 int i_, j_;
1239 Matrix_int_fast64_t m_ = m;
1240
1241#define SUB2_CASES(_SUB2_) \
1242 do { \
1243 switch (cl[0]) { \
1244 case 'n': \
1245 case 'l': \
1246 _SUB2_(int, LOGICAL, NA_LOGICAL, 0, 1); \
1247 break; \
1248 case 'i': \
1249 _SUB2_(int, INTEGER, NA_INTEGER, 0, 1); \
1250 break; \
1251 case 'd': \
1252 _SUB2_(double, REAL, NA_REAL, 0.0, 1.0); \
1253 break; \
1254 case 'z': \
1255 _SUB2_(Rcomplex, COMPLEX, \
1256 Matrix_zna, Matrix_zzero, Matrix_zone); \
1257 break; \
1258 default: \
1259 break; \
1260 } \
1261 } while (0)
1262
1263#define SUB2(_CTYPE_, _PTR_, _NA_, _ZERO_, _ONE_) \
1264 do { \
1265 _CTYPE_ *px0 = _PTR_(x0), *px1 = _PTR_(x1); \
1266 if (cl_[1] == 'g') { \
1267 if (cl[1] == 'g') \
1268 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1269 XIJ_GE, , , _NA_, _ZERO_, _ONE_); \
1270 else if (cl[1] == 't') { \
1271 if (upper) { \
1272 if (nonunit) \
1273 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1274 XIJ_TR_U_N, , , _NA_, _ZERO_, _ONE_); \
1275 else \
1276 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1277 XIJ_TR_U_U, , , _NA_, _ZERO_, _ONE_); \
1278 } else { \
1279 if (nonunit) \
1280 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1281 XIJ_TR_L_N, , , _NA_, _ZERO_, _ONE_); \
1282 else \
1283 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1284 XIJ_TR_L_U, , , _NA_, _ZERO_, _ONE_); \
1285 } \
1286 } else { \
1287 if (upper) \
1288 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1289 XIJ_SY_U, , , _NA_, _ZERO_, _ONE_); \
1290 else \
1291 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1292 XIJ_SY_L, , , _NA_, _ZERO_, _ONE_); \
1293 } \
1294 } else if (cl_[1] == 't') { \
1295 Matrix_memset(px1, 0, XLENGTH(x1), sizeof(_CTYPE_)); \
1296 if (upper) { \
1297 if (nonunit) { \
1298 if (keep > 0) \
1299 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1300 XIJ_TR_U_N, , px1 += ni - kj - 1, \
1301 _NA_, _ZERO_, _ONE_); \
1302 else \
1303 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1304 XIJ_TR_U_N, px1 += kj, , \
1305 _NA_, _ZERO_, _ONE_); \
1306 } else { \
1307 if (keep > 0) \
1308 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1309 XIJ_TR_U_U, , px1 += ni - kj - 1, \
1310 _NA_, _ZERO_, _ONE_); \
1311 else \
1312 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1313 XIJ_TR_U_U, px1 += kj, , \
1314 _NA_, _ZERO_, _ONE_); \
1315 } \
1316 } else { \
1317 if (nonunit) { \
1318 if (keep > 0) \
1319 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1320 XIJ_TR_L_N, , px1 += ni - kj - 1, \
1321 _NA_, _ZERO_, _ONE_); \
1322 else \
1323 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1324 XIJ_TR_L_N, px1 += kj, , \
1325 _NA_, _ZERO_, _ONE_); \
1326 } else { \
1327 if (keep > 0) \
1328 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1329 XIJ_TR_L_U, , px1 += ni - kj - 1, \
1330 _NA_, _ZERO_, _ONE_); \
1331 else \
1332 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1333 XIJ_TR_L_U, px1 += kj, , \
1334 _NA_, _ZERO_, _ONE_); \
1335 } \
1336 } \
1337 } else { \
1338 Matrix_memset(px1, 0, XLENGTH(x1), sizeof(_CTYPE_)); \
1339 if (upper) { \
1340 if (keep > 0) \
1341 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1342 XIJ_SY_U, , px1 += ni - kj - 1, \
1343 _NA_, _ZERO_, _ONE_); \
1344 else \
1345 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1346 XIJ_SY_U, px1 += kj, , \
1347 _NA_, _ZERO_, _ONE_); \
1348 } else { \
1349 if (keep > 0) \
1350 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1351 XIJ_SY_L, , px1 += ni - kj - 1, \
1352 _NA_, _ZERO_, _ONE_); \
1353 else \
1354 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1355 XIJ_SY_L, px1 += kj, , \
1356 _NA_, _ZERO_, _ONE_); \
1357 } \
1358 } \
1359 } while (0)
1360
1361#define SUB2_LOOP(_FOR_, _XIJ_, _JUMP1_, _JUMP2_, \
1362 _NA_, _ZERO_, _ONE_) \
1363 do { \
1364 for (kj = 0; kj < nj; ++kj) { \
1365 if (mj) \
1366 j_ = kj; \
1367 else { \
1368 j_ = pj[kj]; \
1369 if (j_ != NA_INTEGER) \
1370 j_ -= 1; \
1371 else { \
1372 _JUMP1_; \
1373 _FOR_ { \
1374 *(px1++) = _NA_; \
1375 } \
1376 _JUMP2_; \
1377 continue; \
1378 } \
1379 } \
1380 _JUMP1_; \
1381 _FOR_ { \
1382 if (mi) \
1383 i_ = ki; \
1384 else { \
1385 i_ = pi[ki]; \
1386 if (i_ != NA_INTEGER) \
1387 i_ -= 1; \
1388 else { \
1389 *(px1++) = _NA_; \
1390 continue; \
1391 } \
1392 } \
1393 *(px1++) = _XIJ_(px0, i_, j_, m_, _ZERO_, _ONE_); \
1394 } \
1395 _JUMP2_; \
1396 } \
1397 } while (0)
1398
1400
1401#undef SUB2
1402
1403 SET_SLOT(res, Matrix_xSym, x1);
1404
1405 UNPROTECT(3); /* x1, x0, res */
1406 return res;
1407}
1408
1409static
1410SEXP packedMatrix_subscript_2ary(SEXP x, SEXP i, SEXP j, const char *cl)
1411{
1412 SUB2_START_EXTRA('e', 'p', 'p', 1);
1413
1414 double ninj = (double) ni * nj,
1415 ninj_ = (keep) ? 0.5 * (ninj + ni) : ninj;
1416 if (ninj_ > R_XLEN_T_MAX)
1417 error(_("attempt to allocate vector of length exceeding %s"),
1418 "R_XLEN_T_MAX");
1419
1420 SEXP x0 = PROTECT(GET_SLOT(x, Matrix_xSym)),
1421 x1 = PROTECT(allocVector(TYPEOF(x0), (R_xlen_t) ninj_));
1422
1423 int i_, j_;
1424 Matrix_int_fast64_t m_ = m;
1425
1426#define SUB2(_CTYPE_, _PTR_, _NA_, _ZERO_, _ONE_) \
1427 do { \
1428 _CTYPE_ *px0 = _PTR_(x0), *px1 = _PTR_(x1); \
1429 if (cl_[1] == 'g') { \
1430 if (cl[1] == 't') { \
1431 if (upper) { \
1432 if (nonunit) \
1433 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1434 XIJ_TP_U_N, , , _NA_, _ZERO_, _ONE_); \
1435 else \
1436 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1437 XIJ_TP_U_U, , , _NA_, _ZERO_, _ONE_); \
1438 } else { \
1439 if (nonunit) \
1440 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1441 XIJ_TP_L_N, , , _NA_, _ZERO_, _ONE_); \
1442 else \
1443 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1444 XIJ_TP_L_U, , , _NA_, _ZERO_, _ONE_); \
1445 } \
1446 } else { \
1447 if (upper) \
1448 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1449 XIJ_SP_U, , , _NA_, _ZERO_, _ONE_); \
1450 else \
1451 SUB2_LOOP(for (ki = 0; ki < ni; ++ki), \
1452 XIJ_SP_L, , , _NA_, _ZERO_, _ONE_); \
1453 } \
1454 } else if (cl_[1] == 't') { \
1455 if (upper) { \
1456 if (nonunit) { \
1457 if (keep > 0) \
1458 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1459 XIJ_TP_U_N, , , _NA_, _ZERO_, _ONE_); \
1460 else \
1461 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1462 XIJ_TP_U_N, , , _NA_, _ZERO_, _ONE_); \
1463 } else { \
1464 if (keep > 0) \
1465 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1466 XIJ_TP_U_U, , , _NA_, _ZERO_, _ONE_); \
1467 else \
1468 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1469 XIJ_TP_U_U, , , _NA_, _ZERO_, _ONE_); \
1470 } \
1471 } else { \
1472 if (nonunit) { \
1473 if (keep > 0) \
1474 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1475 XIJ_TP_L_N, , , _NA_, _ZERO_, _ONE_); \
1476 else \
1477 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1478 XIJ_TP_L_N, , , _NA_, _ZERO_, _ONE_); \
1479 } else { \
1480 if (keep > 0) \
1481 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1482 XIJ_TP_L_U, , , _NA_, _ZERO_, _ONE_); \
1483 else \
1484 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1485 XIJ_TP_L_U, , , _NA_, _ZERO_, _ONE_); \
1486 } \
1487 } \
1488 } else { \
1489 if (upper) { \
1490 if (keep > 0) \
1491 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1492 XIJ_SP_U, , , _NA_, _ZERO_, _ONE_); \
1493 else \
1494 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1495 XIJ_SP_U, , , _NA_, _ZERO_, _ONE_); \
1496 } else { \
1497 if (keep > 0) \
1498 SUB2_LOOP(for (ki = 0; ki <= kj; ++ki), \
1499 XIJ_SP_L, , , _NA_, _ZERO_, _ONE_); \
1500 else \
1501 SUB2_LOOP(for (ki = kj; ki < ni; ++ki), \
1502 XIJ_SP_L, , , _NA_, _ZERO_, _ONE_); \
1503 } \
1504 } \
1505 } while (0)
1506
1508
1509#undef SUB2_LOOP
1510#undef SUB2
1511
1512 SET_SLOT(res, Matrix_xSym, x1);
1513
1514 UNPROTECT(3); /* x1, x0, res */
1515 return res;
1516}
1517
1518static
1519SEXP CsparseMatrix_subscript_2ary(SEXP x, SEXP i, SEXP j, const char *cl)
1520{
1521 SUB2_START_EXTRA('C', 'C', 'C', 0);
1522
1523 if (cl[1] != 'g' && cl_[1] == 'g') {
1524 /* defined in ./coerce.c : */
1525 SEXP sparse_as_general(SEXP, const char *);
1526 x = sparse_as_general(x, cl);
1527 }
1528 PROTECT(x);
1529
1530 SEXP p0 = PROTECT(GET_SLOT(x, Matrix_pSym)),
1531 i0 = PROTECT(GET_SLOT(x, Matrix_iSym)),
1532 p1 = PROTECT(allocVector(INTSXP, (R_xlen_t) nj + 1)),
1533 i1 = NULL;
1534 int *pp0 = INTEGER(p0), *pi0 = INTEGER(i0),
1535 *pp1 = INTEGER(p1), *pi1 = NULL,
1536 d, k, kend, doSort = 0;
1537 Matrix_int_fast64_t nnz = 0;
1538 *(pp1++) = 0;
1539
1540#define SUB2_FINISH \
1541 if (nnz > INT_MAX) \
1542 error(_("%s too dense for %s; would have more than %s nonzero entries"), \
1543 "x[i,j]", "[CR]sparseMatrix", "2^31-1"); \
1544 \
1545 PROTECT(i1 = allocVector(INTSXP, (R_xlen_t) nnz)); \
1546 pi1 = INTEGER(i1); \
1547 \
1548 if (cl[0] == 'n') \
1549 SUB2_LOOP(HIDE); \
1550 else { \
1551 SEXP x0 = PROTECT(GET_SLOT(x, Matrix_xSym)), \
1552 x1 = PROTECT(allocVector(TYPEOF(x0), (R_xlen_t) nnz)); \
1553 SUB2_CASES(SUB2); \
1554 SET_SLOT(res, Matrix_xSym, x1); \
1555 UNPROTECT(2); /* x1, x0 */ \
1556 }
1557
1558#define SUB2(_CTYPE_, _PTR_, _NA_, _ZERO_, _ONE_) \
1559 do { \
1560 _CTYPE_ *px0 = _PTR_(x0), *px1 = _PTR_(x1); \
1561 SUB2_LOOP(SHOW); \
1562 } while (0)
1563
1564 if (mi) {
1565
1566 for (kj = 0; kj < nj; ++kj) {
1567 nnz += pp0[pj[kj]] - pp0[pj[kj] - 1];
1568 pp1[kj] = (int) nnz;
1569 }
1570
1571#define SUB2_LOOP(_MASK_) \
1572 do { \
1573 for (kj = 0; kj < nj; ++kj) { \
1574 k = pp0[pj[kj] - 1]; \
1575 kend = pp0[pj[kj]]; \
1576 d = kend - k; \
1577 if (d) { \
1578 Matrix_memcpy(pi1, pi0 + k, d, sizeof(int)); \
1579 pi1 += d; \
1580 _MASK_(Matrix_memcpy(px1, px0 + k, d, sizeof(*px1))); \
1581 _MASK_(px1 += d); \
1582 } \
1583 } \
1584 } while (0)
1585
1587
1588#undef SUB2_LOOP
1589
1590 } else {
1591
1592 int *workA, *workB, *workC;
1593 size_t lwork = (size_t) m + m + ni;
1594 Matrix_Calloc(workA, lwork, int);
1595 workB = workA + m;
1596 workC = workB + m;
1597
1598 /* workA[ i] : size of the set { ki : pi[ki] - 1 == i }
1599 workB[ i] : smallest ki such that pi[ki] - 1 == i
1600 workC[ki] : smallest ki' > ki such that pi[ki'] == pi[ki]
1601 */
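/* workB and workC together form one singly linked list per row of 'x',
   chaining the positions in 'i' that request that row.  E.g. if
   i = c(3, 1, 3), then for row 3 (i_ == 2): workA[2] = 2, workB[2] = 0,
   workC[0] = 2, so output rows 0 and 2 each receive a copy of every
   nonzero found in row 3 of the selected columns. */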
1602
1603 int i_, j_, i_prev = m;
1604
1605 for (ki = ni - 1; ki >= 0; --ki) {
1606 i_ = pi[ki] - 1;
1607 ++workA[i_];
1608 workC[ki] = workB[i_];
1609 workB[i_] = ki;
1610 if (i_ > i_prev)
1611 doSort = 1;
1612 i_prev = i_;
1613 }
1614
1615 for (kj = 0; kj < nj; ++kj) {
1616 j_ = (mj) ? kj : pj[kj] - 1;
1617 k = pp0[j_];
1618 kend = pp0[j_ + 1];
1619 while (k < kend) {
1620 nnz += workA[pi0[k]];
1621 ++k;
1622 }
1623 pp1[kj] = (int) nnz;
1624 }
1625
1626#define SUB2_LOOP(_MASK_) \
1627 do { \
1628 for (kj = 0; kj < nj; ++kj) { \
1629 j_ = (mj) ? kj : pj[kj] - 1; \
1630 k = pp0[j_]; \
1631 kend = pp0[j_ + 1]; \
1632 while (k < kend) { \
1633 i_ = pi0[k]; \
1634 d = workA[i_]; \
1635 ki = workB[i_]; \
1636 while (d--) { \
1637 *(pi1++) = ki; \
1638 _MASK_(*(px1++) = px0[k]); \
1639 ki = workC[ki]; \
1640 } \
1641 ++k; \
1642 } \
1643 } \
1644 } while (0)
1645
1647
1648#undef SUB2_LOOP
1649
1650 Matrix_Free(workA, lwork);
1651
1652 }
1653
1654#undef SUB2_FINISH
1655
1656 SET_SLOT(res, Matrix_pSym, p1);
1657 SET_SLOT(res, Matrix_iSym, i1);
1658 UNPROTECT(4); /* i1, p1, i0, p0 */
1659
1660 if (doSort)
1661 sort_cr(res, cl);
1662 if (cl[1] == 's' && (keep == -1 || keep == 1)) {
1663 /* defined in ./sparse.c : */
1664 SEXP sparse_force_symmetric(SEXP, const char *, char);
1665 res = sparse_force_symmetric(res, cl_, (keep == 1) ? 'U' : 'L');
1666 }
1667 UNPROTECT(2); /* x, res */
1668 return res;
1669}
1670
1671static
1672SEXP RsparseMatrix_subscript_2ary(SEXP x, SEXP i, SEXP j, const char *cl)
1673{
1674 SUB2_START_EXTRA('R', 'R', 'R', 0);
1675
1676 if (cl[1] != 'g' && cl_[1] == 'g') {
1677 /* defined in ./coerce.c : */
1678 SEXP sparse_as_general(SEXP, const char *);
1679 x = sparse_as_general(x, cl);
1680 }
1681 PROTECT(x);
1682
1683 SEXP p0 = PROTECT(GET_SLOT(x, Matrix_pSym)),
1684 j0 = PROTECT(GET_SLOT(x, Matrix_jSym)),
1685 p1 = PROTECT(allocVector(INTSXP, (R_xlen_t) ni + 1)),
1686 j1 = NULL;
1687 int *pp0 = INTEGER(p0), *pj0 = INTEGER(j0),
1688 *pp1 = INTEGER(p1), *pj1 = NULL,
1689 d, k, kend, doSort = 0;
1690 Matrix_int_fast64_t nnz = 0;
1691 *(pp1++) = 0;
1692
1693#define SUB2_FINISH \
1694 if (nnz > INT_MAX) \
1695 error(_("%s too dense for %s; would have more than %s nonzero entries"), \
1696 "x[i,j]", "[CR]sparseMatrix", "2^31-1"); \
1697 \
1698 PROTECT(j1 = allocVector(INTSXP, (R_xlen_t) nnz)); \
1699 pj1 = INTEGER(j1); \
1700 \
1701 if (cl[0] == 'n') \
1702 SUB2_LOOP(HIDE); \
1703 else { \
1704 SEXP x0 = PROTECT(GET_SLOT(x, Matrix_xSym)), \
1705 x1 = PROTECT(allocVector(TYPEOF(x0), (R_xlen_t) nnz)); \
1706 SUB2_CASES(SUB2); \
1707 SET_SLOT(res, Matrix_xSym, x1); \
1708 UNPROTECT(2); /* x1, x0 */ \
1709 }
1710
1711 if (mj) {
1712
1713 for (ki = 0; ki < ni; ++ki) {
1714 nnz += pp0[pi[ki]] - pp0[pi[ki] - 1];
1715 pp1[ki] = (int) nnz;
1716 }
1717
1718#define SUB2_LOOP(_MASK_) \
1719 do { \
1720 for (ki = 0; ki < ni; ++ki) { \
1721 k = pp0[pi[ki] - 1]; \
1722 kend = pp0[pi[ki]]; \
1723 d = kend - k; \
1724 if (d) { \
1725 Matrix_memcpy(pj1, pj0 + k, d, sizeof(int)); \
1726 pj1 += d; \
1727 _MASK_(Matrix_memcpy(px1, px0 + k, d, sizeof(*px1))); \
1728 _MASK_(px1 += d); \
1729 } \
1730 } \
1731 } while (0)
1732
1734
1735#undef SUB2_LOOP
1736
1737 } else {
1738
1739 int *workA, *workB, *workC;
1740 size_t lwork = (size_t) n + n + nj;
1741 Matrix_Calloc(workA, lwork, int);
1742 workB = workA + n;
1743 workC = workB + n;
1744
1745 /* workA[ j] : size of the set { kj : pj[kj] - 1 == j }
1746 workB[ j] : smallest kj such that pj[kj] - 1 == j
1747 workC[kj] : smallest kj' > kj such that pj[kj'] == pj[kj]
1748 */
1749
1750 int i_, j_, j_prev = n;
1751
1752 for (kj = nj - 1; kj >= 0; --kj) {
1753 j_ = pj[kj] - 1;
1754 ++workA[j_];
1755 workC[kj] = workB[j_];
1756 workB[j_] = kj;
1757 if (j_ > j_prev)
1758 doSort = 1;
1759 j_prev = j_;
1760 }
1761
1762 for (ki = 0; ki < ni; ++ki) {
1763 i_ = (mi) ? ki : pi[ki] - 1;
1764 k = pp0[i_];
1765 kend = pp0[i_ + 1];
1766 while (k < kend) {
1767 nnz += workA[pj0[k]];
1768 ++k;
1769 }
1770 pp1[ki] = (int) nnz;
1771 }
1772
1773#define SUB2_LOOP(_MASK_) \
1774 do { \
1775 for (ki = 0; ki < ni; ++ki) { \
1776 i_ = (mi) ? ki : pi[ki] - 1; \
1777 k = pp0[i_]; \
1778 kend = pp0[i_ + 1]; \
1779 while (k < kend) { \
1780 j_ = pj0[k]; \
1781 d = workA[j_]; \
1782 kj = workB[j_]; \
1783 while (d--) { \
1784 *(pj1++) = kj; \
1785 _MASK_(*(px1++) = px0[k]); \
1786 kj = workC[kj]; \
1787 } \
1788 ++k; \
1789 } \
1790 } \
1791 } while (0)
1792
1794
1795#undef SUB2_LOOP
1796
1797 Matrix_Free(workA, lwork);
1798
1799 }
1800
1801#undef SUB2_FINISH
1802#undef SUB2
1803
1804 SET_SLOT(res, Matrix_pSym, p1);
1805 SET_SLOT(res, Matrix_jSym, j1);
1806 UNPROTECT(4); /* j1, p1, j0, p0 */
1807
1808 if (doSort)
1809 sort_cr(res, cl);
1810 if (cl[1] == 's' && (keep == -1 || keep == 1)) {
1811 /* defined in ./sparse.c : */
1812 SEXP sparse_force_symmetric(SEXP, const char *, char);
1813 res = sparse_force_symmetric(res, cl_, (keep == 1) ? 'U' : 'L');
1814 }
1815 UNPROTECT(2); /* x, res */
1816 return res;
1817}
1818
1819static
1820SEXP diagonalMatrix_subscript_2ary(SEXP x, SEXP i, SEXP j, const char *cl)
1821{
1822 SUB2_START;
1823
1824 int nonunit = 1, keep = 0;
1825 SEXP diag = PROTECT(GET_SLOT(x, Matrix_diagSym));
1826 nonunit = *CHAR(STRING_ELT(diag, 0)) == 'N';
1827 UNPROTECT(1); /* diag */
1828
1829 char cl_[] = ".gCMatrix";
1830 cl_[0] = cl[0];
1831 if (!(mi || mj) && ni == nj) {
1832 keep = keep_di(pi, pj, ni, nonunit, 0, m);
1833 if (keep > 0) {
1834 cl_[1] = 'd';
1835 cl_[2] = 'i';
1836 }
1837 }
1838
1839 SEXP res = PROTECT(newObject(cl_));
1840
1841 PROTECT(dim = GET_SLOT(res, Matrix_DimSym));
1842 pdim = INTEGER(dim);
1843 pdim[0] = (int) ni;
1844 pdim[1] = (int) nj;
1845 UNPROTECT(1); /* dim */
1846
1847 if (keep > 1) {
1848
1849 PROTECT(diag = GET_SLOT(res, Matrix_diagSym));
1850 SEXP diag_ = PROTECT(mkChar("U"));
1851 SET_STRING_ELT(diag, 0, diag_);
1852 UNPROTECT(2); /* diag_, diag */
1853
1854 } else if (keep > 0) {
1855
1856 SEXP x0 = PROTECT(GET_SLOT(x, Matrix_xSym)),
1857 x1 = PROTECT(allocVector(TYPEOF(x0), ni));
1858 int j_;
1859
1860#define SUB2(_CTYPE_, _PTR_, _NA_, _ZERO_, _ONE_) \
1861 do { \
1862 _CTYPE_ *px0 = _PTR_(x0), *px1 = _PTR_(x1); \
1863 while (ni--) \
1864 *(px1++) = \
1865 (*(pi++) != (j_ = *(pj++))) \
1866 ? _ZERO_ \
1867 : ((nonunit) ? px0[j_ - 1] : _ONE_); \
1868 } while (0)
1869
1871
1872#undef SUB2
1873
1874 SET_SLOT(res, Matrix_xSym, x1);
1875 UNPROTECT(2); /* x0, x1 */
1876
1877 } else {
1878
1879 SEXP x0 = PROTECT(GET_SLOT(x, Matrix_xSym));
1880 char *work = NULL;
1881 int j_;
1882
1883 if (nonunit) {
1884
1885 Matrix_Calloc(work, n, char);
1886
1887#define SUB2_WORK(_CTYPE_, _PTR_, _ISNZ_) \
1888 do { \
1889 _CTYPE_ *px0 = _PTR_(x0); \
1890 for (j_ = 0; j_ < n; ++j_) \
1891 work[j_] = _ISNZ_(px0[j_]); \
1892 } while (0)
1893
1894 switch (cl[0]) {
1895 case 'n':
1896 SUB2_WORK(int, LOGICAL, ISNZ_PATTERN);
1897 break;
1898 case 'l':
1899 SUB2_WORK(int, LOGICAL, ISNZ_LOGICAL);
1900 break;
1901 case 'i':
1902 SUB2_WORK(int, INTEGER, ISNZ_INTEGER);
1903 break;
1904 case 'd':
1905 SUB2_WORK(double, REAL, ISNZ_REAL);
1906 break;
1907 case 'z':
1908 SUB2_WORK(Rcomplex, COMPLEX, ISNZ_COMPLEX);
1909 break;
1910 default:
1911 break;
1912 }
1913
1914#undef SUB2_WORK
1915
1916 }
1917
1918 SEXP p1 = PROTECT(allocVector(INTSXP, (R_xlen_t) nj + 1));
1919 int *pp1 = INTEGER(p1);
1920 *(pp1++) = 0;
1921
1922 for (kj = 0; kj < nj; ++kj) {
1923 pp1[kj] = 0;
1924 j_ = (mj) ? kj : pj[kj] - 1;
1925 if (!nonunit || work[j_]) {
1926 if (mi) {
1927 for (ki = 0; ki < ni; ++ki)
1928 if (ki == j_)
1929 ++pp1[kj];
1930 } else {
1931 for (ki = 0; ki < ni; ++ki)
1932 if (pi[ki] - 1 == j_)
1933 ++pp1[kj];
1934 }
1935 if (pp1[kj] > INT_MAX - pp1[kj - 1]) {
1936 if (nonunit)
1937 Matrix_Free(work, n);
1938 error(_("%s too dense for %s; would have more than %s nonzero entries"),
1939 "x[i,j]", "[CR]sparseMatrix", "2^31-1");
1940 }
1941 }
1942 pp1[kj] += pp1[kj-1];
1943 }
1944
1945 SEXP i1 = PROTECT(allocVector(INTSXP, pp1[nj - 1])),
1946 x1 = PROTECT(allocVector(TYPEOF(x0), pp1[nj - 1]));
1947 int *pi1 = INTEGER(i1);
1948
1949#define SUB2(_CTYPE_, _PTR_, _NA_, _ZERO_, _ONE_) \
1950 do { \
1951 _CTYPE_ *px0 = _PTR_(x0), *px1 = _PTR_(x1); \
1952 for (kj = 0; kj < nj; ++kj) { \
1953 j_ = (mj) ? kj : pj[kj] - 1; \
1954 if (!nonunit || work[j_]) { \
1955 for (ki = 0; ki < ni; ++ki) { \
1956 if (((mi) ? ki : pi[ki] - 1) == j_) { \
1957 *(pi1++) = ki; \
1958 *(px1++) = (nonunit) ? px0[j_] : _ONE_; \
1959 } \
1960 } \
1961 } \
1962 } \
1963 } while (0)
1964
1966
1967#undef SUB2
1968
1969 SET_SLOT(res, Matrix_pSym, p1);
1970 SET_SLOT(res, Matrix_iSym, i1);
1971 SET_SLOT(res, Matrix_xSym, x1);
1972 UNPROTECT(4); /* x1, x0, i1, p1 */
1973
1974 if (nonunit)
1975 Matrix_Free(work, n);
1976
1977 }
1978
1979 UNPROTECT(1); /* res */
1980 return res;
1981}
1982
1983static
1984SEXP indMatrix_subscript_2ary(SEXP x, SEXP i, SEXP j, const char *cl)
1985{
1986 PROTECT_INDEX pidA;
1987 PROTECT_WITH_INDEX(x, &pidA);
1988
1989 PROTECT_INDEX pidB;
1990 SEXP perm0 = GET_SLOT(x, Matrix_permSym);
1991 int *pperm0 = INTEGER(perm0);
1992 PROTECT_WITH_INDEX(perm0, &pidB);
1993
1994 SEXP margin = PROTECT(GET_SLOT(x, Matrix_marginSym));
1995 int mg = INTEGER(margin)[0] - 1;
1996
1997 SEXP dim = PROTECT(GET_SLOT(x, Matrix_DimSym));
1998 int *pdim = INTEGER(dim), m = pdim[mg], n = pdim[!mg];
1999 UNPROTECT(1); /* dim */
2000
2001 if (mg) {
2002 SEXP i_tmp = i;
2003 i = j;
2004 j = i_tmp;
2005 }
2006
2007 int ki, kj,
2008 mi = i == R_NilValue,
2009 mj = j == R_NilValue,
2010 ni = (mi) ? m : LENGTH(i),
2011 nj = (mj) ? n : LENGTH(j),
2012 *pi = (mi) ? NULL : INTEGER(i),
2013 *pj = (mj) ? NULL : INTEGER(j),
2014 isP = cl[0] == 'p';
2015
2016 if (!mi) {
2017 isP = isP && ni == m;
2018 if (isP) {
2019 char *work;
2020 Matrix_Calloc(work, m, char);
2021 --work; /* now 1-indexed */
2022 for (ki = 0; ki < ni; ++ki) {
2023 if (work[pi[ki]]) {
2024 isP = 0;
2025 break;
2026 }
2027 work[pi[ki]] = 1;
2028 }
2029 ++work; /* now 0-indexed */
2030 Matrix_Free(work, m);
2031 }
2032
2033 x = newObject((isP) ? "pMatrix" : "indMatrix");
2034 REPROTECT(x, pidA);
2035
2036 PROTECT(dim = GET_SLOT(x, Matrix_DimSym));
2037 pdim = INTEGER(dim);
2038 pdim[ mg] = ni;
2039 pdim[!mg] = n;
2040 UNPROTECT(1); /* dim */
2041
2042 if (mg)
2043 SET_SLOT(x, Matrix_marginSym, margin);
2044
2045 SEXP perm1 = PROTECT(allocVector(INTSXP, ni));
2046 int *pperm1 = INTEGER(perm1);
2047 --pperm0; /* now 1-indexed */
2048 for (ki = 0; ki < ni; ++ki)
2049 pperm1[ki] = pperm0[pi[ki]];
2050 SET_SLOT(x, Matrix_permSym, perm1);
2051 UNPROTECT(1); /* perm1 */
2052
2053 perm0 = perm1;
2054 pperm0 = pperm1;
2055 REPROTECT(perm0, pidB);
2056
2057 m = ni;
2058 }
2059
2060 if (!mj) {
2061 isP = isP && nj == n;
2062 if (isP) {
2063 char *work;
2064 Matrix_Calloc(work, nj, char);
2065 --work; /* now 1-indexed */
2066 for (kj = 0; kj < nj; ++kj) {
2067 if (work[pj[kj]]) {
2068 isP = 0;
2069 break;
2070 }
2071 work[pj[kj]] = 1;
2072 }
2073 ++work; /* now 0-indexed */
2074 Matrix_Free(work, nj);
2075 }
2076
2077 x = newObject((isP)
2078 ? "pMatrix"
2079 : ((!mg) ? "ngCMatrix" : "ngRMatrix"));
2080 REPROTECT(x, pidA);
2081
2082 PROTECT(dim = GET_SLOT(x, Matrix_DimSym));
2083 pdim = INTEGER(dim);
2084 pdim[ mg] = m;
2085 pdim[!mg] = nj;
2086 UNPROTECT(1); /* dim */
2087
2088 if (isP) {
2089 SEXP perm1 = PROTECT(allocVector(INTSXP, nj));
2090 int *pperm1 = INTEGER(perm1), *work;
2091 Matrix_Calloc(work, nj, int);
2092 --work; /* now 1-indexed */
2093 for (kj = 0; kj < nj; ++kj)
2094 work[pj[kj]] = kj + 1;
2095 for (kj = 0; kj < nj; ++kj)
2096 pperm1[kj] = work[pperm0[kj]];
2097 ++work; /* now 0-indexed */
2098 Matrix_Free(work, nj);
2099 SET_SLOT(x, Matrix_permSym, perm1);
2100 UNPROTECT(1); /* perm1 */
2101 } else {
2102 int *workA, *workB, *workC;
2103 size_t lwork = (size_t) n + n + m;
2104 Matrix_Calloc(workA, lwork, int);
2105 workB = workA + n;
2106 workC = workB + n;
2107 --workA; /* now 1-indexed */
2108 --workB; /* now 1-indexed */
2109
2110 SEXP p1 = PROTECT(allocVector(INTSXP, (R_xlen_t) nj + 1));
2111 int *pp1 = INTEGER(p1), k, kend;
2112
2113 /* 1. Compute old column counts in 'workA' */
2114 for (k = 0; k < m; ++k)
2115 ++workA[pperm0[k]];
2116
2117 /* 2. Compute new column pointers in 'pp1' */
2118 *(pp1++) = 0;
2119 for (kj = 0; kj < nj; ++kj) {
2120 pp1[kj] = workA[pj[kj]];
2121 if (pp1[kj] > INT_MAX - pp1[kj - 1])
2122 error(_("%s too dense for %s; would have more than %s nonzero entries"),
2123 "x[i,j]", "[CR]sparseMatrix", "2^31-1");
2124
2125 pp1[kj] += pp1[kj - 1];
2126 }
2127
2128 /* 3. Compute old column pointers in 'workB' and copy to 'workA' */
2129 workB[1] = 0;
2130 for (k = 1; k < n; ++k) {
2131 workB[k + 1] = workB[k] + workA[k];
2132 workA[k] = workB[k];
2133 }
2134 workA[n] = workB[n];
2135
2136 /* 4. Sort old row indices into 'workC' */
2137 for (k = 0; k < m; ++k)
2138 workC[workA[pperm0[k]]++] = k;
2139
2140 SEXP i1 = PROTECT(allocVector(INTSXP, pp1[nj - 1]));
2141 int *pi1 = INTEGER(i1), pos;
2142
2143 /* 5. Copy row indices from 'workC' to 'pi1' */
2144 k = 0;
2145 for (kj = 0; kj < nj; ++kj) {
2146 kend = pp1[kj];
2147 pos = workB[pj[kj]];
2148 while (k < kend)
2149 pi1[k++] = workC[pos++];
2150 }
2151
2152 ++workA; /* now 0-indexed */
2153 Matrix_Free(workA, lwork);
2154 SET_SLOT(x, Matrix_pSym, p1);
2155 SET_SLOT(x, (!mg) ? Matrix_iSym : Matrix_jSym, i1);
2156 UNPROTECT(2); /* i1, p1 */
2157 }
2158
2159 n = nj;
2160 }
2161
2162#undef SUB2_CASES
2163#undef SUB2_START
2164#undef SUB2_START_EXTRA
2165
2166 UNPROTECT(3); /* margin, perm0, x */
2167 return x;
2168}
2169
2170/* x[i,j,drop=FALSE] with 'i' and 'j' of type "integer" and length
2171 not exceeding 2^31-1 {'i' in 1:m or NA, 'j' in 1:n or NA} ...
2172 but _not_ handling 'Dimnames'
2173*/
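/* Dense classes are handled directly below; for the sparse classes the
   function first signals an error if 'i' or 'j' contains NA (the anyNA
   check), and T-sparse input is routed through a C-sparse intermediate
   and converted back afterwards. */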
2174SEXP R_subscript_2ary(SEXP x, SEXP i, SEXP j)
2175{
2176 if (i == R_NilValue && j == R_NilValue)
2177 return x;
2178
2179 static const char *valid[] = { VALID_NONVIRTUAL_MATRIX, "" };
2180 int ivalid = R_check_class_etc(x, valid);
2181 if (ivalid < 0)
2182 ERROR_INVALID_CLASS(x, __func__);
2183 ivalid += VALID_NONVIRTUAL_SHIFT(ivalid, 0);
2184 const char *cl = valid[ivalid];
2185 validObject(x, cl);
2186
2187 switch (cl[2]) {
2188 case 'e':
2189 case 'y':
2190 case 'r':
2191 return unpackedMatrix_subscript_2ary(x, i, j, cl);
2192 case 'p':
2193 return packedMatrix_subscript_2ary(x, i, j, cl);
2194 default:
2195 break;
2196 }
2197
2198 static SEXP anyNA = NULL;
2199 if (!anyNA)
2200 anyNA = install("anyNA");
2201 SEXP call = PROTECT(lang2(anyNA, R_NilValue)), value;
2202
2203#define ERROR_IF_ANYNA(_I_) \
2204 do { \
2205 if ((_I_) != R_NilValue) { \
2206 SETCADR(call, _I_); \
2207 PROTECT(value = eval(call, R_BaseEnv)); \
2208 if (asLogical(value)) \
2209 error(_("NA subscripts in %s not supported for '%s' inheriting from %s"), \
2210 "x[i,j]", "x", "sparseMatrix"); \
2211 UNPROTECT(1); \
2212 } \
2213 } while (0)
2214
2215 ERROR_IF_ANYNA(i);
2216 ERROR_IF_ANYNA(j);
2217
2218#undef ERROR_IF_ANYNA
2219
2220 UNPROTECT(1);
2221
2222 switch (cl[2]) {
2223 case 'C':
2224 return CsparseMatrix_subscript_2ary(x, i, j, cl);
2225 case 'R':
2226 return RsparseMatrix_subscript_2ary(x, i, j, cl);
2227 case 'T':
2228 {
2229 char cl_[] = "..CMatrix";
2230 cl_[0] = cl[0];
2231 cl_[1] = cl[1];
2232
2233 /* defined in ./coerce.c : */
2234 SEXP sparse_as_Csparse(SEXP, const char *);
2235 SEXP sparse_as_Tsparse(SEXP, const char *);
2236
2237 x = sparse_as_Csparse(x, cl);
2238 PROTECT(x);
2239 x = CsparseMatrix_subscript_2ary(x, i, j, cl_);
2240 UNPROTECT(1);
2241 PROTECT(x);
2242 x = sparse_as_Tsparse(x, valid[R_check_class_etc(x, valid)]);
2243 UNPROTECT(1);
2244 return x;
2245 }
2246 case 'i':
2247 return diagonalMatrix_subscript_2ary(x, i, j, cl);
2248 default:
2249 return indMatrix_subscript_2ary(x, i, j, cl);
2250 }
2251}