
Commit 1526f4c

Fix internal calls to avoid fortran warnings in tutorials

1 parent b8e183b

6 files changed, +74 -29 lines changed

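Background for the changes below: pyttb keeps dense tensor data in Fortran ("F") memory order, while NumPy defaults to C order, so an internal call that hands over a C-ordered array with copy=False triggers a warning and a forced copy. A minimal plain-NumPy sketch of the mismatch these fixes avoid:

    import numpy as np

    c_data = np.arange(24).reshape((2, 3, 4))  # NumPy default: C-contiguous
    print(c_data.flags["F_CONTIGUOUS"])        # False: would trigger the warning
    f_data = np.asfortranarray(c_data)         # convert up front instead
    print(f_data.flags["F_CONTIGUOUS"])        # True: safe to pass with copy=False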

pyttb/cp_apr.py (+9 -5)

@@ -521,7 +521,9 @@ def tt_cp_apr_pdnr( # noqa: PLR0912,PLR0913,PLR0915
         if isinstance(input_tensor, ttb.tensor) and isSparse is False:
             # Data is not a sparse tensor.
             Pi = tt_calcpi_prowsubprob(input_tensor, M, rank, n, N, isSparse)
-            X_mat = input_tensor.to_tenmat(np.array([n]), copy=False).data
+            X_mat = input_tensor.to_tenmat(
+                np.array([n], order=input_tensor.order), copy=False
+            ).data

         num_rows = M.factor_matrices[n].shape[0]
         isRowNOTconverged = np.zeros((num_rows,))
@@ -876,7 +878,9 @@ def tt_cp_apr_pqnr( # noqa: PLR0912,PLR0913,PLR0915
         if not isinstance(input_tensor, ttb.sptensor) and not isSparse:
             # Data is not a sparse tensor.
             Pi = tt_calcpi_prowsubprob(input_tensor, M, rank, n, N, isSparse)
-            X_mat = input_tensor.to_tenmat(np.array([n]), copy=False).data
+            X_mat = input_tensor.to_tenmat(
+                np.array([n], order=input_tensor.order), copy=False
+            ).data

         num_rows = M.factor_matrices[n].shape[0]
         isRowNOTconverged = np.zeros((num_rows,))
@@ -1772,7 +1776,7 @@ def calculate_phi( # noqa: PLR0913
             )
             Phi[:, r] = Yr
     else:
-        Xn = Data.to_tenmat(np.array([factorIndex]), copy=False).data
+        Xn = Data.to_tenmat(np.array([factorIndex], order=Data.order), copy=False).data
         V = Model.factor_matrices[factorIndex].dot(Pi.transpose())
         W = Xn / np.maximum(V, epsilon)
         Y = W.dot(Pi)
@@ -1817,8 +1821,8 @@ def tt_loglikelihood(
         np.sum(Data.vals * np.log(np.sum(A, axis=1))[:, None])
         - np.sum(Model.factor_matrices[0])
     )
-    dX = Data.to_tenmat(np.array([1]), copy=False).data
-    dM = Model.to_tenmat(np.array([1]), copy=False).data
+    dX = Data.to_tenmat(np.array([1], order=Data.order), copy=False).data
+    dM = Model.to_tenmat(np.array([1], order=Model.order), copy=False).data
     f = 0
     for i in range(dX.shape[0]):
         for j in range(dX.shape[1]):
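
Each of these call sites now threads the source object's order into the mode-index array passed to the unfolding, so to_tenmat(..., copy=False) can keep a zero-copy view without logging a warning. A hedged usage sketch of the pattern (assumes a dense pyttb tensor via ttb.tenrand and the to_tenmat signature used above):

    import numpy as np
    import pyttb as ttb

    X = ttb.tenrand((4, 3, 2))  # dense tensor; X.order is "F"
    n = 0
    # Matching order keeps the mode-n unfolding warning-free under copy=False
    X_mat = X.to_tenmat(np.array([n], order=X.order), copy=False).data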

pyttb/ktensor.py (+22 -7)

@@ -74,7 +74,7 @@ class ktensor:
 
     __slots__ = ("weights", "factor_matrices")
 
-    def __init__(
+    def __init__( # noqa: PLR0912
         self,
         factor_matrices: Optional[Sequence[np.ndarray]] = None,
         weights: Optional[np.ndarray] = None,
@@ -147,7 +147,7 @@ def __init__(
 
         # Empty constructor
         if factor_matrices is None and weights is None:
-            self.weights = np.array([])
+            self.weights = np.array([], order=self.order)
             self.factor_matrices: List[np.ndarray] = []
             return
 
@@ -183,12 +183,17 @@ def __init__(
                 )
             # make copy or use reference
             if copy:
-                self.weights = weights.copy()
+                self.weights = weights.copy(self.order)
             else:
-                self.weights = weights
+                if not self._matches_order(weights):
+                    logging.warning(
+                        f"Selected no copy, but input data isn't {self.order} ordered "
+                        "so must copy."
+                    )
+                self.weights = np.asfortranarray(weights)
         else:
             # create weights if not provided
-            self.weights = np.ones(num_components)
+            self.weights = np.ones(num_components, order=self.order)
 
         # process factor_matrices
         if copy:
@@ -419,6 +424,14 @@ def order(self) -> Literal["F"]:
         """Return the data layout of the underlying storage."""
         return "F"
 
+    def _matches_order(self, array: np.ndarray) -> bool:
+        """Check if provided array matches tensor memory layout."""
+        if array.flags["C_CONTIGUOUS"] and self.order == "C":
+            return True
+        if array.flags["F_CONTIGUOUS"] and self.order == "F":
+            return True
+        return False
+
     def arrange(
         self,
         weight_factor: Optional[int] = None,
@@ -924,7 +937,9 @@ def min_split_dims(dims: Tuple[int, ...]):
         data = (
             ttb.khatrirao(*self.factor_matrices[:i_split], reverse=True) * self.weights
         ) @ ttb.khatrirao(*self.factor_matrices[i_split:], reverse=True).T
-        return ttb.tensor(data, self.shape, copy=False)
+        # Copy needed to ensure F order. Transpose above means both elements are
+        # different layout. If originally in C order can save on this copy.
+        return ttb.tensor(data, self.shape, copy=True)
 
     def to_tenmat(
         self,
@@ -1678,7 +1693,7 @@ def score(
         # Compute all possible vector-vector congruences.
 
        # Compute every pair for each mode
-        Cbig = ttb.tensor.from_function(np.zeros, (RA, RB, N))
+        Cbig = ttb.tensor(np.zeros((RA, RB, N), order=self.order))
         for n in range(N):
             Cbig[:, :, n] = np.abs(A.factor_matrices[n].T @ B.factor_matrices[n])
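
The new _matches_order helper is a plain check of NumPy's contiguity flags. A standalone sketch of the same logic (a hypothetical free function, for illustration only):

    import numpy as np

    def matches_order(array: np.ndarray, order: str) -> bool:
        """True when the array's memory layout matches the requested order."""
        if array.flags["C_CONTIGUOUS"] and order == "C":
            return True
        if array.flags["F_CONTIGUOUS"] and order == "F":
            return True
        return False

    print(matches_order(np.ones((2, 3)), "F"))             # False: C by default
    print(matches_order(np.ones((2, 3), order="F"), "F"))  # True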

pyttb/sptensor.py (+6 -1)

@@ -386,6 +386,11 @@ def copy(self) -> sptensor:
         """
         return ttb.sptensor(self.subs, self.vals, self.shape, copy=True)
 
+    @property
+    def order(self) -> Literal["F"]:
+        """Return the data layout of the underlying storage."""
+        return "F"
+
     def __deepcopy__(self, memo):
         """Return deep copy of this sptensor."""
         return self.copy()
@@ -708,7 +713,7 @@ def full(self) -> ttb.tensor:
             return ttb.tensor()
 
         # Create a dense zero tensor B that is the same shape as A
-        B = ttb.tensor(np.zeros(shape=self.shape), copy=False)
+        B = ttb.tensor(np.zeros(shape=self.shape, order=self.order), copy=False)
 
         if self.subs.size == 0:
             return B
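
With sptensor now exposing order, call sites can allocate dense buffers that already match the sparse tensor's layout, exactly as full() does above. A short sketch (assumes sptensor accepts shape-only construction):

    import numpy as np
    import pyttb as ttb

    S = ttb.sptensor(shape=(4, 3, 2))             # empty sparse tensor (assumed API)
    B = np.zeros(shape=S.shape, order=S.order)    # dense buffer, F-ordered to match
    assert B.flags["F_CONTIGUOUS"]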

pyttb/tenmat.py (+7 -1)

@@ -6,6 +6,7 @@
 
 from __future__ import annotations
 
+import logging
 from math import prod
 from typing import Literal, Optional, Tuple, Union
 
@@ -255,7 +256,12 @@ def to_tensor(self, copy: bool = True) -> ttb.tensor:
         data = self.data.copy()
         data = np.reshape(data, np.array(shape)[order], order=self.order)
         if order.size > 1:
-            data = np.transpose(data, np.argsort(order))
+            if not copy:
+                logging.warning(
+                    "This tenmat cannot be trivially unwrapped into tensor "
+                    "so must copy."
+                )
+            data = np.asfortranarray(np.transpose(data, np.argsort(order)))
         return ttb.tensor(data, shape, copy=False)
 
     def ctranspose(self) -> tenmat:
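
With this change, to_tensor(copy=False) only stays zero-copy when the data can be reshaped without a transpose; a multiway unfolding now logs a warning and copies. A hedged sketch mirroring the updated test below (assumes to_tenmat accepts a row-dims array, as in the cp_apr.py changes above):

    import numpy as np
    import pyttb as ttb

    t4 = ttb.tenrand((4, 3, 2, 2))
    tm = t4.to_tenmat(np.array([1, 2]))  # rows from modes 1 and 2
    back = tm.to_tensor(copy=False)      # warns: needs a transpose, so copies
    assert not np.may_share_memory(back.data, tm.data)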

pyttb/tensor.py (+23 -12)

@@ -152,7 +152,7 @@ def __init__(
                 f"Selected no copy, but input data isn't {self.order} ordered "
                 "so must copy."
             )
-            self.data = data
+            self.data = np.asfortranarray(data)
             self.shape = shape
             return
 
@@ -611,6 +611,7 @@ def to_tenmat(
             (rprod, cprod),
             order=self.order,
         )
+        assert data.flags["F_CONTIGUOUS"]
         return ttb.tenmat(data, rdims, cdims, tshape=tshape, copy=copy)
 
     def innerprod(
@@ -1161,7 +1162,8 @@ def permute(self, order: OneDArray) -> tensor:
             return self.copy()
 
         # Np transpose does error checking on order, acts as permutation
-        return ttb.tensor(np.transpose(self.data, order), copy=False)
+
+        return ttb.tensor(np.asfortranarray(np.transpose(self.data, order)), copy=False)
 
     def reshape(self, shape: Shape) -> tensor:
         """
@@ -1361,7 +1363,7 @@ def symmetrize( # noqa: PLR0912,PLR0915
         avg = classSum / classNum
 
         newdata = avg[linclassidx]
-        data = np.reshape(newdata, self.shape)
+        data = np.reshape(newdata, self.shape, order=self.order)
 
         return ttb.tensor(data, copy=False)
 
@@ -1521,7 +1523,7 @@ def ttm(
         )
         Y_data: np.ndarray = np.reshape(newdata, newshape, order=self.order)
         Y_data = np.transpose(Y_data, np.argsort(order))
-        return ttb.tensor(Y_data, copy=False)
+        return ttb.tensor(Y_data, copy=True)
 
     def ttt(
         self,
@@ -1880,12 +1882,20 @@ def tenfun_binary(
         if not isinstance(other, (float, int)):
             Y = other.data
         else:
-            Y = np.array(other)
+            Y = np.array(other, order=self.order)
 
         if not first:
             Y, X = X, Y
         data = function_handle(X, Y)
-        Z = ttb.tensor(data, copy=False)
+        copy = False
+        if not self._matches_order(data):
+            copy = True
+            logging.warning(
+                f"Tenfun function expects data of order {self.order}."
+                f" Update function to return data or the order to avoid "
+                "extra data copy."
+            )
+        Z = ttb.tensor(data, copy=copy)
         return Z
 
     def tenfun_unary(
@@ -1913,14 +1923,14 @@ def tenfun_unary(
             ), f"Tensor {i} is not the same size as the first tensor input"
         if len(inputs) == 0:
             X = self.data
-            X = np.reshape(X, (1, -1))
+            X = np.reshape(X, (1, -1), order=self.order)
         else:
-            X = np.zeros((len(inputs) + 1, np.prod(sz)))
-            X[0, :] = np.reshape(self.data, (np.prod(sz)))
+            X = np.zeros((len(inputs) + 1, np.prod(sz)), order=self.order)
+            X[0, :] = np.reshape(self.data, (np.prod(sz)), order=self.order)
             for i, an_input in enumerate(inputs):
-                X[i + 1, :] = np.reshape(an_input.data, (np.prod(sz)))
+                X[i + 1, :] = np.reshape(an_input.data, (np.prod(sz)), order=self.order)
         data = function_handle(X)
-        data = np.reshape(data, sz)
+        data = np.reshape(data, sz, order=self.order)
         Z = ttb.tensor(data, copy=False)
         return Z
 
@@ -2170,7 +2180,8 @@ def __getitem__(self, item): # noqa: PLR0912
         if newsiz.size == 0:
             a = newdata.item()
         else:
-            a = ttb.tensor(newdata, copy=False)
+            # Copy data to ensure correct data ordering
+            a = ttb.tensor(newdata, copy=True)
         return a
 
         # *** CASE 2a: Subscript indexing ***
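
The tenfun_binary change tolerates element-wise functions that return C-ordered results, at the cost of a warning and a copy. A plain-NumPy sketch of that check (a hypothetical wrap_result helper, not the pyttb API):

    import logging
    import numpy as np

    def wrap_result(data: np.ndarray, order: str = "F") -> np.ndarray:
        """Copy only when the result does not match the expected layout."""
        matches = (order == "C" and data.flags["C_CONTIGUOUS"]) or (
            order == "F" and data.flags["F_CONTIGUOUS"]
        )
        if matches:
            return data  # already in the right order: no copy
        logging.warning("Result is not %s ordered; copying.", order)
        return np.asfortranarray(data) if order == "F" else np.ascontiguousarray(data)

    out = wrap_result(np.ones((2, 3)) + 1.0)  # C-ordered result gets copied
    assert out.flags["F_CONTIGUOUS"]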

tests/test_tenmat.py (+7 -3)

@@ -2,6 +2,7 @@
 # LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the
 # U.S. Government retains certain rights in this software.
 
+import logging
 from copy import deepcopy
 
 import numpy as np
@@ -324,7 +325,7 @@ def test_tenmat_initialization_from_tensor_type(
         assert exc in str(excinfo)
 
 
-def test_tenmat_to_tensor():
+def test_tenmat_to_tensor(caplog):
     tensorInstance = ttb.tenrand((4, 3))
     tensorInstance4 = ttb.tenrand((4, 3, 2, 2))
     # tenmat
@@ -345,8 +346,11 @@ def test_tenmat_to_tensor():
     assert not np.may_share_memory(tensorTenmatInstance4.data, tenmatInstance4.data)
 
     # Reference instead of copy
-    tensorTenmatInstance4_ref = tenmatInstance4.to_tensor(copy=False)
-    assert np.may_share_memory(tensorTenmatInstance4_ref.data, tenmatInstance4.data)
+    with caplog.at_level(logging.WARNING):
+        tensorTenmatInstance4_ref = tenmatInstance4.to_tensor(copy=False)
+    assert not np.may_share_memory(
+        tensorTenmatInstance4_ref.data, tenmatInstance4.data
+    )
 
 
 def test_tenmat_ctranspose(sample_tenmat_4way):
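
Since to_tensor(copy=False) on a multiway tenmat now copies, the test wraps the call in caplog and flips the memory-sharing assertion. If one also wanted to pin down the message, caplog exposes the captured text; a possible extension (standard pytest, not part of this commit):

    with caplog.at_level(logging.WARNING):
        tenmatInstance4.to_tensor(copy=False)
    assert "cannot be trivially unwrapped" in caplog.text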
