
Commit 128af13

Github action: auto-update.
1 parent 8891776 commit 128af13

115 files changed

Lines changed: 1220 additions & 947 deletions


dev/_downloads/02fe230fcb90df96787f11e615bb0af8/plot_guide_for_constrained_cp.py

Lines changed: 11 additions & 10 deletions
@@ -8,10 +8,10 @@
 # Introduction
 # -----------------------
 # Since version 0.7, Tensorly includes constrained CP decomposition which penalizes or
-# constrains factors as chosen by the user. The proposed implementation of constrained CP uses the
+# constrains factors as chosen by the user. The proposed implementation of constrained CP uses the
 # Alternating Optimization Alternating Direction Method of Multipliers (AO-ADMM) algorithm from [1] which
 # solves alternatively convex optimization problem using primal-dual optimization. In constrained CP
-# decomposition, an auxilliary factor is introduced which is constrained or regularized using an operator called the
+# decomposition, an auxilliary factor is introduced which is constrained or regularized using an operator called the
 # proximal operator. The proximal operator may therefore change according to the selected constraint or penalization.
 #
 # Tensorly provides several constraints and their corresponding proximal operators, each can apply to one or all factors in the CP decomposition:
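For context while reading the hunks below: the example builds on calls of the following shape, shown here as a minimal sketch (TensorLy >= 0.7 assumed; the random tensor, its shape, and the rank are illustrative stand-ins, not values from the commit).

import numpy as np
import tensorly as tl
from tensorly.decomposition import constrained_parafac

# Illustrative data: a small random third-order tensor.
tensor = tl.tensor(np.random.rand(10, 10, 10))
rank = 3

# A boolean constraint such as non-negativity can be applied to all modes at once.
_, factors = constrained_parafac(tensor, rank=rank, non_negative=True)

Scalar-valued constraints and per-mode dictionaries follow the same call shape, as the later hunks show.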
@@ -94,7 +94,7 @@
 fig = plt.figure()
 for i in range(rank):
     plt.plot(factors[0][:, i])
-    plt.legend(['1. column', '2. column', '3. column'], loc='upper left')
+    plt.legend(["1. column", "2. column", "3. column"], loc="upper left")

 ##############################################################################
 # Constraints requiring a scalar input can be used similarly as follows:
@@ -103,11 +103,11 @@
 ##############################################################################
 # The same regularization coefficient l1_reg is used for all the modes. Here the l1 penalization induces sparsity given that the regularization coefficient is large enough.
 fig = plt.figure()
-plt.title('Histogram of 1. factor')
+plt.title("Histogram of 1. factor")
 _, _, _ = plt.hist(factors[0].flatten())

 fig = plt.figure()
-plt.title('Histogram of 2. factor')
+plt.title("Histogram of 2. factor")
 _, _, _ = plt.hist(factors[1].flatten())

 ##############################################################################
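The histograms changed in this hunk come from a run where one scalar sets the same L1 penalty on every mode, roughly as in this sketch (reusing tensor and rank from the sketch above; the coefficient 0.05 is illustrative, not taken from the commit).

# A single scalar value applies the same L1 regularization to every mode.
_, factors = constrained_parafac(tensor, rank=rank, l1_reg=0.05)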
@@ -133,15 +133,15 @@
 _, factors = constrained_parafac(tensor, rank=rank, l1_reg=[0.01, 0.02, 0.03])

 fig = plt.figure()
-plt.title('Histogram of 1. factor')
+plt.title("Histogram of 1. factor")
 _, _, _ = plt.hist(factors[0].flatten())

 fig = plt.figure()
-plt.title('Histogram of 2. factor')
+plt.title("Histogram of 2. factor")
 _, _, _ = plt.hist(factors[1].flatten())

 fig = plt.figure()
-plt.title('Histogram of 3. factor')
+plt.title("Histogram of 3. factor")
 _, _, _ = plt.hist(factors[2].flatten())

 ##############################################################################
@@ -150,8 +150,9 @@
 # To use different constraint for different modes, the dictionary structure
 # should be preferred:

-_, factors = constrained_parafac(tensor, rank=rank, non_negative={1:True}, l1_reg={0: 0.01},
-                                 l2_square_reg={2: 0.01})
+_, factors = constrained_parafac(
+    tensor, rank=rank, non_negative={1: True}, l1_reg={0: 0.01}, l2_square_reg={2: 0.01}
+)

 ##############################################################################
 # In the dictionary, `key` is the selected mode and `value` is a scalar value or

dev/_downloads/0d7c4ccdff2f531825995c8fa152400c/plot_guide_for_constrained_cp.ipynb

Lines changed: 5 additions & 5 deletions
@@ -11,7 +11,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"## Introduction\nSince version 0.7, Tensorly includes constrained CP decomposition which penalizes or\nconstrains factors as chosen by the user. The proposed implementation of constrained CP uses the \nAlternating Optimization Alternating Direction Method of Multipliers (AO-ADMM) algorithm from [1] which\nsolves alternatively convex optimization problem using primal-dual optimization. In constrained CP\ndecomposition, an auxilliary factor is introduced which is constrained or regularized using an operator called the \nproximal operator. The proximal operator may therefore change according to the selected constraint or penalization.\n\nTensorly provides several constraints and their corresponding proximal operators, each can apply to one or all factors in the CP decomposition:\n\n1. Non-negativity\n * `non_negative` in signature\n * Prevents negative values in CP factors.\n2. L1 regularization\n * `l1_reg` in signature\n * Adds a L1 regularization term on the CP factors to the CP cost function, this promotes sparsity in the CP factors. The user chooses the regularization amount.\n3. L2 regularization\n * `l2_reg` in signature\n * Adds a L2 regularization term on the CP factors to the CP cost function. The user chooses the regularization amount.\n4. L2 square regularization\n * `l2_square_reg` in signature\n * Adds a L2 regularization term on the CP factors to the CP cost function. The user chooses the regularization amount.\n5. Unimodality\n * `unimodality` in signature\n * This constraint acts columnwise on the factors\n * Impose that each column of the factors is unimodal (there is only one local maximum, like a Gaussian).\n6. Simplex\n * `simplex` in signature\n * This constraint acts columnwise on the factors\n * Impose that each column of the factors lives on the simplex or user-defined radius (entries are nonnegative and sum to a user-defined positive parameter columnwise).\n7. Normalization\n * `normalize` in signature\n * Impose that the largest absolute value in the factors elementwise is 1.\n8. Normalized sparsity\n * `normalized_sparsity` in signature\n * This constraint acts columnwise on the factors\n * Impose that the columns of factors are both normalized with the L2 norm, and k-sparse (at most k-nonzeros per column) with k user-defined.\n9. Soft sparsity\n * `soft_sparsity` in signature\n * This constraint acts columnwise on the factors\n * Impose that the columns of factors have L1 norm bounded by a user-defined threshold.\n10. Smoothness\n * `smoothness` in signature\n * This constraint acts columnwise on the factors\n * Favor smoothness in factors columns by penalizing the L2 norm of finite differences. The user chooses the regularization amount. The proximal operator in fact solves a banded system.\n11. Monotonicity\n * `monotonicity` in signature\n * This constraint acts columnwise on the factors\n * Impose that the factors are either always increasing or decreasing (user-specified) columnwise. This is based on isotonic regression.\n12. Hard sparsity\n * `hard_sparsity` in signature\n * This constraint acts columnwise on the factors\n * Impose that each column of the factors has at most k nonzero entries (k is user-defined).\n\nWhile some of these constraints (2, 3, 4, 6, 8, 9, 12) require a scalar\ninput as its parameter or regularizer, boolean input could be enough\nfor other constraints (1, 5, 7, 10, 11). Selection of one of these\nconstraints for all mode (or factors) or using different constraints for different modes are both supported.\n\n"
+"## Introduction\nSince version 0.7, Tensorly includes constrained CP decomposition which penalizes or\nconstrains factors as chosen by the user. The proposed implementation of constrained CP uses the\nAlternating Optimization Alternating Direction Method of Multipliers (AO-ADMM) algorithm from [1] which\nsolves alternatively convex optimization problem using primal-dual optimization. In constrained CP\ndecomposition, an auxilliary factor is introduced which is constrained or regularized using an operator called the\nproximal operator. The proximal operator may therefore change according to the selected constraint or penalization.\n\nTensorly provides several constraints and their corresponding proximal operators, each can apply to one or all factors in the CP decomposition:\n\n1. Non-negativity\n * `non_negative` in signature\n * Prevents negative values in CP factors.\n2. L1 regularization\n * `l1_reg` in signature\n * Adds a L1 regularization term on the CP factors to the CP cost function, this promotes sparsity in the CP factors. The user chooses the regularization amount.\n3. L2 regularization\n * `l2_reg` in signature\n * Adds a L2 regularization term on the CP factors to the CP cost function. The user chooses the regularization amount.\n4. L2 square regularization\n * `l2_square_reg` in signature\n * Adds a L2 regularization term on the CP factors to the CP cost function. The user chooses the regularization amount.\n5. Unimodality\n * `unimodality` in signature\n * This constraint acts columnwise on the factors\n * Impose that each column of the factors is unimodal (there is only one local maximum, like a Gaussian).\n6. Simplex\n * `simplex` in signature\n * This constraint acts columnwise on the factors\n * Impose that each column of the factors lives on the simplex or user-defined radius (entries are nonnegative and sum to a user-defined positive parameter columnwise).\n7. Normalization\n * `normalize` in signature\n * Impose that the largest absolute value in the factors elementwise is 1.\n8. Normalized sparsity\n * `normalized_sparsity` in signature\n * This constraint acts columnwise on the factors\n * Impose that the columns of factors are both normalized with the L2 norm, and k-sparse (at most k-nonzeros per column) with k user-defined.\n9. Soft sparsity\n * `soft_sparsity` in signature\n * This constraint acts columnwise on the factors\n * Impose that the columns of factors have L1 norm bounded by a user-defined threshold.\n10. Smoothness\n * `smoothness` in signature\n * This constraint acts columnwise on the factors\n * Favor smoothness in factors columns by penalizing the L2 norm of finite differences. The user chooses the regularization amount. The proximal operator in fact solves a banded system.\n11. Monotonicity\n * `monotonicity` in signature\n * This constraint acts columnwise on the factors\n * Impose that the factors are either always increasing or decreasing (user-specified) columnwise. This is based on isotonic regression.\n12. Hard sparsity\n * `hard_sparsity` in signature\n * This constraint acts columnwise on the factors\n * Impose that each column of the factors has at most k nonzero entries (k is user-defined).\n\nWhile some of these constraints (2, 3, 4, 6, 8, 9, 12) require a scalar\ninput as its parameter or regularizer, boolean input could be enough\nfor other constraints (1, 5, 7, 10, 11). Selection of one of these\nconstraints for all mode (or factors) or using different constraints for different modes are both supported.\n\n"
 ]
 },
 {
@@ -58,7 +58,7 @@
 },
 "outputs": [],
 "source": [
-"fig = plt.figure()\nfor i in range(rank):\n    plt.plot(factors[0][:, i])\n    plt.legend(['1. column', '2. column', '3. column'], loc='upper left')"
+"fig = plt.figure()\nfor i in range(rank):\n    plt.plot(factors[0][:, i])\n    plt.legend([\"1. column\", \"2. column\", \"3. column\"], loc=\"upper left\")"
 ]
 },
 {
@@ -94,7 +94,7 @@
 },
 "outputs": [],
 "source": [
-"fig = plt.figure()\nplt.title('Histogram of 1. factor')\n_, _, _ = plt.hist(factors[0].flatten())\n\nfig = plt.figure()\nplt.title('Histogram of 2. factor')\n_, _, _ = plt.hist(factors[1].flatten())"
+"fig = plt.figure()\nplt.title(\"Histogram of 1. factor\")\n_, _, _ = plt.hist(factors[0].flatten())\n\nfig = plt.figure()\nplt.title(\"Histogram of 2. factor\")\n_, _, _ = plt.hist(factors[1].flatten())"
 ]
 },
 {
@@ -137,7 +137,7 @@
 },
 "outputs": [],
 "source": [
-"_, factors = constrained_parafac(tensor, rank=rank, l1_reg=[0.01, 0.02, 0.03])\n\nfig = plt.figure()\nplt.title('Histogram of 1. factor')\n_, _, _ = plt.hist(factors[0].flatten())\n\nfig = plt.figure()\nplt.title('Histogram of 2. factor')\n_, _, _ = plt.hist(factors[1].flatten())\n\nfig = plt.figure()\nplt.title('Histogram of 3. factor')\n_, _, _ = plt.hist(factors[2].flatten())"
+"_, factors = constrained_parafac(tensor, rank=rank, l1_reg=[0.01, 0.02, 0.03])\n\nfig = plt.figure()\nplt.title(\"Histogram of 1. factor\")\n_, _, _ = plt.hist(factors[0].flatten())\n\nfig = plt.figure()\nplt.title(\"Histogram of 2. factor\")\n_, _, _ = plt.hist(factors[1].flatten())\n\nfig = plt.figure()\nplt.title(\"Histogram of 3. factor\")\n_, _, _ = plt.hist(factors[2].flatten())"
 ]
 },
 {
@@ -155,7 +155,7 @@
 },
 "outputs": [],
 "source": [
-"_, factors = constrained_parafac(tensor, rank=rank, non_negative={1:True}, l1_reg={0: 0.01},\n                                 l2_square_reg={2: 0.01})"
+"_, factors = constrained_parafac(\n    tensor, rank=rank, non_negative={1: True}, l1_reg={0: 0.01}, l2_square_reg={2: 0.01}\n)"
 ]
 },
 {

dev/_downloads/2af0682e07ba2fb9ad2fd36324f584e8/plot_cp_line_search.py

Lines changed: 5 additions & 4 deletions
@@ -4,6 +4,7 @@

 Example on how to use :func:`tensorly.decomposition.parafac` with line search to accelerate convergence.
 """
+
 import matplotlib.pyplot as plt

 from time import time
@@ -24,7 +25,7 @@
 err_min = tl.norm(tl.cp_to_tensor(fac) - tensor)

 for ii, toll in enumerate(tol):
-    # Run PARAFAC decomposition without line search and time
+    # Run PARAFAC decomposition without line search and time
     start = time()
     cp = CP(rank=3, n_iter_max=2000000, tol=toll, linesearch=False)
     fac = cp.fit_transform(tensor)
@@ -44,10 +45,10 @@

 fig = plt.figure()
 ax = fig.add_subplot(1, 1, 1)
-ax.loglog(tt, err - err_min, '.', label="No line search")
-ax.loglog(tt_ls, err_ls - err_min, '.r', label="Line search")
+ax.loglog(tt, err - err_min, ".", label="No line search")
+ax.loglog(tt_ls, err_ls - err_min, ".r", label="Line search")
 ax.legend()
 ax.set_ylabel("Time")
 ax.set_xlabel("Error")

-plt.show()
+plt.show()
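The script's core comparison toggles the `linesearch` flag on the `CP` class; a self-contained sketch of that pattern follows (the random tensor, tolerance, and iteration cap are illustrative, not the example's exact values).

from time import time

import numpy as np
import tensorly as tl
from tensorly.decomposition import CP

# Illustrative data: a small random third-order tensor.
tensor = tl.tensor(np.random.random((12, 12, 12)))

# Fit the same rank-3 model once without and once with line search, timing each run.
start = time()
fac_plain = CP(rank=3, n_iter_max=2000, tol=1e-8, linesearch=False).fit_transform(tensor)
time_plain = time() - start

start = time()
fac_ls = CP(rank=3, n_iter_max=2000, tol=1e-8, linesearch=True).fit_transform(tensor)
time_ls = time() - start

# Reconstruction error of each fit, to pair with the timings.
err_plain = tl.norm(tl.cp_to_tensor(fac_plain) - tensor)
err_ls = tl.norm(tl.cp_to_tensor(fac_ls) - tensor)
print(time_plain, float(err_plain), time_ls, float(err_ls))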

dev/_downloads/2d1781e05b6ef942fb097ff181023668/plot_covid.py

Lines changed: 23 additions & 9 deletions
@@ -18,7 +18,7 @@
 # to comprehensively profile the interactions between the antibodies and
 # `Fc receptors <https://en.wikipedia.org/wiki/Fc_receptor>`_ alongside other types of immunological
 # and demographic data. Here, we will apply CP decomposition to a
-# `COVID-19 system serology dataset <https://www.sciencedirect.com/science/article/pii/S0092867420314598>`_.
+# `COVID-19 system serology dataset <https://www.sciencedirect.com/science/article/pii/S0092867420314598>`_.
 # In this dataset, serum antibodies
 # of 438 samples collected from COVID-19 patients were systematically profiled by their binding behavior
 # to SARS-CoV-2 (the virus that causes COVID-19) antigens and Fc receptors activities. Samples are
@@ -45,20 +45,26 @@
 # Now we apply CP decomposition to this dataset.

 comps = np.arange(1, 7)
-CMTFfacs = [parafac(data.tensor, cc, tol=1e-10, n_iter_max=1000,
-                    linesearch=True, orthogonalise=2) for cc in comps]
+CMTFfacs = [
+    parafac(
+        data.tensor, cc, tol=1e-10, n_iter_max=1000, linesearch=True, orthogonalise=2
+    )
+    for cc in comps
+]

 ##############################################################################
 # To evaluate how well CP decomposition explains the variance in the dataset, we plot the percent
 # variance reconstructed (R2X) for a range of ranks.

+
 def reconstructed_variance(tFac, tIn=None):
-    """ This function calculates the amount of variance captured (R2X) by the tensor method. """
+    """This function calculates the amount of variance captured (R2X) by the tensor method."""
     tMask = np.isfinite(tIn)
     vTop = np.sum(np.square(tl.cp_to_tensor(tFac) * tMask - np.nan_to_num(tIn)))
     vBottom = np.sum(np.square(np.nan_to_num(tIn)))
     return 1.0 - vTop / vBottom

+
 fig1 = plt.figure()
 CMTFR2X = np.array([reconstructed_variance(f, data.tensor) for f in CMTFfacs])
 plt.plot(comps, CMTFR2X, "bo")
@@ -81,8 +87,8 @@ def reconstructed_variance(tFac, tIn=None):
 tfac.factors[1][:, 0] *= -1
 tfac.factors[2][:, 0] *= -1

-fig2, ax = plt.subplots(1, 3, figsize=(16,6))
-for ii in [0,1,2]:
+fig2, ax = plt.subplots(1, 3, figsize=(16, 6))
+for ii in [0, 1, 2]:
     fac = tfac.factors[ii]
     scales = np.linalg.norm(fac, ord=np.inf, axis=0)
     fac /= scales
@@ -92,12 +98,20 @@ def reconstructed_variance(tFac, tIn=None):
     ax[ii].set_xticklabels(["Comp. 1", "Comp. 2"])
     ax[ii].set_yticks(range(len(data.ticks[ii])))
     if ii == 0:
-        ax[0].set_yticklabels([data.ticks[0][i] if i==0 or data.ticks[0][i]!=data.ticks[0][i-1]
-                               else "" for i in range(len(data.ticks[0]))])
+        ax[0].set_yticklabels(
+            [
+                (
+                    data.ticks[0][i]
+                    if i == 0 or data.ticks[0][i] != data.ticks[0][i - 1]
+                    else ""
+                )
+                for i in range(len(data.ticks[0]))
+            ]
+        )
     else:
         ax[ii].set_yticklabels(data.ticks[ii])
     ax[ii].set_title(data.dims[ii])
-    ax[ii].set_aspect('auto')
+    ax[ii].set_aspect("auto")

 fig2.colorbar(ScalarMappable(norm=plt.Normalize(-1, 1), cmap="PiYG"))