import FrankWolfe; include(joinpath(dirname(pathof(FrankWolfe)), "../examples/plot_utils.jl")) # hide

Difference-of-Convex Algorithm with Frank-Wolfe
This example shows the optimization of a difference-of-convex problem of the form:
\[\min_{x \in \mathcal{X}} \phi(x) = f(x) - g(x)\]
with $f$, $g$ convex functions with access to subgradients and $f$ smooth.
The DCA-FW algorithm constructs local convex models of $\phi$ by linearizing $g$ and approximately optimizes them with FW. It is a local method that converges to a stationary point of $\phi$.
using FrankWolfe
using LinearAlgebra
using Random
using SparseArrays
using StableRNGs
using Random

The convex functions $f$, $g$ will be generated as random convex quadratics:
\[\begin{align} f(x) & = \frac12 x^\top A x + a^\top x + c \\ g(x) & = \frac12 x^\top B x + b^\top x + d \end{align}\]
Setting up the problem functions and data
# Problem dimension; kept moderate so the example runs quickly.
const n = 500 # Reduced dimension
# Generate random positive definite matrices to ensure convexity
"""
    generate_problem_data(rng, dim=n)

Draw the random data of the DC problem from `rng`: two `dim × dim`
positive-definite matrices `A` and `B`, linear terms `a` and `b`, and
scalar offsets `c` and `d`, defining the quadratics `f` and `g`.

Returns the tuple `(A, B, a, b, c, d)`.
"""
function generate_problem_data(rng, dim::Integer=n)
    A_raw = randn(rng, dim, dim)
    # Normalize by the operator norm so the Gram matrix has spectrum in [0, 1].
    A_raw ./= opnorm(A_raw)
    # Gram matrix plus a 0.1 shift: smallest eigenvalue >= 0.1, hence
    # A is positive definite and f is (strongly) convex.
    A = A_raw' * A_raw + 0.1 * I
    B_raw = randn(rng, dim, dim)
    B_raw ./= opnorm(B_raw)
    B = B_raw' * B_raw + 0.1 * I
    a = randn(rng, dim)
    b = randn(rng, dim)
    c = randn(rng)
    # Large fixed offset: shifts the objective value without changing the
    # minimizer (makes the reported primal values large and positive).
    d = -359434.0
    return A, B, a, b, c, d
end
const rng = StableRNGs.StableRNG(1)
const A, B, a, b, c, d = generate_problem_data(rng)

We can now define the two functions.
"""
    f(x)

Evaluate the smooth convex quadratic ``f(x) = ½ xᵀAx + aᵀx + c``.
"""
f(x) = FrankWolfe.fast_dot(x, A, x) / 2 + dot(a, x) + c
"""
    grad_f!(storage, x)

In-place gradient of `f`: overwrite `storage` with `A * x + a`.
"""
function grad_f!(storage, x)
    copyto!(storage, a)
    # storage ← A*x + storage (five-argument in-place mul!)
    mul!(storage, A, x, true, true)
    return nothing
end
"""
    g(x)

Evaluate the convex quadratic ``g(x) = ½ xᵀBx + bᵀx + d`` (the
subtracted part of the DC objective).
"""
g(x) = FrankWolfe.fast_dot(x, B, x) / 2 + dot(b, x) + d
"""
    grad_g!(storage, x)

In-place gradient of `g`: overwrite `storage` with `B * x + b`.
"""
function grad_g!(storage, x)
    copyto!(storage, b)
    # storage ← B*x + storage (five-argument in-place mul!)
    mul!(storage, B, x, true, true)
    return nothing
end
# True DC objective, used only to report progress in this example;
# the solver itself never evaluates it.
"""
    phi(x)

Difference-of-convex objective `f(x) - g(x)`.
"""
phi(x) = f(x) - g(x)
# LMO over the K-sparse polytope: vertices have at most 5 nonzero
# entries, each of magnitude 1000.
lmo = FrankWolfe.KSparseLMO(5, 1000.0)
# Start from a vertex. Use the seeded `rng` (not the global RNG) so the
# example is fully reproducible.
x0 = FrankWolfe.compute_extreme_point(lmo, randn(rng, n))
# Run DCA-FW: each outer iteration linearizes g at the current iterate and
# approximately minimizes the resulting convex model with Frank-Wolfe.
x_final, primal_final, dca_gap_final, iterations, status, traj_data = FrankWolfe.dca_fw(
    f,
    grad_f!,
    g,
    grad_g!,
    lmo,
    copy(x0), # copy so x0 can be reused for the boosted run below
    max_iteration=200, # Outer iterations
    max_inner_iteration=10000, # Inner iterations
    epsilon=1e-5, # Tolerance for DCA gap
    line_search=FrankWolfe.Secant(),
    verbose=true,
    trajectory=true, # record per-iteration data in traj_data
    verbose_inner=false, # silence the inner FW runs
    print_iter=10,
    use_corrective_fw=true, # NOTE(review): corrective FW variant for the subproblems — confirm semantics in FrankWolfe docs
    use_dca_early_stopping=true,
    grad_f_workspace=collect(x0), # preallocated gradient buffers to avoid
    grad_g_workspace=collect(x0), # reallocating on every evaluation
)
# Same problem solved with boosted=true — presumably the boosted DCA
# variant; verify semantics against the FrankWolfe documentation.
# All other settings match the first run for a fair comparison.
res_boost = FrankWolfe.dca_fw(
    f,
    grad_f!,
    g,
    grad_g!,
    lmo,
    copy(x0), # identical starting point as the first run
    boosted=true,
    max_iteration=200, # Outer iterations
    max_inner_iteration=10000, # Inner iterations
    epsilon=1e-5, # Tolerance for DCA gap
    line_search=FrankWolfe.Secant(),
    verbose=true,
    trajectory=true, # record per-iteration data for plotting below
    verbose_inner=false,
    print_iter=10,
    use_corrective_fw=true,
    use_dca_early_stopping=true,
    grad_f_workspace=collect(x0), # preallocated gradient buffers
    grad_g_workspace=collect(x0),
)

Plotting the resulting trajectory
We modify the y axis to highlight that we are plotting the DCA gap, not the FW gap
# Gather the trajectories of both runs for a joint plot;
# res_boost[6] is the traj_data field of the boosted result tuple.
data = [traj_data, res_boost[6].traj_data]
label = ["DCA-FW", "DCA-FW-B"]
p_res = plot_trajectories(data, label, marker_shapes=[:o, :x])
ylabel!(p_res.subplots[3], "DCA gap")
display(p_res)

This page was generated using Literate.jl.