Commit 693b634: Merge pull request #19 from axelparmentier/giom

Prepare for v0.3.0

gdalle authored Jul 11, 2022
2 parents: ddc1f6a + b179f16

Showing 11 changed files with 31 additions and 36 deletions.
4 changes: 2 additions & 2 deletions CITATION.bib
@@ -2,7 +2,7 @@ @misc{InferOpt.jl
 author = {Guillaume Dalle, Léo Baty, Louis Bouvier and Axel Parmentier},
 title = {InferOpt.jl},
 url = {https://github.com/axelparmentier/InferOpt.jl},
-version = {v0.2.0},
+version = {v0.3.0},
 year = {2022},
-month = {6}
+month = {7}
 }
2 changes: 1 addition & 1 deletion Project.toml
@@ -1,7 +1,7 @@
 name = "InferOpt"
 uuid = "4846b161-c94e-4150-8dac-c7ae193c601f"
 authors = ["Guillaume Dalle", "Léo Baty", "Louis Bouvier", "Axel Parmentier"]
-version = "0.2.0"
+version = "0.3.0"

 [deps]
 ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
2 changes: 1 addition & 1 deletion docs/src/tutorial.md
@@ -121,7 +121,7 @@ Thanks to this smoothing, we can now train our model with a standard gradient op

 ````@example tutorial
 encoder = deepcopy(initial_encoder)
-opt = ADAM();
+opt = Adam();
 losses = Float64[]
 for epoch in 1:200
     l = 0.0
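For context on this rename: Flux.jl's 0.13 release renamed the `ADAM` optimiser to `Adam`, which is why the tutorial (and the tests below) switch names. Here is a minimal sketch of one gradient step with the renamed optimiser; the model, data, and loss are placeholders, not the tutorial's actual pipeline.

```julia
using Flux

# Minimal sketch (not the tutorial's exact code): one implicit-parameters
# gradient step with the renamed optimiser.
model = Dense(5 => 1)
opt = Adam()                                  # formerly `ADAM()` in Flux < 0.13
x, y = rand(Float32, 5), rand(Float32, 1)
pars = Flux.params(model)
grads = gradient(() -> Flux.mse(model(x), y), pars)
Flux.update!(opt, pars, grads)
```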
7 changes: 4 additions & 3 deletions src/InferOpt.jl
@@ -16,7 +16,7 @@ using StatsBase: StatsBase, sample
 using Test

 include("utils/probability_distribution.jl")
-include("utils/composition.jl")
+include("utils/pushforward.jl")

 include("interpolation/interpolation.jl")

@@ -42,8 +42,9 @@ include("ssvm/isbaseloss.jl")
 include("ssvm/zeroone_baseloss.jl")
 include("ssvm/ssvm_loss.jl")

-export FixedAtomsProbabilityDistribution, sample, compute_expectation
-export ProbabilisticComposition
+export FixedAtomsProbabilityDistribution
+export compute_expectation, compress_distribution!
+export Pushforward
 export compute_probability_distribution

 export Interpolation
6 changes: 4 additions & 2 deletions src/utils/probability_distribution.jl
@@ -46,11 +46,13 @@ end
 Base.rand(probadist::FixedAtomsProbabilityDistribution) = rand(GLOBAL_RNG, probadist)

 """
-    compress!(probadist[; atol])
+    compress_distribution!(probadist[; atol])

 Remove duplicated atoms in `probadist` (up to a tolerance on equality).
 """
-function compress!(probadist::FixedAtomsProbabilityDistribution{A,W}; atol=0) where {A,W}
+function compress_distribution!(
+    probadist::FixedAtomsProbabilityDistribution{A,W}; atol=0
+) where {A,W}
     (; atoms, weights) = probadist
     to_delete = Int[]
     for i in length(probadist):-1:1
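A hedged usage sketch for the renamed function follows. The positional constructor `FixedAtomsProbabilityDistribution(atoms, weights)` is assumed from the field names destructured above; it does not appear in this diff.

```julia
using InferOpt

# Hypothetical example: a distribution whose first two atoms coincide.
atoms = [[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]]
weights = [0.3, 0.2, 0.5]
probadist = FixedAtomsProbabilityDistribution(atoms, weights)  # assumed constructor

compress_distribution!(probadist)  # merges duplicated atoms and their weights
length(probadist)                  # 2 distinct atoms remain
```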
18 changes: 8 additions & 10 deletions src/utils/composition.jl → src/utils/pushforward.jl
@@ -1,24 +1,24 @@
 """
-    ProbabilisticComposition{L,G}
+    Pushforward{L,G}

 Differentiable composition of a probabilistic `layer` with an arbitrary function `post_processing`.

-`ProbabilisticComposition` can be used for direct regret minimization (aka learning by experience) when the post-processing returns a cost.
+`Pushforward` can be used for direct regret minimization (aka learning by experience) when the post-processing returns a cost.

 # Fields
 - `layer::L`: anything that implements `compute_probability_distribution(layer, θ; kwargs...)`
 - `post_processing::P`: callable

 See also: [`FixedAtomsProbabilityDistribution`](@ref).
 """
-struct ProbabilisticComposition{L,P}
+struct Pushforward{L,P}
     layer::L
     post_processing::P
 end

-function Base.show(io::IO, composition::ProbabilisticComposition)
+function Base.show(io::IO, composition::Pushforward)
     (; layer, post_processing) = composition
-    return print(io, "ProbabilisticComposition($layer, $post_processing)")
+    return print(io, "Pushforward($layer, $post_processing)")
 end

 """
@@ -30,25 +30,23 @@ This function is not differentiable if `composition.post_processing` isn't.

 See also: [`apply_on_atoms`](@ref).
 """
-function compute_probability_distribution(
-    composition::ProbabilisticComposition, θ; kwargs...
-)
+function compute_probability_distribution(composition::Pushforward, θ; kwargs...)
     (; layer, post_processing) = composition
     probadist = compute_probability_distribution(layer, θ; kwargs...)
     post_processed_probadist = apply_on_atoms(post_processing, probadist; kwargs...)
     return post_processed_probadist
 end

 """
-    (composition::ProbabilisticComposition)(θ)
+    (composition::Pushforward)(θ)

 Output the expectation of `composition.post_processing(X)`, where `X` follows the distribution defined by `composition.layer` applied to `θ`.

 Unlike [`compute_probability_distribution(composition, θ)`](@ref), this function is differentiable, even if `composition.post_processing` isn't.

 See also: [`compute_expectation`](@ref).
 """
-function (composition::ProbabilisticComposition)(θ::AbstractArray{<:Real}; kwargs...)
+function (composition::Pushforward)(θ::AbstractArray{<:Real}; kwargs...)
     (; layer, post_processing) = composition
     probadist = compute_probability_distribution(layer, θ; kwargs...)
     return compute_expectation(probadist, post_processing; kwargs...)
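To show the rename from the caller's side, here is a hedged sketch of the learning-by-experience pattern that the tests below exercise: a `Pushforward` wraps a perturbed maximizer and a cost function, and calling it on `θ` yields a differentiable expectation of that cost. The maximizer and cost are toy placeholders, not part of the package.

```julia
using InferOpt

# Toy oracle and cost (placeholders for a combinatorial maximizer and a cost).
function toy_maximizer(θ::AbstractVector; kwargs...)
    y = zero(θ)
    y[argmax(θ)] = 1
    return y
end
toy_cost(y; kwargs...) = -sum(y .* (1:length(y)))

perturbed = PerturbedAdditive(toy_maximizer; ε=1.0, nb_samples=10)
experience_loss = Pushforward(perturbed, toy_cost)    # formerly ProbabilisticComposition

θ = randn(5)
experience_loss(θ)                                    # differentiable scalar expectation
compute_probability_distribution(experience_loss, θ)  # distribution over cost values
```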
8 changes: 3 additions & 5 deletions test/argmax.jl
@@ -77,21 +77,19 @@ pipelines_experience = [
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
-            PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost
-        ),
+        loss=Pushforward(PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             PerturbedMultiplicative(true_maximizer; ε=1.0, nb_samples=10), cost
         ),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             RegularizedGeneric(true_maximizer, half_square_norm, identity), cost
         ),
     ),
8 changes: 3 additions & 5 deletions test/paths.jl
@@ -79,21 +79,19 @@ pipelines_experience = [
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
-            PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost
-        ),
+        loss=Pushforward(PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             PerturbedMultiplicative(true_maximizer; ε=1.0, nb_samples=10), cost
         ),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             RegularizedGeneric(true_maximizer, half_square_norm, identity), cost
         ),
     ),
8 changes: 3 additions & 5 deletions test/ranking.jl
@@ -71,21 +71,19 @@ pipelines_experience = [
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
-            PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost
-        ),
+        loss=Pushforward(PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             PerturbedMultiplicative(true_maximizer; ε=1.0, nb_samples=10), cost
         ),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             RegularizedGeneric(true_maximizer, half_square_norm, identity), cost
         ),
     ),
2 changes: 1 addition & 1 deletion test/tutorial.jl
@@ -111,7 +111,7 @@ Thanks to this smoothing, we can now train our model with a standard gradient op
 =#

 encoder = deepcopy(initial_encoder)
-opt = ADAM();
+opt = Adam();
 losses = Float64[]
 for epoch in 1:200
     l = 0.0
2 changes: 1 addition & 1 deletion test/utils/pipeline.jl
@@ -37,7 +37,7 @@ function test_pipeline!(
     @info "Testing $setting_name" maximizer loss

     ## Optimization
-    opt = ADAM()
+    opt = Adam()
     perf_storage = init_perf()
     prog = Progress(epochs; enabled=verbose)

