
Commit 696abb44 authored by Dahua Lin

Refactor tests

The original tests in test/distributions.jl have been split into
test/categorical.jl, test/univariate.jl, and test/multivariate.jl.

The contents of test/univariate.jl are re-organized to make it easier to
browse and add tests.

Add tests for a variety of vectorized evaluation functions.
parent a99c73b0
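
For reference, the vectorized-evaluation tests added below follow a common pattern: evaluate a function one sample at a time and check that the single vectorized call agrees. A minimal sketch of that pattern, mirroring the multivariate tests in this diff (the particular distribution and sample points are only illustrative):

using Distributions
using Base.Test

# Evaluate logpdf one column at a time, then check the vectorized call agrees.
d = MultivariateNormal(zeros(2), eye(2))   # illustrative distribution
x = [0.0 1.0 1.0; 0.0 0.0 1.0]             # one sample per column
r0 = zeros(size(x, 2))
for i in 1:size(x, 2)
    r0[i] = logpdf(d, x[:, i])             # per-sample (scalar) evaluation
end
@test_approx_eq logpdf(d, x) r0            # vectorized evaluation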
tests = [
"distributions",
"utils",
"fit",
"categorical",
"truncate",
"univariate",
"multivariate",
......
@@ -26,8 +26,9 @@ export # types
Binomial,
Categorical,
Cauchy,
Chi,
Chisq,
# Cosine,
Cosine,
Dirichlet,
DiscreteUniform,
DoubleExponential,
......
immutable Chi <: ContinuousUnivariateDistribution
df::Float64
Chi(df::Real) = new(float64(df))
end
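# If X ~ Chi(k) then X^2 ~ Chisq(k), so the CDF is P(k/2, x^2/2),
# assuming regularized_gamma(a, x) is the regularized lower incomplete gamma P(a, x).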
cdf(d::Chi, x::Real) = regularized_gamma(d.df / 2.0, x^2 / 2.0)
......
@@ -16,7 +16,7 @@ DiscreteUniform() = DiscreteUniform(0, 1)
function cdf(d::DiscreteUniform, k::Real)
if k < d.a
return 0.0
elseif <= d.b
elseif k <= d.b
return (ifloor(k) - d.a + 1.0) / (d.b - d.a + 1.0)
else
return 1.0
......
using Distributions
using Base.Test
d = Categorical([0.25, 0.5, 0.25])
d = Categorical(3)
d = Categorical([0.25, 0.5, 0.25])
@test !insupport(d, 0)
@test insupport(d, 1)
@test insupport(d, 2)
@test insupport(d, 3)
@test !insupport(d, 4)
@test logpmf(d, 1) == log(0.25)
@test pmf(d, 1) == 0.25
@test logpmf(d, 2) == log(0.5)
@test pmf(d, 2) == 0.5
@test logpmf(d, 0) == -Inf
@test pmf(d, 0) == 0.0
@test 1.0 <= rand(d) <= 3.0
A = Array(Int, 10)
rand!(d, A)
@test 1.0 <= mean(A) <= 3.0
# Examples of sample()
a = [1, 6, 19]
p = rand(Dirichlet(3))
x = sample(a, p)
@test x == 1 || x == 6 || x == 19
a = 19.0 * [1.0, 0.0]
x = sample(a)
@test x == 0.0 || x == 19.0
using Distributions
using Base.Test
# n probability points, i.e. the midpoints of the intervals [0, 1/n],...,[1-1/n, 1]
probpts(n::Int) = ((1:n) - 0.5)/n
pp = float(probpts(1000)) # convert from a Range{Float64}
lpp = log(pp)
tol = sqrt(eps())
function absdiff{T<:Real}(current::AbstractArray{T}, target::AbstractArray{T})
@test all(size(current) == size(target))
max(abs(current - target))
end
function reldiff{T<:Real}(current::T, target::T)
abs((current - target)/(bool(target) ? target : 1))
end
function reldiff{T<:Real}(current::AbstractArray{T}, target::AbstractArray{T})
@test all(size(current) == size(target))
max([reldiff(current[i], target[i]) for i in 1:length(target)])
end
## Checks on ContinuousDistribution instances
for d in (Beta(),
Cauchy(),
Chisq(12),
Exponential(),
Exponential(23.1),
FDist(2, 21),
Gamma(3),
Gamma(),
Gumbel(),
Gumbel(5, 3),
Logistic(),
LogNormal(),
Normal(),
TDist(1),
TDist(28),
TruncatedNormal(Normal(0, 1), -3, 3),
# TruncatedNormal(Normal(-100, 1), 0, 1),
TruncatedNormal(Normal(27, 3), 0, Inf),
Uniform(),
Weibull(2.3))
# println(d) # uncomment if an assertion fails
qq = quantile(d, pp)
@test_approx_eq cdf(d, qq) pp
@test_approx_eq ccdf(d, qq) 1 - pp
@test_approx_eq cquantile(d, 1 - pp) qq
@test_approx_eq logpdf(d, qq) log(pdf(d, qq))
@test_approx_eq logcdf(d, qq) lpp
@test_approx_eq logccdf(d, qq) lpp[end:-1:1]
@test_approx_eq invlogcdf(d, lpp) qq
@test_approx_eq invlogccdf(d, lpp) qq[end:-1:1]
end
# Additional tests on the Multinomial and Dirichlet constructors
d = Multinomial(1, [0.5, 0.4, 0.1])
d = Multinomial(1, 3)
d = Multinomial(2)
mean(d)
var(d)
@test insupport(d, [1, 0])
@test !insupport(d, [1, 1])
@test insupport(d, [0, 1])
pmf(d, [1, 0])
pmf(d, [1, 1])
pmf(d, [0, 1])
logpmf(d, [1, 0])
logpmf(d, [1, 1])
logpmf(d, [0, 1])
d = Multinomial(10)
rand(d)
A = Array(Int, 10, 2)
rand!(d, A)
d = Dirichlet([1.0, 2.0, 1.0])
d = Dirichlet(3)
mean(d)
var(d)
insupport(d, [0.1, 0.8, 0.1])
insupport(d, [0.1, 0.8, 0.2])
insupport(d, [0.1, 0.8])
pdf(d, [0.1, 0.8, 0.1])
rand(d)
A = Array(Float64, 3, 10)
rand!(d, A)
d = Categorical([0.25, 0.5, 0.25])
d = Categorical(3)
d = Categorical([0.25, 0.5, 0.25])
@test !insupport(d, 0)
@test insupport(d, 1)
@test insupport(d, 2)
@test insupport(d, 3)
@test !insupport(d, 4)
@test logpmf(d, 1) == log(0.25)
@test pmf(d, 1) == 0.25
@test logpmf(d, 2) == log(0.5)
@test pmf(d, 2) == 0.5
@test logpmf(d, 0) == -Inf
@test pmf(d, 0) == 0.0
@test 1.0 <= rand(d) <= 3.0
A = Array(Int, 10)
rand!(d, A)
@test 1.0 <= mean(A) <= 3.0
# Examples of sample()
a = [1, 6, 19]
p = rand(Dirichlet(3))
x = sample(a, p)
@test x == 1 || x == 6 || x == 19
a = 19.0 * [1.0, 0.0]
x = sample(a)
@test x == 0.0 || x == 19.0
## Link function tests
const ep = eps()
const oneMeps = 1 - ep
srand(1)
etas = (linspace(-7., 7., 15), # equal spacing to asymptotic area
14 * rand(17) - 7, # random sample from wide uniform dist
clamp(rand(Normal(0, 4), 17), -7., 7.), # random sample from wide normal dist
[-7., rand(Normal(0, 4),15), 7.])
## sample linear predictor values for the families in which eta must be positive
etapos = (float64(1:20), rand(Exponential(), 20), rand(Gamma(3), 20), max(ep, rand(Normal(2.), 20)))
## values of mu in the (0,1) interval
mubinom = (rand(100), rand(Beta(1,3), 100),
[ccall((:rbeta, :libRmath), Float64, (Float64,Float64), 0.1, 3) for i in 1:100],
[ccall((:rbeta, :libRmath), Float64, (Float64,Float64), 3, 0.1) for i in 1:100])
#for ll in (LogitLink(), ProbitLink()#, CloglogLink() # edge cases for CloglogLink are tricky
# , CauchitLink())
# println(ll) # Having problems with the edge when eta is very large or very small
# for i in 1:size(etas,1)
# println(i)
# @test all(isapprox(linkfun(ll, clamp(linkinv(ll, etas[i]), realmin(Float64), 1.-eps())), etas[i]))
# end
# for mu in mubinom
# mm = clamp(mu, realmin(), oneMeps)
# @test_approx_eq linkinv(ll, linkfun(ll, mm)) mm
# end
#end
# Multivariate normal
d = MultivariateNormal(zeros(2), eye(2))
@test abs(pdf(d, [0., 0.]) - 0.159155) < 1.0e-5
@test abs(pdf(d, [1., 0.]) - 0.0965324) < 1.0e-5
@test abs(pdf(d, [1., 1.]) - 0.0585498) < 1.0e-5
d = MultivariateNormal(zeros(3), [4. -2. -1.; -2. 5. -1.; -1. -1. 6.])
@test_approx_eq logpdf(d, [3., 4., 5.]) (-15.75539253001834)
x = [3. 4. 5.; 1. 2. 3.; -4. -3. -2.; -1. -3. -2.]'
r0 = zeros(4)
for i = 1 : 4
r0[i] = logpdf(d, x[:,i])
end
@test_approx_eq logpdf(d, x) r0
# Dirichlet
d = Dirichlet([1.5, 2.0, 2.5])
x = [0.2 0.5 0.3; 0.1 0.5 0.4; 0.8 0.1 0.1; 0.05 0.15 0.8]'
r0 = zeros(4)
for i = 1 : 4
r0[i] = logpdf(d, x[:,i])
end
@test_approx_eq logpdf(d, x) r0
# Truncated normal
for d in (TruncatedNormal(Normal(0, 1), -1, 1),
TruncatedNormal(Normal(3, 10), 7, 8),
TruncatedNormal(Normal(-5, 1), -Inf, -10))
@test all(insupport(d, rand(d, 1000)))
end
@@ -69,3 +69,72 @@ for d in [Dirichlet([100.0, 17.0, 31.0, 45.0]),
# TODO: Test kurtosis
# TODO: Test skewness
end
#####
#
# Specialized tests
#
#####
# Multinomial
d = Multinomial(1, [0.5, 0.4, 0.1])
d = Multinomial(1, 3)
d = Multinomial(2)
mean(d)
var(d)
@test insupport(d, [1, 0])
@test !insupport(d, [1, 1])
@test insupport(d, [0, 1])
pmf(d, [1, 0])
pmf(d, [1, 1])
pmf(d, [0, 1])
logpmf(d, [1, 0])
logpmf(d, [1, 1])
logpmf(d, [0, 1])
d = Multinomial(10)
rand(d)
A = Array(Int, 10, 2)
rand!(d, A)
# Dirichlet
d = Dirichlet([1.0, 2.0, 1.0])
d = Dirichlet(3)
mean(d)
var(d)
insupport(d, [0.1, 0.8, 0.1])
insupport(d, [0.1, 0.8, 0.2])
insupport(d, [0.1, 0.8])
pdf(d, [0.1, 0.8, 0.1])
rand(d)
A = Array(Float64, 3, 10)
rand!(d, A)
d = Dirichlet([1.5, 2.0, 2.5])
x = [0.2 0.5 0.3; 0.1 0.5 0.4; 0.8 0.1 0.1; 0.05 0.15 0.8]'
r0 = zeros(4)
for i = 1 : 4
r0[i] = logpdf(d, x[:,i])
end
@test_approx_eq logpdf(d, x) r0
# MultivariateNormal
d = MultivariateNormal(zeros(2), eye(2))
@test abs(pdf(d, [0., 0.]) - 0.159155) < 1.0e-5
@test abs(pdf(d, [1., 0.]) - 0.0965324) < 1.0e-5
@test abs(pdf(d, [1., 1.]) - 0.0585498) < 1.0e-5
d = MultivariateNormal(zeros(3), [4. -2. -1.; -2. 5. -1.; -1. -1. 6.])
@test_approx_eq logpdf(d, [3., 4., 5.]) (-15.75539253001834)
x = [3. 4. 5.; 1. 2. 3.; -4. -3. -2.; -1. -3. -2.]'
r0 = zeros(4)
for i = 1 : 4
r0[i] = logpdf(d, x[:,i])
end
@test_approx_eq logpdf(d, x) r0
using Distributions
using Base.Test
for d in (TruncatedNormal(Normal(0, 1), -1, 1),
TruncatedNormal(Normal(3, 10), 7, 8),
TruncatedNormal(Normal(-5, 1), -Inf, -10))
@test all(insupport(d, rand(d, 1000)))
end
d = TruncatedNormal(Normal(0, 1), -0.1, +0.1)
@test pdf(d, 0.0) > pdf(Normal(0, 1), 0.0)
@@ -24,3 +30,5 @@ d = TruncatedNormal(Normal(0, 1), -0.1, +0.1)
@test abs(cdf(d, quantile(d, 0.01)) - 0.01) < 1e-8
@test abs(cdf(d, quantile(d, 0.50)) - 0.50) < 1e-8
@test abs(cdf(d, quantile(d, 0.99)) - 0.99) < 1e-8