Le serveur gitlab sera inaccessible le mercredi 19 février 2020 de 13h à 14h pour une intervention de maintenance programmée.

Commit 4fb16b1c by Dahua Lin

### change +/- between arrays and numbers to .+/.-

parent 1e54508c
@@ -159,7 +159,7 @@ function logpdf!{T <: Real}(r::AbstractArray, d::Dirichlet, x::Matrix{T})
     end
     b::Float64 = d.lmnB
-    Base.LinAlg.BLAS.gemv!('T', 1.0, log(x), d.alpha - 1.0, 0.0, r)
+    Base.LinAlg.BLAS.gemv!('T', 1.0, log(x), d.alpha .- 1.0, 0.0, r)
     # At_mul_B(r, log(x), d.alpha - 1.0)
     for i in 1:n
         r[i] -= b
@@ -10,7 +10,7 @@ immutable VonMisesFisher <: ContinuousMultivariateDistribution
     kappa::Float64
     function VonMisesFisher{T <: Real}(mu::Vector{T}, kappa::Float64)
-        mu = mu / norm(mu)
+        mu = mu ./ norm(mu)
         if kappa < 0
             throw(ArgumentError("kappa must be a nonnegative real number."))
         end
@@ -56,7 +56,7 @@ function randvonMisesFisher(n, kappa, mu)
     w = rW(n, kappa, m)
     v = rand(MvNormal(zeros(m-1), eye(m-1)), n)
     v = normalize(v',2,2)
-    r = sqrt(1 - w .^ 2)
+    r = sqrt(1.0 .- w .^ 2)
     for j = 1:size(v,2) v[:,j] = v[:,j] .* r; end
     x = hcat(v, w)
     mu = mu / norm(mu)
@@ -11,11 +11,7 @@ X = reshape(Float64[1:12], p, n)
 w = rand(n)
 Xw = X * diagm(w)
-# Convoluted way to put 1's on diag
-Sigma = eye(p)
-Sigma += 0.25
-Sigma -= 0.25*eye(p)
+Sigma = 0.75 * eye(p) + fill(0.25, 4, 4)
 ss = suffstats(MvNormalKnownSigma(Sigma), X)
 ssw = suffstats(MvNormalKnownSigma(Sigma), X, w)
@@ -47,7 +47,7 @@ x = rand(Normal(2.0, 3.0), n)
 p = posterior((2.0, pri), Normal, x)
 @test isa(p, InverseGamma)
 @test_approx_eq p.shape pri.shape + n / 2
-@test_approx_eq p.scale pri.scale + sum(abs2(x - 2.0)) / 2
+@test_approx_eq p.scale pri.scale + sum(abs2(x .- 2.0)) / 2
 r = posterior_mode((2.0, pri), Normal, x)
 @test_approx_eq r mode(p)
@@ -60,7 +60,7 @@ f = fit_map((2.0, pri), Normal, x)
 p = posterior((2.0, pri), Normal, x, w)
 @test isa(p, InverseGamma)
 @test_approx_eq p.shape pri.shape + sum(w) / 2
-@test_approx_eq p.scale pri.scale + dot(w, abs2(x - 2.0)) / 2
+@test_approx_eq p.scale pri.scale + dot(w, abs2(x .- 2.0)) / 2
 r = posterior_mode((2.0, pri), Normal, x, w)
 @test_approx_eq r mode(p)
@@ -79,7 +79,7 @@ x = rand(Normal(2.0, 3.0), n)
 p = posterior((2.0, pri), Normal, x)
 @test isa(p, Gamma)
 @test_approx_eq p.shape pri.shape + n / 2
-@test_approx_eq p.scale pri.scale + sum(abs2(x - 2.0)) / 2
+@test_approx_eq p.scale pri.scale + sum(abs2(x .- 2.0)) / 2
 r = posterior_mode((2.0, pri), Normal, x)
 @test_approx_eq r mode(p)
@@ -92,7 +92,7 @@ f = fit_map((2.0, pri), Normal, x)
 p = posterior((2.0, pri), Normal, x, w)
 @test isa(p, Gamma)
 @test_approx_eq p.shape pri.shape + sum(w) / 2
-@test_approx_eq p.scale pri.scale + dot(w, abs2(x - 2.0)) / 2
+@test_approx_eq p.scale pri.scale + dot(w, abs2(x .- 2.0)) / 2
 r = posterior_mode((2.0, pri), Normal, x, w)
 @test_approx_eq r mode(p)
@@ -138,11 +138,11 @@ for d in [
     xf = float64(x)
     xmean = dot(p, xf)
-    xvar = dot(p, abs2(xf - xmean))
+    xvar = dot(p, abs2(xf .- xmean))
     xstd = sqrt(xvar)
     xentropy = NumericExtensions.entropy(p)
-    xskew = dot(p, (xf - xmean).^3) / (xstd.^3)
-    xkurt = dot(p, (xf - xmean).^4) / (xvar.^2) - 3.0
+    xskew = dot(p, (xf .- xmean).^3) / (xstd.^3)
+    xkurt = dot(p, (xf .- xmean).^4) / (xvar.^2) - 3.0
     @test_approx_eq mean(d) xmean
     @test_approx_eq var(d) xvar
@@ -165,25 +165,25 @@ ss = suffstats(Normal, x)
 @test isa(ss, Distributions.NormalStats)
 @test_approx_eq ss.s sum(x)
 @test_approx_eq ss.m mean(x)
-@test_approx_eq ss.s2 sum((x - ss.m).^2)
+@test_approx_eq ss.s2 sum((x .- ss.m).^2)
 @test_approx_eq ss.tw n0
 ss = suffstats(Normal, x, w)
 @test isa(ss, Distributions.NormalStats)
 @test_approx_eq ss.s dot(x, w)
 @test_approx_eq ss.m dot(x, w) / sum(w)
-@test_approx_eq ss.s2 dot((x - ss.m).^2, w)
+@test_approx_eq ss.s2 dot((x .- ss.m).^2, w)
 @test_approx_eq ss.tw sum(w)
 d = fit(Normal, x)
 @test isa(d, Normal)
 @test_approx_eq d.μ mean(x)
-@test_approx_eq d.σ sqrt(mean((x - d.μ).^2))
+@test_approx_eq d.σ sqrt(mean((x .- d.μ).^2))
 d = fit(Normal, x, w)
 @test isa(d, Normal)
 @test_approx_eq d.μ dot(x, w) / sum(w)
-@test_approx_eq d.σ sqrt(dot((x - d.μ).^2, w) / sum(w))
+@test_approx_eq d.σ sqrt(dot((x .- d.μ).^2, w) / sum(w))
 d = fit(Normal, rand(Normal(μ, σ), N))
 @test isa(d, Normal)
@@ -195,24 +195,24 @@ import Distributions.NormalKnownMu, Distributions.NormalKnownSigma
 ss = suffstats(NormalKnownMu(μ), x)
 @test isa(ss, Distributions.NormalKnownMuStats)
 @test ss.μ == μ
-@test_approx_eq ss.s2 sum((x - μ).^2)
+@test_approx_eq ss.s2 sum((x .- μ).^2)
 @test_approx_eq ss.tw n0
 ss = suffstats(NormalKnownMu(μ), x, w)
 @test isa(ss, Distributions.NormalKnownMuStats)
 @test ss.μ == μ
-@test_approx_eq ss.s2 dot((x - μ).^2, w)
+@test_approx_eq ss.s2 dot((x .- μ).^2, w)
 @test_approx_eq ss.tw sum(w)
 d = fit_mle(Normal, x; mu=μ)
 @test isa(d, Normal)
 @test d.μ == μ
-@test_approx_eq d.σ sqrt(mean((x - d.μ).^2))
+@test_approx_eq d.σ sqrt(mean((x .- d.μ).^2))
 d = fit_mle(Normal, x, w; mu=μ)
 @test isa(d, Normal)
 @test d.μ == μ
-@test_approx_eq d.σ sqrt(dot((x - d.μ).^2, w) / sum(w))
+@test_approx_eq d.σ sqrt(dot((x .- d.μ).^2, w) / sum(w))
 ss = suffstats(NormalKnownSigma(σ), x)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!