
Commit 9a94b804 authored by Nick Foti

Conjugate normal distributions for random mean and (co)variance. Also added `posterior_sample` to update an observation model from a posterior sample.
parent 748ddb47
@@ -14,7 +14,12 @@ tests = [
"kolmogorov",
"edgeworth",
"matrix",
"vonmisesfisher"]
"vonmisesfisher",
"conjugate-normal/normalgamma",
"conjugate-normal/normalinversegamma",
"conjugate-normal/normalwishart",
"conjugate-normal/normalinversewishart",
"conjugate-normal/normalknowncov"]
println("Running tests:")
@@ -70,6 +70,10 @@ export
NoncentralF,
NoncentralT,
Normal,
NormalGamma,
NormalInverseGamma,
NormalInverseWishart,
NormalWishart,
Pareto,
Poisson,
Rayleigh,
@@ -119,6 +123,7 @@ export
posterior_rand, # draw samples from the posterior distribution
posterior_rand!,
posterior_make, # create a distribution/model from params obtained from posterior
posterior_sample,
scale, # scale parameter of a distribution
rate, # rate parameter of a distribution
sqmahal, # squared Mahalanobis distance to Gaussian center
@@ -273,7 +278,13 @@ include("kde.jl")
# Expectations, entropy, KL divergence
include("functionals.jl")
# Posteriors and conjugate priors
include("conjugates.jl")
include(joinpath("conjugate-normal", "normalgamma.jl"))
include(joinpath("conjugate-normal", "normalinversegamma.jl"))
include(joinpath("conjugate-normal", "normalwishart.jl"))
include(joinpath("conjugate-normal", "normalinversewishart.jl"))
include(joinpath("conjugate-normal", "normalknowncov.jl"))
include("qq.jl")
# Used "Conjugate Bayesian analysis of the Gaussian distribution" by Murphy as
# a reference. Note that there were some typos in that document so the code
# here may not correspond exactly.
immutable NormalGamma <: Distribution
mu::Float64
nu::Float64 # scales precision of Normal
shape::Float64
rate::Float64
function NormalGamma(mu::Real, nu::Real, sh::Real, r::Real)
nu > zero(nu) && sh > zero(sh) && r > zero(r) || error("nu, shape, and rate must all be positive")
new(mu, nu, sh, r)
end
end
mu(d::NormalGamma) = d.mu
nu(d::NormalGamma) = d.nu
shape(d::NormalGamma) = d.shape
scale(d::NormalGamma) = 1. / d.rate
rate(d::NormalGamma) = d.rate
insupport(::Type{NormalGamma}, x::Real, tau2::Real) =
isfinite(x) && zero(tau2) <= tau2 < Inf
# Probably should guard against dividing by and taking the log of 0.
function pdf(d::NormalGamma, x::Real, tau2::Real)
Zinv = d.rate.^d.shape / gamma(d.shape) * sqrt(d.nu / (2.*pi))
return Zinv * tau2.^(d.shape-0.5) * exp(-0.5*tau2*(d.nu*(x-d.mu).^2 + 2.*d.rate))
end
function logpdf(d::NormalGamma, x::Real, tau2::Real)
lZinv = d.shape*log(d.rate) - lgamma(d.shape) + 0.5*(log(d.nu) - log(2.*pi))
return lZinv + (d.shape-0.5)*log(tau2) - 0.5*tau2*(d.nu*(x-d.mu).^2 + 2*d.rate)
end
function rand(d::NormalGamma)
# Guard against invalid precisions
tau2 = rand(Gamma(d.shape, scale(d))) # Gamma is parameterized by shape and scale, so pass 1/rate
if tau2 <= zero(Float64)
tau2 = eps(Float64)
end
mu = rand(Normal(d.mu, sqrt(1./(tau2*d.nu)))) # Normal takes a standard deviation; mu's conditional variance is 1/(nu*tau2)
return mu, tau2
end
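# The update implemented below follows Murphy's normal-gamma section (his
# kappa is the nu here): with n observations, sample mean xbar, and
# ssd = sum((x_i - xbar)^2),
#   nu_n    = nu_0 + n
#   mu_n    = (nu_0*mu_0 + n*xbar) / nu_n
#   shape_n = shape_0 + n/2
#   rate_n  = rate_0 + ssd/2 + nu_0*n*(xbar - mu_0)^2 / (2*nu_n)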
function posterior(prior::NormalGamma, ss::NormalStats)
mu0 = prior.mu
nu0 = prior.nu
shape0 = prior.shape
rate0 = prior.rate
# ss.tw is the number of observations when no weights were used to
# compute the sufficient statistics.
nu = nu0 + ss.tw
mu = (nu0*mu0 + ss.s) / nu
shape = shape0 + 0.5*ss.tw
rate = rate0 + 0.5*ss.s2 + 0.5*nu0/nu*ss.tw*(ss.m-mu0).^2
return NormalGamma(mu, nu, shape, rate)
end
function posterior{T<:Real}(prior::NormalGamma, ::Type{Normal}, x::Array{T})
return posterior(prior, suffstats(Normal, x))
end
function posterior{T<:Real}(prior::NormalGamma, ::Type{Normal}, x::Array{T}, w::Array{Float64})
return posterior(prior, suffstats(Normal, x, w))
end
function posterior_sample{T<:Real}(prior::NormalGamma, ::Type{Normal}, x::Array{T})
mu, tau2 = rand(posterior(prior, suffstats(Normal, x)))
return Normal(mu, 1./sqrt(tau2))
end
function posterior_sample{T<:Real}(prior::NormalGamma, ::Type{Normal}, x::Array{T}, w::Array{Float64})
mu, tau2 = rand(posterior(prior, suffstats(Normal, x, w)))
return Normal(mu, 1./sqrt(tau2))
end
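# A minimal usage sketch (hypothetical numbers, not part of the library):
# put a NormalGamma prior on (mu, tau2), condition on data, and draw an
# updated observation model.
#
# prior = NormalGamma(0., 1., 2., 2.)       # mu0, nu0, shape, rate
# x = rand(Normal(1.5, 0.7), 100)           # synthetic observations
# post = posterior(prior, Normal, x)        # again a NormalGamma
# obs = posterior_sample(prior, Normal, x)  # Normal(mu, 1/sqrt(tau2)) from one draw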
# Used "Conjugate Bayesian analysis of the Gaussian distribution" by Murphy as
# a reference. Note that there were some typos in that document so the code
# here may not correspond exactly.
immutable NormalInverseGamma <: Distribution
mu::Float64
v0::Float64 # scales variance of Normal
shape::Float64
scale::Float64
function NormalInverseGamma(mu::Real, v0::Real, sh::Real, sc::Real)
v0 > zero(v0) && sh > zero(sh) && sc > zero(sc) || error("v0, shape, and scale must all be positive")
new(mu, v0, sh, sc)
end
end
mu(d::NormalInverseGamma) = d.mu
v0(d::NormalInverseGamma) = d.v0
shape(d::NormalInverseGamma) = d.shape
scale(d::NormalInverseGamma) = d.scale
rate(d::NormalInverseGamma) = 1. / d.scale
insupport(::Type{NormalInverseGamma}, x::Real, sig2::Real) =
isfinite(x) && zero(sig2) <= sig2 < Inf
# Probably should guard against dividing by and taking the log of 0.
function pdf(d::NormalInverseGamma, x::Real, sig2::Real)
Zinv = d.scale.^d.shape / gamma(d.shape) / sqrt(d.v0 * 2.*pi)
return Zinv * 1./(sqrt(sig2)*sig2.^(d.shape+1.)) * exp(-d.scale/sig2 - 0.5/(sig2*d.v0)*(x-d.mu).^2)
end
function logpdf(d::NormalInverseGamma, x::Real, sig2::Real)
lZinv = d.shape*log(d.scale) - lgamma(d.shape) - 0.5*(log(d.v0) + log(2pi))
return lZinv - 0.5*log(sig2) - (d.shape+1.)*log(sig2) - d.scale/sig2 - 0.5/(sig2*d.v0)*(x-d.mu).^2
end
function rand(d::NormalInverseGamma)
# Guard against invalid variances
sig2 = rand(InverseGamma(d.shape, d.scale))
if sig2 <= zero(Float64)
sig2 = eps(Float64)
end
mu = rand(Normal(d.mu, sqrt(sig2*d.v0))) # Normal takes a standard deviation; mu's conditional variance is sig2*v0
return mu, sig2
end
function posterior(prior::NormalInverseGamma, ss::NormalStats)
mu0 = prior.mu
v0 = prior.v0
shape0 = prior.shape
scale0 = prior.scale
# ss.tw is the number of observations when no weights were used to
# compute the sufficient statistics.
vn_inv = 1./v0 + ss.tw
mu = (mu0/v0 + ss.s) / vn_inv # ss.s = ss.tw*ss.m = n*xbar
shape = shape0 + 0.5*ss.tw
scale = scale0 + 0.5*ss.s2 + 0.5/(vn_inv*v0)*ss.tw*(ss.m-mu0).^2
return NormalInverseGamma(mu, 1./vn_inv, shape, scale)
end
function posterior{T<:Real}(prior::NormalInverseGamma, ::Type{Normal}, x::Array{T})
return posterior(prior, suffstats(Normal, x))
end
function posterior{T<:Real}(prior::NormalInverseGamma, ::Type{Normal}, x::Array{T}, w::Array{Float64})
return posterior(prior, suffstats(Normal, x, w))
end
function posterior_sample{T<:Real}(prior::NormalInverseGamma, ::Type{Normal}, x::Array{T})
mu, sig2 = rand(posterior(prior, suffstats(Normal, x)))
return Normal(mu, sqrt(sig2))
end
function posterior_sample{T<:Real}(prior::NormalInverseGamma, ::Type{Normal}, x::Array{T}, w::Array{Float64})
mu, sig2 = rand(posterior(prior, suffstats(Normal, x, w)))
return Normal(mu, sqrt(sig2))
end
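# A minimal usage sketch (hypothetical numbers, not part of the library);
# same flow as NormalGamma, but parameterized by the variance:
#
# prior = NormalInverseGamma(0., 1., 2., 2.)  # mu0, v0, shape, scale
# x = rand(Normal(-1., 2.), 50)
# post = posterior(prior, Normal, x)          # again a NormalInverseGamma
# obs = posterior_sample(prior, Normal, x)    # Normal(mu, sqrt(sig2)) from one draw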
# Used "Conjugate Bayesian analysis of the Gaussian distribution" by Murphy as
# a reference. Note that there were some typos in that document so the code
# here may not correspond exactly.
import NumericExtensions.PDMat
import NumericExtensions.invquad
immutable NormalInverseWishart <: Distribution
dim::Int
zeromean::Bool
mu::Vector{Float64}
kappa::Float64 # This scales precision (inverse covariance)
Lamchol::Cholesky{Float64} # Covariance matrix (well, sqrt of one)
nu::Float64
function NormalInverseWishart(mu::Vector{Float64}, kappa::Real,
Lamchol::Cholesky{Float64}, nu::Real)
# Probably should put some error checking in here
d = length(mu)
zmean::Bool = true
for i = 1:d
if mu[i] != 0.
zmean = false
break
end
end
new(d, zmean, mu, float64(kappa), Lamchol, float64(nu))
end
end
function NormalInverseWishart(mu::Vector{Float64}, kappa::Real,
Lambda::Matrix{Float64}, nu::Real)
NormalInverseWishart(mu, kappa, cholfact(Lambda), nu)
end
function insupport(::Type{NormalInverseWishart}, x::Vector{Float64}, Sig::Matrix{Float64})
return (all(isfinite(x)) &&
size(Sig, 1) == size(Sig, 2) &&
isApproxSymmmetric(Sig) &&
size(Sig, 1) == length(x) &&
hasCholesky(Sig))
end
pdf(niw::NormalInverseWishart, x::Vector{Float64}, Sig::Matrix{Float64}) =
exp(logpdf(niw, x, Sig))
function logpdf(niw::NormalInverseWishart, x::Vector{Float64}, Sig::Matrix{Float64})
if !insupport(NormalInverseWishart, x, Sig)
return -Inf
else
p = size(x, 1)
nu = niw.nu
kappa = niw.kappa
mu = niw.mu
Lamchol = niw.Lamchol
hnu = 0.5 * nu
hp = 0.5 * p
# Normalization
logp::Float64 = hnu * logdet(Lamchol)
logp -= hnu * p * log(2.)
logp -= lpgamma(p, hnu)
logp -= hp * (log(2.*pi) - log(kappa))
# Inverse-Wishart
logp -= (hnu + hp + 1.) * logdet(Sig)
logp -= 0.5 * trace(Sig \ (Lamchol[:U]' * Lamchol[:U]))
# Normal
z = niw.zeromean ? x : x - mu
logp -= 0.5 * kappa * invquad(PDMat(Sig), z)
return logp
end
end
function rand(niw::NormalInverseWishart)
Sig = rand(InverseWishart(niw.nu, niw.Lamchol))
mu = rand(MvNormal(niw.mu, Sig ./ niw.kappa))
return (mu, Sig)
end
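# The update implemented below follows Murphy's notation: with n
# observations, sample mean xbar, and scatter S = sum((x_i - xbar)*(x_i - xbar)'),
#   kappa_n = kappa_0 + n
#   mu_n    = (kappa_0*mu_0 + n*xbar) / kappa_n
#   nu_n    = nu_0 + n
#   Lam_n   = Lam_0 + S + kappa_0*n/kappa_n * (xbar - mu_0)*(xbar - mu_0)'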
function posterior(prior::NormalInverseWishart, ss::MvNormalStats)
mu0 = prior.mu
kappa0 = prior.kappa
LamC0 = prior.Lamchol
nu0 = prior.nu
kappa = kappa0 + ss.tw
mu = (kappa0.*mu0 + ss.s) ./ kappa
nu = nu0 + ss.tw
Lam0 = LamC0[:U]'*LamC0[:U]
z = prior.zeromean ? ss.m : ss.m - mu0
Lam = Lam0 + ss.s2 + kappa0*ss.tw/kappa*(z*z')
return NormalInverseWishart(mu, kappa, cholfact(Lam), nu)
end
function posterior{T<:Real}(prior::NormalInverseWishart, ::Type{MvNormal}, X::Matrix{T})
return posterior(prior, suffstats(MvNormal, X))
end
function posterior{T<:Real}(prior::NormalInverseWishart, ::Type{MvNormal}, X::Matrix{T}, w::Array{Float64})
return posterior(prior, suffstats(MvNormal, X, w))
end
function posterior_rand{T<:Real}(prior::NormalInverseWishart, ::Type{MvNormal}, X::Matrix{T})
return rand(posterior(prior, suffstats(MvNormal, X)))
end
function posterior_rand{T<:Real}(prior::NormalInverseWishart, ::Type{MvNormal}, X::Matrix{T}, w::Array{Float64})
return rand(posterior(prior, suffstats(MvNormal, X, w)))
end
function posterior_sample{T<:Real}(prior::NormalInverseWishart, ::Type{MvNormal}, X::Matrix{T})
mu, Sig = rand(posterior(prior, suffstats(MvNormal, X)))
return MvNormal(mu, Sig)
end
function posterior_sample{T<:Real}(prior::NormalInverseWishart, ::Type{MvNormal}, X::Matrix{T}, w::Array{Float64})
mu, Sig = rand(posterior(prior, suffstats(MvNormal, X, w)))
return MvNormal(mu, Sig)
end
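# A minimal usage sketch (hypothetical numbers, not part of the library):
# the prior is over (mu, Sigma) jointly; columns of X are observations.
#
# d = 3
# prior = NormalInverseWishart(zeros(d), 1., eye(d), float64(d + 2))
# X = rand(MvNormal(ones(d), eye(d)), 100)    # d x 100 data matrix
# post = posterior(prior, MvNormal, X)        # again a NormalInverseWishart
# obs = posterior_sample(prior, MvNormal, X)  # an MvNormal from one (mu, Sigma) draw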
# Used "Conjugate Bayesian analysis of the Gaussian distribution" by Murphy as
# a reference. Note that there were some typos in that document so the code
# here may not correspond exactly.
import NumericExtensions.sum
immutable MvNormalKnownSigma <: GenerativeFormulation
Sigma::Matrix{Float64}
function MvNormalKnownSigma(S::Matrix{Float64})
# TODO: Error checking, maybe
new(S)
end
end
immutable MvNormalKnownSigmaStats
Sigma::Matrix{Float64} # known covariance
s::Vector{Float64} # (weighted) sum of x
tw::Float64 # total sample weight
function MvNormalKnownSigmaStats(S::Matrix{Float64},
s::Vector{Float64},
tw::Float64)
new(S, s, float64(tw))
end
end
function suffstats{T<:Real}(g::MvNormalKnownSigma, X::Matrix{T})
d, n = size(X)
s = float64(X[:,1]) # copy the first column as Float64 so integer data is accepted
for j in 2:n
for i in 1:d
@inbounds s[i] += X[i,j]
end
end
MvNormalKnownSigmaStats(g.Sigma, s, float64(n))
end
function suffstats{T<:Real}(g::MvNormalKnownSigma, X::Matrix{T}, w::Array{Float64})
d, n = size(X)
# Could use NumericExtensions or BLAS
tw = w[1]
s = w[1] .* X[:,1]
for j in 2:n
@inbounds wj = w[j]
for i in 1:d
@inbounds s[i] += wj * X[i,j]
end
tw += wj
end
MvNormalKnownSigmaStats(g.Sigma, s, tw)
end
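# Note: the weighted accumulation above is just a matrix-vector product;
# an equivalent one-liner (a sketch, leaning on BLAS as the comment above
# suggests) would be
#
# s = X * vec(w)
# tw = sum(w)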
function posterior(prior::MultivariateNormal, ss::MvNormalKnownSigmaStats)
Sigma0inv = inv(prior.Σ)
mu0 = prior.μ
Sigmainv = inv(ss.Sigma)
SigmaN = inv(Sigma0inv + ss.tw*Sigmainv)
muN = SigmaN*(Sigmainv*ss.s + Sigma0inv*mu0)
return MultivariateNormal(muN, SigmaN)
end
function posterior{T<:Real}(prior::(MultivariateNormal, Matrix{Float64}), ::Type{MultivariateNormal}, X::Matrix{T})
pri_μ::MultivariateNormal, Σ::Matrix{Float64} = prior
posterior(pri_μ, suffstats(MvNormalKnownSigma(Σ), X))
end
function posterior{T<:Real}(prior::(MultivariateNormal, Matrix{Float64}), ::Type{MultivariateNormal}, X::Matrix{T}, w::Array{Float64})
pri_μ::MultivariateNormal, Σ::Matrix{Float64} = prior
posterior(pri_μ, suffstats(MvNormalKnownSigma(Σ), X, w))
end
function posterior_sample{T<:Real}(prior::(MultivariateNormal, Matrix{Float64}), ::Type{MultivariateNormal}, X::Matrix{T})
mu = rand(posterior(prior[1], suffstats(MvNormalKnownSigma(prior[2]), X)))
return MultivariateNormal(mu, prior[2])
end
function posterior_sample{T<:Real}(prior::(MultivariateNormal, Matrix{Float64}), ::Type{MultivariateNormal}, X::Matrix{T}, w::Array{Float64})
mu = rand(posterior(prior[1], suffstats(MvNormalKnownSigma(prior[2]), X, w)))
return MultivariateNormal(mu, prior[2])
end
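# A minimal usage sketch (hypothetical numbers, not part of the library):
# with Sigma known, the prior on the mean is itself multivariate normal
# and is passed together with Sigma as a tuple.
#
# d = 2
# Sigma = 0.5 * eye(d)                                  # known covariance
# prior = (MultivariateNormal(zeros(d), eye(d)), Sigma)
# X = rand(MultivariateNormal(ones(d), Sigma), 200)
# obs = posterior_sample(prior, MultivariateNormal, X)  # mean drawn from the posterior, covariance fixed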
# TODO: The non-conjugate normal prior on mean and inverse-gamma/Wishart prior
# on (co)variance.
#
# NormalInverseGammaSemiConjugate: p(\mu, \sigma) = N(\mu | m_0, V_0)IG(\sigma| a0, b0)
# NormalInverseWishartSemiConjugate: p(\mu,\Sigma) = N(\mu | m_0, V_0)IW(\Sigma|S_0,\nu_0)
# Used "Conjugate Bayesian analysis of the Gaussian distribution" by Murphy as
# a reference. Note that there were some typos in that document so the code
# here may not correspond exactly.
immutable NormalWishart <: Distribution
dim::Int
zeromean::Bool
mu::Vector{Float64}
kappa::Float64
Tchol::Cholesky{Float64} # Precision matrix (well, sqrt of one)
nu::Float64
function NormalWishart(mu::Vector{Float64}, kappa::Real,
Tchol::Cholesky{Float64}, nu::Real)
# Probably should put some error checking in here
d = length(mu)
zmean::Bool = true
for i = 1:d
if mu[i] != 0.
zmean = false
break
end
end
new(d, zmean, mu, float64(kappa), Tchol, float64(nu))
end
end
function NormalWishart(mu::Vector{Float64}, kappa::Real,
T::Matrix{Float64}, nu::Real)
NormalWishart(mu, kappa, cholfact(T), nu)
end
function insupport(::Type{NormalWishart}, x::Vector{Float64}, Lam::Matrix{Float64})
return (all(isfinite(x)) &&
size(Lam, 1) == size(Lam, 2) &&
isApproxSymmmetric(Lam) &&
size(Lam, 1) == length(x) &&
hasCholesky(Lam))
end
pdf(nw::NormalWishart, x::Vector{Float64}, Lam::Matrix{Float64}) =
exp(logpdf(nw, x, Lam))
function logpdf(nw::NormalWishart, x::Vector{Float64}, Lam::Matrix{Float64})
if !insupport(NormalWishart, x, Lam)
return -Inf
else
p = length(x)
nu = nw.nu
kappa = nw.kappa
mu = nw.mu
Tchol = nw.Tchol
hnu = 0.5 * nu
hp = 0.5 * p
# Normalization
logp::Float64 = hp*(log(kappa) - float64(log2π))
logp -= hnu * logdet(Tchol)
logp -= hnu * p * log(2.)
logp -= lpgamma(p, hnu)
# Wishart density (the MvNormal part contributes an extra 0.5*logdet(Lam))
logp += (hnu - hp) * logdet(Lam)
logp -= 0.5 * trace(Tchol \ Lam)
# Normal
z = nw.zeromean ? x : x - mu
logp -= 0.5 * kappa * dot(z, Lam * z)
return logp
end
end
function rand(nw::NormalWishart)
Lam = rand(Wishart(nw.nu, nw.Tchol))
mu = rand(MvNormal(nw.mu, inv(Lam) ./ nw.kappa))
return (mu, Lam)
end
function posterior(prior::NormalWishart, ss::MvNormalStats)
mu0 = prior.mu
kappa0 = prior.kappa
TC0 = prior.Tchol
nu0 = prior.nu
kappa = kappa0 + ss.tw
nu = nu0 + ss.tw
mu = (kappa0.*mu0 + ss.s) ./ kappa
Lam0 = TC0[:U]'*TC0[:U]
z = prior.zeromean ? ss.m : ss.m - mu0
Lam = Lam0 + ss.s2 + kappa0*ss.tw/kappa*(z*z')
return NormalWishart(mu, kappa, cholfact(Lam), nu)
end
function posterior{T<:Real}(prior::NormalWishart, ::Type{MvNormal}, X::Matrix{T})
return posterior(prior, suffstats(MvNormal, X))
end
function posterior{T<:Real}(prior::NormalWishart, ::Type{MvNormal}, X::Matrix{T}, w::Array{Float64})
return posterior(prior, suffstats(MvNormal, X, w))
end
function posterior_rand{T<:Real}(prior::NormalWishart, ::Type{MvNormal}, X::Matrix{T})
return rand(posterior(prior, suffstats(MvNormal, X)))
end
function posterior_rand{T<:Real}(prior::NormalWishart, ::Type{MvNormal}, X::Matrix{T}, w::Array{Float64})
return rand(posterior(prior, suffstats(MvNormal, X, w)))
end
# These are obviously less efficient than using an inverse-Wishart because
# of the extra inv.
function posterior_sample{T<:Real}(prior::NormalWishart, ::Type{MvNormal}, X::Matrix{T})
mu, Lam = rand(posterior(prior, suffstats(MvNormal, X)))
return MvNormal(mu, inv(Lam))
end
function posterior_sample{T<:Real}(prior::NormalWishart, ::Type{MvNormal}, X::Matrix{T}, w::Array{Float64})
mu, Lam = rand(posterior(prior, suffstats(MvNormal, X, w)))
return MvNormal(mu, inv(Lam))
end
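# A minimal usage sketch (hypothetical numbers, not part of the library);
# same shape as the NormalInverseWishart example, but over (mu, Lam) with
# Lam a precision matrix:
#
# d = 3
# prior = NormalWishart(zeros(d), 1., eye(d), float64(d + 2))
# X = rand(MvNormal(zeros(d), eye(d)), 100)
# obs = posterior_sample(prior, MvNormal, X)  # MvNormal(mu, inv(Lam)) from one draw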
@@ -144,11 +144,24 @@ function posterior(prior::Normal, ss::NormalKnownSigmaStats)
return Normal(μ1, σ1)
end
function posterior(prior::InverseGamma, ss::NormalKnownMuStats)
α1 = prior.shape + 0.5*ss.tw
β1 = rate(prior) + 0.5*ss.s2
return InverseGamma(α1, 1.0 / β1)
end
function posterior(prior::Gamma, ss::NormalKnownMuStats)
a = prior.shape + 0.5*ss.tw
b = rate(prior) + 0.5*ss.s2
return Gamma(a, 1. / b)
end
function posterior{T<:Real}(prior::(Normal, Float64), ::Type{Normal}, x::Array{T})
pri_μ::Normal, σ::Float64 = prior
posterior(pri_μ, suffstats(NormalKnownSigma(σ), x))
end
function posterior{T<:Real}(prior::(Normal, Float64), ::Type{Normal}, x::Array{T}, w::Array{Float64})
pri_μ::Normal, σ::Float64 = prior
posterior(pri_μ, suffstats(NormalKnownSigma(σ), x, w))
@@ -204,3 +217,47 @@ function posterior{T<:Real}(prior::(Float64, InverseGamma), ::Type{Normal}, x::A
posterior(pri_σ, suffstats(NormalKnownMu(μ), x, w))
end
function posterior{T<:Real}(prior::(Float64, Gamma), ::Type{Normal}, x::Array{T})
μ::Float64 = prior[1]
pri_tau::Gamma = prior[2]
posterior(pri_tau, suffstats(NormalKnownMu(μ), x))
end
function posterior{T<:Real}(prior::(Float64, Gamma), ::Type{Normal}, x::Array{T}, w::Array{Float64})
μ::Float64 = prior[1]
pri_tau::Gamma = prior[2]
posterior(pri_tau, suffstats(NormalKnownMu(μ), x, w))
end
# The NormalInverseGamma version is in normalinversegamma.jl
function posterior_sample{T<:Real}(prior::(Normal, Float64), ::Type{Normal}, x::Array{T})
mu = rand(posterior(prior, Normal, x)) # dispatch through the tuple-prior posterior above
return Normal(mu, prior[2])
end
function posterior_sample{T<:Real}(prior::(Normal, Float64), ::Type{Normal}, x::Array{T}, w::Array{Float64})
mu = rand(posterior(prior, Normal, x, w))
return Normal(mu, prior[2])
end
function posterior_sample{T<:Real}(prior::(Float64, InverseGamma), ::Type{Normal}, x::Array{T})
sig2 = rand(posterior(prior, Normal, x))
return Normal(prior[1], sqrt(sig2))
end
function posterior_sample{T<:Real}(prior::(Float64, InverseGamma), ::Type{Normal}, x::Array{T}, w::Array{Float64})
sig2 = rand(posterior(prior, Normal, x, w))
return Normal(prior[1], sqrt(sig2))
end
function posterior_sample{T<:Real}(prior::(Float64, Gamma), ::Type{Normal}, x::Array{T})
tau2 = rand(posterior(prior, Normal, x)) # tau2 is a precision
return Normal(prior[1], 1./sqrt(tau2))
end
function posterior_sample{T<:Real}(prior::(Float64, Gamma), ::Type{Normal}, x::Array{T}, w::Array{Float64})
tau2 = rand(posterior(prior, Normal, x, w))
return Normal(prior[1], 1./sqrt(tau2))
end
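# A minimal usage sketch of the tuple priors (hypothetical numbers, not
# part of the library): fix one Normal parameter and put a conjugate
# prior on the other.
#
# x = rand(Normal(2., 3.), 500)
# obs1 = posterior_sample((Normal(0., 10.), 3.), Normal, x)      # sigma known to be 3
# obs2 = posterior_sample((2., InverseGamma(2., 1.)), Normal, x) # mu known, prior on variance
# obs3 = posterior_sample((2., Gamma(2., 1.)), Normal, x)        # mu known, prior on precision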
@@ -291,3 +291,6 @@ posterior_make{D<:Distribution}(::Type{D}, θ) = D(θ)
fit_map{D<:Distribution}(pri::Distribution, ::Type{D}, x) = posterior_make(D, posterior_mode(pri, D, x))
fit_map{D<:Distribution}(pri::Distribution, ::Type{D}, x, w) = posterior_make(D, posterior_mode(pri, D, x, w))
posterior_sample{D<:Distribution}(pri::Distribution, G::Type{D}, x) = D(rand(posterior(pri, G, x))...)
posterior_sample{D<:Distribution}(pri::Distribution, G::Type{D}, x, w) = D(rand(posterior(pri, G, x, w))...)
posterior_sample{D<:Distribution,G<:Distribution}(post::D, ::Type{G}) = G(rand(post)...)
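# A minimal sketch of the generic fallback above (hypothetical numbers):
# any conjugate pair with a posterior method should work, e.g. Beta-Bernoulli:
#
# x = rand(Bernoulli(0.3), 100)
# obs = posterior_sample(Beta(1., 1.), Bernoulli, x)  # Bernoulli(p), p drawn from the Beta posterior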
@@ -27,6 +27,10 @@ function insupport(IW::InverseWishart, X::Matrix{Float64})
return size(X, 1) == size(X, 2) && isApproxSymmmetric(X) &&
size(X, 1) == size(IW.Psichol, 1) && hasCholesky(X)
end
# This just checks if X could come from any Inverse-Wishart
function insupport(::Type{InverseWishart}, X::Matrix{Float64})
return size(X, 1) == size(X, 2) && isApproxSymmmetric(X) && hasCholesky(X)
end
function mean(IW::InverseWishart)
if IW.nu > size(IW.Psichol, 1) + 1
@@ -27,6 +27,10 @@ function insupport(W::Wishart, X::Matrix{Float64})
return size(X, 1) == size(X, 2) && isApproxSymmmetric(X) &&
size(X, 1) == size(W.Schol, 1) && hasCholesky(X)
end
# This just checks if X could come from any Wishart
function insupport(::Type{Wishart}, X::Matrix{Float64})
return size(X, 1) == size(X, 2) && isApproxSymmmetric(X) && hasCholesky(X)
end
mean(w::Wishart) = w.nu * w.Schol[:U]' * w.Schol[:U]
@@ -65,12 +69,3 @@ function rand(w::Wishart)
end
var(w::Wishart) = error("Not yet implemented")
# multivariate gamma / partial gamma function
function lpgamma(p::Int64, a::Float64)
res::Float64 = p * (p - 1.0) / 4.0 * log(pi)
for ii in 1:p
res += lgamma(a + (1.0 - ii) / 2.0)
end
return res
end
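# A quick sanity check (a sketch): for p = 1 the multivariate log-gamma
# reduces to the ordinary log-gamma, and for p = 2 it expands to
# 0.5*log(pi) + lgamma(a) + lgamma(a - 0.5).
#
# lpgamma(1, 3.5) - lgamma(3.5)  # ~ 0.0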