Now OpenAI has released GPT-5 and removed the earlier models from ChatGPT. For the mixture-of-normals prompt above, it produced working code after a few iterations.
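For reference, the code implements the standard EM updates for a univariate Gaussian mixture with weights $w_j$, means $\mu_j$, and standard deviations $\sigma_j$. The E-step computes responsibilities

$$r_{ij} = \frac{w_j\,\mathcal N(y_i \mid \mu_j, \sigma_j^2)}{\sum_{l=1}^{k} w_l\,\mathcal N(y_i \mid \mu_l, \sigma_l^2)},$$

and the M-step re-estimates the parameters from them:

$$n_j = \sum_{i=1}^n r_{ij}, \qquad w_j = \frac{n_j}{n}, \qquad \mu_j = \frac{1}{n_j}\sum_{i=1}^n r_{ij}\, y_i, \qquad \sigma_j^2 = \frac{1}{n_j}\sum_{i=1}^n r_{ij}\,(y_i - \mu_j)^2.$$

The code carries out the E-step in log space, using a logsumexp function for numerical stability. Here is the generated code: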
module constants_mod
   implicit none
   private
   public :: dp, twopi, logtwopi
   integer, parameter :: dp = kind(1.0d0)
   real(kind=dp), parameter :: twopi = 6.283185307179586476925286766559_dp
   real(kind=dp), parameter :: logtwopi = 1.837877066409345483560659472811_dp
end module constants_mod
module mixture_mod
   use constants_mod, only: dp, logtwopi
   implicit none
   private
   public :: mixture_params, init_mixture, em_fit_mixture, print_mixture
   type mixture_params
      integer :: k = 0
      real(kind=dp), allocatable :: w(:)      ! component weights
      real(kind=dp), allocatable :: mu(:)     ! component means
      real(kind=dp), allocatable :: sigma(:)  ! component standard deviations
   end type mixture_params
contains
   pure elemental function normal_logpdf(x, m, s) result(lp)
      ! log-density at x of the normal distribution with mean m and std dev s
      real(kind=dp), intent(in) :: x, m, s
      real(kind=dp) :: lp
      lp = -0.5_dp*(logtwopi + 2.0_dp*log(s) + ((x - m)/s)**2)
   end function normal_logpdf
   pure function logsumexp_vec(v) result(ls)
      ! numerically stable log(sum(exp(v)))
      real(kind=dp), intent(in) :: v(:)
      real(kind=dp) :: ls, vmax
      vmax = maxval(v)
      ls = vmax + log(sum(exp(v - vmax)))
   end function logsumexp_vec
   pure elemental function clamp(x, a, b) result(y)
      ! restrict x to the interval [a, b]
      real(kind=dp), intent(in) :: x, a, b
      real(kind=dp) :: y
      if (x < a) then
         y = a
      else if (x > b) then
         y = b
      else
         y = x
      end if
   end function clamp
   subroutine init_mixture(y, k, mix)
      ! initialize with equal weights, means at evenly spaced quantiles
      ! of the data, and a common standard deviation
      real(kind=dp), intent(in) :: y(:)
      integer, intent(in) :: k
      type(mixture_params), intent(out) :: mix
      real(kind=dp) :: ymean, ystd
      integer :: i, n
      real(kind=dp), allocatable :: ys(:), q(:)
      n = size(y)
      mix%k = k
      allocate(mix%w(k), mix%mu(k), mix%sigma(k))
      ymean = sum(y)/real(n, dp)
      ystd = sqrt(max(sum((y - ymean)**2)/real(max(n-1, 1), dp), 1.0e-6_dp))
      mix%w = 1.0_dp/real(k, dp)
      allocate(ys(n), q(k))
      ys = y
      call sort_inplace(ys)
      do i = 1, k
         q(i) = percentile_of_sorted(ys, (real(i, dp) - 0.5_dp)/real(k, dp))
      end do
      mix%mu = q
      mix%sigma = ystd
      deallocate(ys, q)
   end subroutine init_mixture
   subroutine em_fit_mixture(y, mix, max_iter, tol, min_sigma, seed, converged, n_iter)
      real(kind=dp), intent(in) :: y(:)
      type(mixture_params), intent(inout) :: mix
      integer, intent(in), optional :: max_iter
      real(kind=dp), intent(in), optional :: tol
      real(kind=dp), intent(in), optional :: min_sigma
      integer, intent(in), optional :: seed   ! accepted but unused: EM is deterministic given the init
      logical, intent(out), optional :: converged
      integer, intent(out), optional :: n_iter
      integer :: it, itmax, k, n, i, j
      real(kind=dp) :: eps, minsig
      real(kind=dp) :: ll, ll_prev, diff
      real(kind=dp), allocatable :: r(:,:), logw(:), logpdf(:,:), logpost(:,:), denom(:)
      real(kind=dp), allocatable :: nk(:), mu_new(:), sigma_new(:), w_new(:)
      real(kind=dp) :: num, den, s2
      ! merge() may evaluate both alternatives, so it must not be used to
      ! reference a possibly absent optional argument; set defaults explicitly
      itmax = 500
      if (present(max_iter)) itmax = max_iter
      eps = 1.0e-8_dp
      if (present(tol)) eps = tol
      minsig = 1.0e-6_dp
      if (present(min_sigma)) minsig = min_sigma
      k = mix%k
      n = size(y)
      allocate(r(n,k), logw(k), logpdf(n,k), logpost(n,k), denom(n))
      allocate(nk(k), mu_new(k), sigma_new(k), w_new(k))
      ll_prev = -huge(1.0_dp)
      do it = 1, itmax
         ! E-step: responsibilities r(i,j) computed in log space
         do j = 1, k
            logw(j) = log(clamp(mix%w(j), 1.0e-300_dp, 1.0e+300_dp))
         end do
         do j = 1, k
            do i = 1, n
               logpdf(i,j) = normal_logpdf(y(i), mix%mu(j), mix%sigma(j))
            end do
         end do
         do i = 1, n
            do j = 1, k
               logpost(i,j) = logw(j) + logpdf(i,j)
            end do
            denom(i) = logsumexp_vec(logpost(i,:))
            do j = 1, k
               r(i,j) = exp(logpost(i,j) - denom(i))
            end do
         end do
         ll = sum(denom)   ! log-likelihood of the current parameters
         ! M-step: update weights, means, and standard deviations
         do j = 1, k
            nk(j) = sum(r(:,j))
         end do
         do j = 1, k
            if (nk(j) <= 0.0_dp) nk(j) = 1.0e-12_dp
         end do
         do j = 1, k
            num = 0.0_dp
            do i = 1, n
               num = num + r(i,j)*y(i)
            end do
            den = nk(j)
            mu_new(j) = num/den
         end do
         do j = 1, k
            s2 = 0.0_dp
            do i = 1, n
               s2 = s2 + r(i,j)*(y(i) - mu_new(j))**2
            end do
            s2 = s2/nk(j)
            sigma_new(j) = sqrt(max(s2, minsig))
         end do
         w_new = nk/real(n, dp)
         do j = 1, k
            sigma_new(j) = max(sigma_new(j), minsig)
            w_new(j) = max(w_new(j), 1.0e-12_dp)
         end do
         w_new = w_new/sum(w_new)
         diff = abs(ll - ll_prev)
         mix%w = w_new
         mix%mu = mu_new
         mix%sigma = sigma_new
         if (diff < eps) exit
         ll_prev = ll
      end do
      ! on a normal (non-converged) exit from the loop it = itmax + 1,
      ! so convergence corresponds to it <= itmax
      if (present(converged)) converged = (it <= itmax)
      if (present(n_iter)) n_iter = min(it, itmax)
      deallocate(r, logw, logpdf, logpost, denom, nk, mu_new, sigma_new, w_new)
   end subroutine em_fit_mixture
   subroutine print_mixture(mix, label)
      type(mixture_params), intent(in) :: mix
      character(*), intent(in) :: label
      integer :: j
      write(*,'(a)') repeat("-", 58)
      write(*,'(a)') trim(label)
      write(*,'(a)') repeat("-", 58)
      write(*,'(a)') "   j           weight             mean            sigma"
      write(*,'(a)') repeat("-", 58)
      do j = 1, mix%k
         write(*,'(i4,3(2x,es15.7))') j, mix%w(j), mix%mu(j), mix%sigma(j)
      end do
      write(*,'(a)') repeat("-", 58)
   end subroutine print_mixture
   subroutine sort_inplace(x)
      ! insertion sort (O(n**2)); adequate for n of a few thousand
      real(kind=dp), intent(inout) :: x(:)
      integer :: i, j, n
      real(kind=dp) :: key
      n = size(x)
      do i = 2, n
         key = x(i)
         j = i - 1
         do
            if (j < 1) exit
            if (x(j) > key) then
               x(j+1) = x(j)
               j = j - 1
            else
               exit
            end if
         end do
         x(j+1) = key
      end do
   end subroutine sort_inplace
   pure function percentile_of_sorted(xs, p) result(q)
      ! quantile of a sorted array by linear interpolation
      ! between adjacent order statistics
      real(kind=dp), intent(in) :: xs(:)
      real(kind=dp), intent(in) :: p
      real(kind=dp) :: q
      integer :: n
      real(kind=dp) :: pp, idx, frac
      integer :: i0, i1
      n = size(xs)
      pp = clamp(p, 0.0_dp, 1.0_dp)
      idx = 1.0_dp + pp*real(max(n-1, 0), dp)
      i0 = min(max(int(floor(idx)), 1), n)
      i1 = min(max(int(ceiling(idx)), 1), n)
      frac = idx - real(i0, dp)
      q = (1.0_dp - frac)*xs(i0) + frac*xs(i1)
   end function percentile_of_sorted
end module mixture_mod
program test_mixture_em
   use constants_mod, only: dp
   use mixture_mod
   implicit none
   integer :: n, k_true, k_fit, i, j, iters
   type(mixture_params) :: mix_true, mix_est
   real(kind=dp), allocatable :: y(:)
   real(kind=dp), allocatable :: u(:), z(:)
   real(kind=dp), allocatable :: cumw(:)
   integer, allocatable :: comp(:)
   real(kind=dp) :: tol, minsig
   logical :: ok
   integer :: nseed, clock
   integer, allocatable :: seed_put(:)
   n = 5000
   k_true = 3
   k_fit = 3
   allocate(y(n), u(n), z(n), comp(n))
   ! true three-component mixture to be recovered
   allocate(mix_true%w(k_true), mix_true%mu(k_true), mix_true%sigma(k_true))
   mix_true%k = k_true
   mix_true%w = [0.55_dp, 0.35_dp, 0.10_dp]
   mix_true%mu = [-1.0_dp, 1.5_dp, 4.0_dp]
   mix_true%sigma = [0.7_dp, 0.6_dp, 1.2_dp]
   ! portable seeding: query seed length and fill from system_clock
   call random_seed(size=nseed)
   allocate(seed_put(nseed))
   call system_clock(count=clock)
   do i = 1, nseed
      seed_put(i) = clock + 37*i
   end do
   call random_seed(put=seed_put)
   deallocate(seed_put)
   ! cumulative weights for sampling component labels
   allocate(cumw(k_true))
   cumw(1) = mix_true%w(1)
   do j = 2, k_true
      cumw(j) = cumw(j-1) + mix_true%w(j)
   end do
   call random_number(u)
   call randn_array(z)
   ! draw a component label for each observation by inverting the cumulative
   ! weights, then sample from that component's normal distribution
   do i = 1, n
      if (u(i) < cumw(1)) then
         comp(i) = 1
      else if (u(i) < cumw(2)) then
         comp(i) = 2
      else
         comp(i) = 3
      end if
      j = comp(i)
      y(i) = mix_true%mu(j) + mix_true%sigma(j)*z(i)
   end do
   call print_mixture(mix_true, "true mixture parameters")
   call init_mixture(y, k_fit, mix_est)
   tol = 1.0e-8_dp
   minsig = 1.0e-6_dp
   call em_fit_mixture(y, mix_est, max_iter=1000, tol=tol, min_sigma=minsig, converged=ok, n_iter=iters)
   call print_mixture(mix_est, "estimated mixture parameters (em)")
contains
   subroutine randn_array(z)
      ! standard normal deviates via the Box-Muller transform
      use constants_mod, only: dp, twopi
      real(kind=dp), intent(out) :: z(:)
      integer :: m, i
      real(kind=dp), allocatable :: u1(:), u2(:)
      real(kind=dp) :: r, theta
      m = size(z)
      allocate(u1((m+1)/2), u2((m+1)/2))
      call random_number(u1)
      call random_number(u2)
      do i = 1, size(u1)
         u1(i) = max(u1(i), 1.0e-12_dp)   ! avoid log(0)
         r = sqrt(-2.0_dp*log(u1(i)))
         theta = twopi*u2(i)
         z(2*i-1) = r*cos(theta)
         if (2*i <= m) then
            z(2*i) = r*sin(theta)
         end if
      end do
      deallocate(u1, u2)
   end subroutine randn_array
end program test_mixture_em
with sample output
----------------------------------------------------------
true mixture parameters
----------------------------------------------------------
   j           weight             mean            sigma
----------------------------------------------------------
   1    5.5000000E-01   -1.0000000E+00    7.0000000E-01
   2    3.5000000E-01    1.5000000E+00    6.0000000E-01
   3    1.0000000E-01    4.0000000E+00    1.2000000E+00
----------------------------------------------------------
----------------------------------------------------------
estimated mixture parameters (em)
----------------------------------------------------------
   j           weight             mean            sigma
----------------------------------------------------------
   1    5.5237690E-01   -1.0101226E+00    6.8874118E-01
   2    3.5657542E-01    1.5156374E+00    6.2589453E-01
   3    9.1047677E-02    4.0244089E+00    1.1813303E+00
----------------------------------------------------------
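The recovered weights, means, and standard deviations are close to the true values. One way to check the fit beyond the parameter table is to evaluate the fitted density on a grid and compare it with the true density. A minimal sketch of such a helper, not part of the generated code, written to live inside mixture_mod (and added to its public list) so it can call the private normal_logpdf:

   ! hypothetical addition to mixture_mod: density of the mixture at x
   pure function mixture_pdf(mix, x) result(p)
      type(mixture_params), intent(in) :: mix
      real(kind=dp), intent(in) :: x
      real(kind=dp) :: p
      integer :: j
      p = 0.0_dp
      do j = 1, mix%k
         p = p + mix%w(j)*exp(normal_logpdf(x, mix%mu(j), mix%sigma(j)))
      end do
   end function mixture_pdf

After em_fit_mixture returns, mixture_pdf(mix_est, x) could then be compared with mixture_pdf(mix_true, x) at a few points x.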
I can’t say yet whether GPT-5 is better than o3.