cleanup and make presentable

This commit is contained in:
Remy Moll 2025-02-03 20:14:33 +01:00
parent da8a7d4574
commit facb52b33e
12 changed files with 734 additions and 443 deletions

View File

@ -2,7 +2,7 @@
#import "@preview/based:0.2.0": base64
#let code_font_scale = 0.6em
#let code_font_scale = 0.5em
#let cell_matcher(cell, cell_tag) = {
// Matching function to check if a cell has a specific tag

Binary file not shown.

View File

@ -1,8 +1,5 @@
#import "@preview/diatypst:0.2.0": *
// #set text(font: "Cantarell")
// #set heading(numbering: (..nums)=>"")
#show: slides.with(
title: "N-Body project ",
subtitle: "Computational Astrophysics, HS24",
@ -13,15 +10,13 @@
// ratio: 16/9,
)
#show footnote.entry: set text(size: 0.6em)
#set footnote.entry(gap: 3pt)
#set align(horizon)
#import "helpers.typ"
// KINDA COOL:
// _diatypst_ defines some default styling for elements, e.g Terms created with ```typc / Term: Definition``` will look like this
// / *Term*: Definition
// Setup of code location
#let t1 = json("../task1.ipynb")
@ -41,10 +36,7 @@
== Overview - the system
Get a feel for the particles and their distribution. [#link(<task1:plot_particle_distribution>)[code]]
Get a feel for the particles and their distribution
#columns(2)[
#helpers.image_cell(t1, "plot_particle_distribution")
// Note: for visibility the outer particles are not shown.
@ -54,15 +46,24 @@ Get a feel for the particles and their distribution. [#link(<task1:plot_particle
- a _spherical_ distribution
$==>$ treat the system as a *globular cluster*
#footnote[Unit handling [#link(<task1:function_apply_units>)[code]]]
]
// It is a small globular cluster with
// - 5*10^4 stars => m in terms of msol
// - radius - 10 pc
// Densities are now expressed in M_sol / pc^3
// Forces are now expressed
== Density
We compare the computed density with the analytical model provided by the _Hernquist_ model:
Compare the computed density
#footnote[Density sampling [#link(<task1:function_density_distribution>)[code]]]
with the analytical _Hernquist_ density profile:
#grid(
columns: (1fr, 2fr),
columns: (3fr, 4fr),
inset: 0.5em,
block[
$
@ -72,15 +73,12 @@ We compare the computed density with the analytical model provided by the _Hernq
$
r_"hm" = (1 + sqrt(2)) dot a
$
#text(size: 0.6em)[
Density sampling [#link(<task1:function_density_distribution>)[code]];
]
],
block[
#helpers.image_cell(t1, "plot_density_distribution")
]
)
// Note that by construction, the first shell contains no particles
// => the numerical density is zero there
// Having more bins means to have shells that are nearly empty
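A minimal sketch of the shell-based sampling (not the notebook's `function_density_distribution`; the bin edges and the Hernquist parameters `M`, `a` are placeholders):
```python
import numpy as np

def hernquist_density(r, M=5.0, a=5.0):
    # analytical Hernquist profile: rho(r) = M / (2 pi) * a / (r * (r + a)^3)
    return M / (2 * np.pi) * a / (r * (r + a) ** 3)

def shell_density(particles, r_bins):
    # numerical density: total particle mass per spherical shell / shell volume
    r = np.linalg.norm(particles[:, :3], axis=1)
    mass, _ = np.histogram(r, bins=r_bins, weights=particles[:, 3])
    volume = 4 / 3 * np.pi * (r_bins[1:] ** 3 - r_bins[:-1] ** 3)
    return mass / volume
```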
@ -89,45 +87,41 @@ We compare the computed density with the analytical model provided by the _Hernq
== Force computation
// N Body and variations
#grid(
columns: (2fr, 1fr),
columns: (3fr, 2fr),
inset: 0.5em,
block[
#helpers.image_cell(t1, "plot_force_radial")
// The radial force is computed as the sum of the forces of all particles in the system.
#text(size: 0.6em)[
Analytical force [#link(<task1:function_analytical_forces>)[code]];
$N^2$ force [#link(<task1:function_n2_forces>)[code]];
$epsilon$ computation [#link(<task1:function_interparticle_distance>)[code]];
]
],
block[
Discussion:
- the analytical method replicates the behavior accurately
- at small softenings the $N^2$ method has noisy artifacts
- a $1 dot epsilon$ softening is a good compromise between accuracy and stability
- the analytical
#footnote[Analytical force [#link(<task1:function_analytical_forces>)[code]]]
method replicates the behavior accurately
- at small softenings the $N^2$
#footnote[$N^2$ force [#link(<task1:function_n2_forces>)[code]]]
method has noisy artifacts
- a $1 dot epsilon$
#footnote[$epsilon$ computation [#link(<task1:function_interparticle_distance>)[code]]]
softening is a good compromise between accuracy and stability
]
)
// basic $N^2$ matches analytical solution without dropoff. but: noisy data from "bad" samples
// $N^2$ with softening matches analytical solution but has a dropoff. No noisy data.
// => softening $\approx 1 \varepsilon$ is a sweet spot since the dropoff is "late"
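For reference, a minimal (unvectorized) sketch of the softened $N^2$ sum; it is not the notebook's `function_n2_forces`, and `G` and `epsilon` are free parameters here:
```python
import numpy as np

def n2_forces_sketch(particles, G=1.0, epsilon=0.1):
    # direct O(N^2) sum with Plummer softening:
    # F_i = sum_j G m_i m_j (r_j - r_i) / (|r_j - r_i|^2 + eps^2)^(3/2)
    pos, m = particles[:, :3], particles[:, 3]
    forces = np.zeros_like(pos)
    for i in range(len(pos)):
        diff = pos - pos[i]
        dist_sq = np.sum(diff ** 2, axis=1) + epsilon ** 2
        dist_sq[i] = np.inf  # exclude self-interaction
        forces[i] = G * m[i] * np.sum(m[:, None] * diff / dist_sq[:, None] ** 1.5, axis=0)
    return forces
```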
== Relaxation
We express the system's relaxation in terms of its dynamical time.
$
t_"relax" = overbrace(N / (8 log N), n_"relax") dot t_"crossing"
$
where the crossing time can be estimated from the velocity at the half-mass radius: $t_"crossing" = r_"hm" / v(r_"hm")$.
We find a relaxation of [#link(<task1:compute_relaxation_time>)[code]].
We find a relaxation time of $approx 30 "Myr"$ ([#link(<task1:compute_relaxation_time>)[code]])
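As a sketch of the estimate (the reported value comes from the linked `compute_relaxation_time` cell; `v_hm` stands for the typical particle speed at the half-mass radius):
```python
import numpy as np

def relaxation_time(N, r_hm, v_hm):
    # t_relax = N / (8 ln N) * t_crossing, with t_crossing = r_hm / v_hm
    return N / (8 * np.log(N)) * (r_hm / v_hm)
```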
// === Discussion
#grid(
columns: (1fr, 1fr),
inset: 0.5em,
@ -140,6 +134,7 @@ We find a relaxation of [#link(<task1:compute_relaxation_time>)[code]].
- $=>$ relaxation time increases
]
)
// The estimate for $n_{relax}$ comes from the contribution of each star-star encounter to the velocity dispersion. This depends on the perpendicular force
// $\implies$ a bigger softening length leads to a smaller $\delta v$.
@ -164,7 +159,7 @@ We find a relaxation of [#link(<task1:compute_relaxation_time>)[code]].
)[
#helpers.image_cell(t2, "plot_particle_distribution")
$=>$ use $M_"sys" approx 10^4 M_"sol" + M_"BH"$
$==>$ use $M_"sys" approx 10^4 M_"sol" + M_"BH"$
]
@ -180,55 +175,83 @@ We find a relaxation of [#link(<task1:compute_relaxation_time>)[code]].
inset: 0.5em,
block[
#helpers.image_cell(t2, "plot_force_radial_single")
// The radial force is computed as the sum of the forces of all particles in the system.
#text(size: 0.6em)[
$N^2$ force [#link(<task1:function_n2_forces>)[code]];
$epsilon$ computation [#link(<task1:function_interparticle_distance>)[code]];
Mesh force [#link(<task2:function_mesh_force>)[code]];
]
],
block[
Discussion:
- using the (established) baseline of $N^2$ with $1 dot epsilon$ softening
- small grids are stable but inaccurate at the center
- using the (established) baseline of $N^2$
#footnote[$N^2$ force [#link(<task1:function_n2_forces>)[code]]]
with $1 dot epsilon$
#footnote[$epsilon$ computation [#link(<task1:function_interparticle_distance>)[code]]]
softening
- small grids
#footnote[Mesh force [#link(<task2:function_mesh_force>)[code]]]
are stable but inaccurate at the center
- very large grids have issues with overdiscretization
$==> 75 times 75 times 75$ as a good compromise
]
)
// Some other comments:
// - see the artifacts because of the even grid numbers (hence the switch to 75)
// overdiscretization for large grids -> vertical spread even though r is constant
// this becomes even more apparent when looking at the data without noise - the artifacts remain
]
)
//
// We cannot rely on the interparticle distance computation for a disk!
// The given softening length 0.037 does not match the mean interparticle distance 0.0262396757880128
//
// Discussion of the discrepancies
// TODO
#helpers.image_cell(t2, "plot_force_computation_time")
// Computed for 10^4 particles => mesh will scale better for larger systems
== Time integration
=== Runge-Kutta
*Integration step*
#helpers.code_reference_cell(t2, "function_runge_kutta")
*Timesteps*
Chosen such that displacement is small (compared to the inter-particle distance) [#link(<task2:integration_timestep>)[code]]:
$
op(d)t = 10^(-4) dot S / v_"part"
$
// too large timesteps lead to unstable systems <=> integration not accurate enough
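A sketch of that choice; the scale `S` and the velocity estimate are assumptions here, the notebook's exact definitions are in the linked cell:
```python
import numpy as np

def choose_timestep(particles, S, prefactor=1e-4):
    # dt = prefactor * S / v_part, so a step displaces a typical particle by ~prefactor * S
    v_part = np.mean(np.linalg.norm(particles[:, 3:6], axis=1))
    return prefactor * S / v_part
```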
*Full integration*
[#link(<task2:function_time_integration>)[code]]
#pagebreak()
=== Results
#align(center, block(
height: 1fr,
)[
== First results
#helpers.image_cell(t2, "plot_system_evolution")
])
== Varying the softening
#helpers.image_cell(t2, "plot_second_system_evolution")
== Stability [#link("../task2_nsquare_integration.gif")[1 epsilon]]
#page(
columns: 2
)[
#helpers.image_cell(t2, "plot_integration_stability")
]
== Particle mesh solver
Time integration using the particle-mesh force solver:
#helpers.image_cell(t2, "plot_pm_solver_integration")
#helpers.image_cell(t2, "plot_pm_solver_stability")
= Appendix - Code <appendix>
== Code
#helpers.code_cell(t1, "plot_particle_distribution")
<task1:plot_particle_distribution>
#helpers.code_reference_cell(t1, "function_apply_units")
<task1:function_apply_units>
#pagebreak(weak: true)
@ -260,6 +283,15 @@ sdlsd
#helpers.code_reference_cell(t2, "function_mesh_force")
<task2:function_mesh_force>
#pagebreak(weak: true)
#helpers.code_cell(t2, "integration_timestep")
<task2:integration_timestep>
#pagebreak(weak: true)
#helpers.code_cell(t2, "function_time_integration")
<task2:function_time_integration>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Binary file not shown.


View File

@ -13,7 +13,7 @@ def cached_forces(cache_path: Path, particles: np.ndarray, force_function:callab
n_particles = particles.shape[0]
kwargs_str = "_".join([f"{k}_{v}" for k, v in func_kwargs.items()])
kwargs_str = kwargs_to_str(func_kwargs)
force_cache = cache_path / f"forces__{force_function.__name__}__n_{n_particles}__kwargs_{kwargs_str}.npy"
time_cache = cache_path / f"time__{force_function.__name__}__n_{n_particles}__kwargs_{kwargs_str}.npy"
@ -26,8 +26,27 @@ def cached_forces(cache_path: Path, particles: np.ndarray, force_function:callab
force = force_function(particles, **func_kwargs)
np.save(force_cache, force)
time = 0
np.info(f"Timing {force_function.__name__} for {n_particles} particles")
logger.info(f"Timing {force_function.__name__} for {n_particles} particles")
time = timeit.timeit(lambda: force_function(particles, **func_kwargs), number=10)
np.save(time_cache, time)
return force, time
def kwargs_to_str(kwargs: dict):
"""
Converts a dictionary of keyword arguments to a string.
"""
base_str = ""
for k, v in kwargs.items():
if isinstance(v, float):
base_str += f"{k}_{v:.3f}"
elif callable(v):
# note: `type(v) == callable` never matches a function; callable(v) does
base_str += f"{k}_{v.__name__}"
else:
base_str += f"{k}_{v}"
base_str += "__"
return base_str
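# Hypothetical usage sketch (the force function name and keyword arguments are
# placeholders, not necessarily the project's actual API):
#
#   force, elapsed = cached_forces(Path("cache"), particles, n2_forces, epsilon=0.037)
#
# A second call with the same function, particle count and kwargs loads the cached
# .npy files instead of recomputing and re-timing the forces.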

View File

@ -71,7 +71,6 @@ def mesh_poisson(mesh: np.ndarray, G: float, spacing: float) -> np.ndarray:
logger.debug(f"Proceeding to poisson equation with {rho_hat.shape=}, {k_inv.shape=}")
grad_phi_hat = - 4 * np.pi * G * rho_hat * k_inv * 1j
# nabla^2 phi = 4 pi G rho  =>  phi_hat = -4 pi G rho_hat / k^2  =>  (grad phi)_hat = i k phi_hat = -4 pi i G rho_hat k / k^2
# TODO: check minus
grad_phi = np.real(fft.ifftn(grad_phi_hat))
return grad_phi
@ -133,9 +132,7 @@ def mesh_poisson(mesh: np.ndarray, G: float, spacing: float) -> np.ndarray:
rho_hat = fft.fftn(mesh)
# we also need the wave numbers
spacing_3d = np.linalg.norm([spacing, spacing, spacing])
k = fft.fftfreq(mesh.shape[0], spacing) * (2 * np.pi)
# TODO: check if this is correct
# assuming the grid is cubic
kx, ky, kz = np.meshgrid(k, k, k)
k_sr = kx**2 + ky**2 + kz**2
@ -145,10 +142,9 @@ def mesh_poisson(mesh: np.ndarray, G: float, spacing: float) -> np.ndarray:
logger.debug(f"Count of ksquare zeros: {np.sum(k_sr == 0)}")
show_mesh_information(np.abs(k_sr), "k_square")
k_sr[k_sr == 0] = np.inf
k_sr[k_sr == 0] = 1e-10
# k_inv = k_vec / k_sr # allows for element-wise division
# logger.debug(f"Proceeding to poisson equation with {rho_hat.shape=}, {k_inv.shape=}")
phi_hat = - 4 * np.pi * G * rho_hat / k_sr
# nabla^2 phi = 4 pi G rho  =>  -k^2 phi_hat = 4 pi G rho_hat
# => phi_hat = - 4 pi G rho_hat / k^2
@ -156,6 +152,7 @@ def mesh_poisson(mesh: np.ndarray, G: float, spacing: float) -> np.ndarray:
return phi
#### Helper functions for star mapping
def create_mesh(min_pos: float, max_pos: float, n_grid: int) -> tuple[np.ndarray, np.ndarray, float]:
"""
@ -239,3 +236,57 @@ def mesh_plot_2d(mesh: np.ndarray, name: str, only_z: bool = False):
axs[2].imshow(np.sum(mesh, axis=2), origin='lower')
axs[2].set_title("Flattened in z")
plt.show()
##################################
# For the presentation - without logging
def mesh__forces(particles: np.ndarray, G: float = 1, n_grid: int = 50, mapping: callable = None) -> np.ndarray:
"""
Computes the gravitational force acting on a set of particles using a mesh-based approach.
Assumes that the particles array has the following columns: x, y, z, m.
"""
max_pos = np.max(np.abs(particles[:, :3]))
mesh, axis, spacing = create_mesh(-max_pos, max_pos, n_grid)
fill_mesh(particles, mesh, axis, mapping)
# we want a density mesh:
cell_volume = spacing**3
rho = mesh / cell_volume
# compute the potential and its gradient
phi = mesh_poisson(rho, G, spacing)
# get the acceleration from finite differences of the potential
ax, ay, az = np.gradient(phi, spacing)
a_vec = - np.stack([ax, ay, az], axis=0)
# compute the particle forces from the mesh potential
forces = np.zeros_like(particles[:, :3])
ijks = np.digitize(particles[:, :3], axis) - 1
for i in range(particles.shape[0]):
m = particles[i, 3]
idx = ijks[i]
forces[i] = m * a_vec[..., idx[0], idx[1], idx[2]]
return forces
def mesh__poisson(mesh: np.ndarray, G: float, spacing: float) -> np.ndarray:
"""
Solves the poisson equation for the mesh using the FFT.
Returns the potential phi.
"""
rho_hat = fft.fftn(mesh)
# we also need the wave numbers
k = fft.fftfreq(mesh.shape[0], spacing) * (2 * np.pi)
# assuming the grid is cubic
kx, ky, kz = np.meshgrid(k, k, k)
k_sr = kx**2 + ky**2 + kz**2
k_sr[k_sr == 0] = np.inf
phi_hat = - 4 * np.pi * G * rho_hat / k_sr
return np.real(fft.ifftn(phi_hat))
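# Hypothetical usage of the presentation-level helpers above (n_grid and G are
# placeholders; `mapping` must be one of the project's mesh assignment schemes):
#
#   forces = mesh__forces(particles, G=1.0, n_grid=75, mapping=mapping_scheme)
#
# mesh__forces bins the particles onto a density mesh, solves the Poisson equation
# for the potential in Fourier space, differentiates it with np.gradient and reads
# the acceleration back at each particle's grid cell.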

View File

@ -77,7 +77,8 @@ def to_particles_3d(y: np.ndarray) -> np.ndarray:
n_steps = y.shape[0]
n_particles = y.shape[1] // 7
y = y.reshape((n_steps, n_particles, 7))
# logger.debug(f"Unflattened array into {y.shape=}")
logger.info(f"Unflattened array into {y.shape=}")
return y

View File

@ -12,3 +12,11 @@ def model_density_distribution(r_bins: np.ndarray, M: float = 5, a: float = 5) -
"""
rho = M / (2 * np.pi) * a / (r_bins * (r_bins + a)**3)
return rho
def model_particle_count(r_bins: np.ndarray, M: float = 5, a: float = 5, mi: float = 1) -> np.ndarray:
"""
Expected number of particles per radial shell of the Hernquist model,
for total mass M, scale length a and individual particle mass mi.
"""
rho = model_density_distribution(r_bins, M, a)
v_shells = 4/3 * np.pi * (r_bins[1:]**3 - r_bins[:-1]**3)
v_shells = np.insert(v_shells, 0, 4/3 * np.pi * r_bins[0]**3)
n_shells = rho * v_shells / mi
return n_shells
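# Sketch of how the expected counts could be evaluated (bin edges and the particle
# mass mi are placeholders):
#
#   r_bins = np.linspace(0.1, 10, 50)
#   expected = model_particle_count(r_bins, M=5, a=5, mi=1e-4)
#
# Comparing this with the measured per-shell counts shows which shells are nearly
# empty and where the sampled density becomes unreliable.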

View File

@ -1,6 +1,9 @@
import numpy as np
import matplotlib.pyplot as plt
from astropy import units as u
from matplotlib.animation import FuncAnimation
from pathlib import Path
from .units import apply_units
import logging
logger = logging.getLogger(__name__)
@ -42,6 +45,16 @@ def density_distribution(r_bins: np.ndarray, particles: np.ndarray, ret_error: b
else:
return density
def particle_count(r_bins, particles):
"""
Counts the particles in each radial shell defined by r_bins.
Assumes that the particles array has the following columns: x, y, z, ...
"""
r = np.linalg.norm(particles[:, :3], axis=1)
# r_bins = np.insert(r_bins, 0, 0)
count = np.zeros_like(r_bins)
for i in range(len(r_bins) - 1):
mask = (r >= r_bins[i]) & (r < r_bins[i+1])
count[i] = np.count_nonzero(mask)
return count
def r_distribution(particles: np.ndarray):
@ -94,17 +107,16 @@ def mean_interparticle_distance(particles: np.ndarray):
epsilon = (1 / rho)**(1/3)
logger.info(f"Found mean interparticle distance: {epsilon}")
return epsilon
# TODO: check if this is correct
def half_mass_radius(particles: np.ndarray):
"""
Computes the half mass radius of a set of particles.
Assumes that the particles array has the following columns: x, y, z ...
Assumes that the particles array has the following columns: x, y, z, m, ...
"""
if particles.shape[1] < 3:
raise ValueError("Particles array must have at least 3 columns: x, y, z")
if particles.shape[1] < 4:
raise ValueError("Particles array must have at least 4 columns: x, y, z, m")
# even though in the simple example, all the masses are the same, we will consider the general case
total_mass = np.sum(particles[:, 3])
@ -125,31 +137,6 @@ def half_mass_radius(particles: np.ndarray):
def total_energy(particles: np.ndarray):
"""
Computes the total energy of a set of particles.
Assumes that the particles array has the following columns: x, y, z, vx, vy, vz, m.
Uses the approximation that the particles are in a central potential as computed in analytical.py
"""
if particles.shape[1] != 7:
raise ValueError("Particles array must have 7 columns: x, y, z, vx, vy, vz, m")
# compute the kinetic energy
v = particles[:, 3:6]
m = particles[:, 6]
ke = 0.5 * np.sum(m * np.linalg.norm(v, axis=1)**2)
# # compute the potential energy
# forces = forces_basic.analytical_forces(particles)
# r = np.linalg.norm(particles[:, :3], axis=1)
# pe_particles = -forces[:, 0] * particles[:, 0] - forces[:, 1] * particles[:, 1] - forces[:, 2] * particles[:, 2]
# pe = np.sum(pe_particles)
# # TODO: i am pretty sure this is wrong
pe = 0
return ke + pe
def particles_plot_3d(positions: np.ndarray, masses: np.ndarray, title: str = "Particle distribution (3D)"):
"""
Plots a 3D scatter plot of a set of particles.
@ -214,3 +201,129 @@ def particles_plot_2d(particles: np.ndarray, title: str = "Flattened distributio
plt.show()
else:
ax.hist2d(x, y, bins=100, cmap='coolwarm')
def particles_plot_2d_multiframe(particles_3d: np.ndarray, t_range: np.ndarray, title: str):
# reduce the font size
fig, axs = plt.subplots(4, 6, figsize=(20, 12))
fig.suptitle(title)
# make sure we have enough time steps to show
diff = axs.size - particles_3d.shape[0]
if diff > 0:
logger.debug(f"Adding dummy time steps: {diff=} -> {axs.size=}")
plot_t_range = np.concatenate([t_range, np.zeros(diff)])
plot_particles_in_time = particles_3d
elif diff < 0:
logger.debug(f"Too many steps to plot - reducing: {particles_3d.shape[0]} -> {axs.size}")
# skip some of the time steps
plot_t_range = []
plot_particles_in_time = []
for i in range(axs.size):
idx = int(i / axs.size * particles_3d.shape[0])
# make sure the first and last time steps are included
if i == 0:
idx = 0
elif i == axs.size - 1:
idx = particles_3d.shape[0] - 1
plot_t_range.append(t_range[idx])
plot_particles_in_time.append(particles_3d[idx])
else:
plot_t_range = t_range
plot_particles_in_time = particles_3d
for p, t, a in zip(plot_particles_in_time, plot_t_range, axs.flat):
a.set_title(f"t={t:.2g}")
particles_plot_2d(p, ax=a)
plt.show()
def particles_plot_2d_animated(particles_3d: np.ndarray, t_range: np.ndarray, output: Path):
# Also: show the 2D evolution as a GIF
plt.ioff()
fig, ax = plt.subplots()
fig.suptitle("Particle evolution (top view)")
ax.set_aspect('equal')
xmax = np.max(particles_3d[:, :, :3])
ax.set_xlim(-xmax, xmax)
ax.set_ylim(-xmax, xmax)
ax.set_xlabel('x')
ax.set_ylabel('y')
def update(i):
ax.set_title(f"t={t_range[i]:.2g}")
particles_plot_2d(particles_3d[i], ax=ax)
ax.set_xlim(-xmax, xmax) # Ensure x limits remain fixed
ax.set_ylim(-xmax, xmax) # Ensure y limits remain fixed
ani = FuncAnimation(fig, update, frames=range(len(particles_3d)), repeat=False)
ani.save(output, writer='ffmpeg', fps=5)
plt.close(fig)
plt.ion()
def particles_plot_radial_evolution(particles_3d: np.ndarray, t_range: np.ndarray):
# radial extrema of the particles - disk surface
n_steps = particles_3d.shape[0]
r_mins = np.zeros(n_steps)
r_maxs = np.zeros(n_steps)
r_hms = np.zeros(n_steps)
for i in range(n_steps):
p = particles_3d[i, ...]
# exclude the black hole
r = np.linalg.norm(p[1:,:3], axis=1)
# plt.plot(r[1::100], alpha=0.5)
r_mins[i] = np.min(r)
r_maxs[i] = np.max(r)
r_hms[i] = half_mass_radius(p)
r_mins = apply_units(r_mins, "position")
r_maxs = apply_units(r_maxs, "position")
plt.figure()
plt.plot(t_range, r_mins, label='$r_{min}$', color=plt.cm.Blues(0.5))
plt.plot(t_range, r_maxs, label='$r_{max}$', color=plt.cm.Blues(0.8))
plt.fill_between(t_range, r_mins, r_maxs, color=plt.cm.Blues(0.2))
plt.plot(t_range, r_hms, label='$r_{hm}$', color=plt.cm.Greens(0.5))
# show the initial conditions
plt.hlines(r_mins[0], t_range[0], t_range[-1], color='black', linestyle='--')
plt.hlines(r_maxs[0], t_range[0], t_range[-1], color='black', linestyle='--')
plt.title(f'Radial extrema over {n_steps} timesteps')
plt.xlabel('Integration time')
plt.ylabel(f'{r_mins.unit:latex}')
plt.legend()
plt.show()
def particles_plot_orbits(particles_3d: np.ndarray, t_range: np.ndarray):
# particle orbits
fig, axs = plt.subplots(2, 1)
axs[0].set_position([0, 0.3, 1, 0.6])
axs[0].set_xlabel('x')
axs[0].set_ylabel('y')
axs[1].set_position([0, 0, 1, 0.2])
axs[1].set_xlabel("t")
axs[1].set_ylabel('z')
fig.suptitle('Particle orbits')
logger.debug(f"Orbit plot input shape: {particles_3d.shape}")
mid = particles_3d.shape[1] // 2
particle_idx = [1, 2, 3, 4, 5, mid-2, mid-1, mid, mid+1, mid+2, -5, -4, -3, -2, -1]
colors = plt.cm.Blues(np.linspace(0.2, 0.8, len(particle_idx)))
for i, idx in enumerate(particle_idx):
x = particles_3d[:, idx, 0]
y = particles_3d[:, idx, 1]
z = particles_3d[:, idx, 2]
axs[0].plot(x, y, label=f'p{idx}', color=colors[i])
axs[1].plot(t_range, z, label=f'p{idx}', color=colors[i])
# plt.legend()
plt.show()

View File

@ -97,34 +97,18 @@ def particles_to_mesh(particles: np.ndarray, mesh: np.ndarray, axis: np.ndarray,
mesh[ijk[0], ijk[1], ijk[2]] += weight * m
'''
#### Actually need to patch this
def ode_setup(particles: np.ndarray, force_function: callable) -> tuple[np.ndarray, callable]:
def pm_ode_setup(particles: np.ndarray, force_function: callable, boundary_condition: str) -> tuple[np.ndarray, callable]:
"""
Linearizes the ODE system for the particles interacting gravitationally.
Returns:
- the Y0 array corresponding to the initial conditions (x0 and v0)
- the function that computes the right hand side of the ODE with function signature f(t, y)
Assumes that the particles array has the following columns: x, y, z, vx, vy, vz, m.
Returns a linear ode function that can be integrated by an ODE solver and implements the given boundary conditions.
"""
if particles.shape[1] != 7:
raise ValueError("Particles array must have 7 columns: x, y, z, vx, vy, vz, m")
# for the integrators we need to flatten array which contains 7 columns for now
# we don't really care how we reshape as long as we unflatten consistently
particles = particles.flatten()
logger.debug(f"Reshaped 7 columns into {particles.shape=}")
def f(y, t):
def f(p, t):
"""
Computes the right hand side of the ODE system.
The ODE system is linearized around the current positions and velocities.
"""
p = to_particles(y)
# this is explicitly a copy, which has shape (n, 7)
# columns x, y, z, vx, vy, vz, m
# (need to keep y intact since integrators make multiple function calls)
forces = force_function(p[:, [0, 1, 2, -1]])
# compute the accelerations
@ -139,12 +123,6 @@ def ode_setup(particles: np.ndarray, force_function: callable) -> tuple[np.ndarr
# the masses remain unchanged
# p[:, -1] = p[:, -1]
# flatten the array again
# logger.debug(f"As particles: {y}")
p = p.reshape(-1, copy=False)
# logger.debug(f"As column: {y}")
return p
return particles, f
'''
return f