Testing Class Material

Lisa Schwetlick 2024-08-26 13:54:13 +02:00
commit 05b1f6cdd5
85 changed files with 102796 additions and 0 deletions


@@ -0,0 +1,6 @@
The material in this repository is released under the
CC Attribution-Share Alike 4.0 International
license.
Full license text available at
https://creativecommons.org/licenses/by-sa/4.0/


@@ -0,0 +1 @@
# Testing Project


@@ -0,0 +1,28 @@
import numpy as np
import pytest


# add a commandline option to pytest
def pytest_addoption(parser):
    """Add random seed option to py.test.
    """
    parser.addoption('--seed', dest='seed', type=int, action='store',
                     help='set random seed')


# configure pytest to automatically set the rnd seed if not passed on CLI
def pytest_configure(config):
    seed = config.getvalue("seed")
    # if seed was not set by the user, we set one now
    if seed is None or seed == ('NO', 'DEFAULT'):
        config.option.seed = int(np.random.randint(2 ** 31 - 1))


def pytest_report_header(config):
    return f'Using random seed: {config.option.seed}'


@pytest.fixture
def random_state(request):
    random_state = np.random.RandomState(request.config.option.seed)
    return random_state
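How this fits together in practice, as a minimal sketch (the test file name and its contents below are illustrative, not part of the material): any test can request the `random_state` fixture, and the seed can be pinned from the command line.

```python
# test_example.py (hypothetical file living next to this conftest.py)
def test_mean_of_uniform_samples(random_state):
    # `random_state` is seeded from --seed if given, otherwise from the
    # randomly drawn seed that pytest prints in its report header
    samples = random_state.uniform(0.0, 1.0, size=10_000)
    assert abs(samples.mean() - 0.5) < 0.05
```

Running `pytest --seed 1234` reproduces the same samples on every run; without the option, the seed that was drawn is shown in the `Using random seed: ...` header line and can be passed back in to replay a failure.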


@@ -0,0 +1,28 @@
import pytest


def test_for_loop_simple():
    cases = [1, 2, 3]
    for a in cases:
        assert a > 0


@pytest.mark.parametrize('a', [1, 2, 3])
def test_parametrize_simple(a):
    # This test will be run 3 times, with a=1, a=2, and a=3
    assert a > 0


def test_for_loop_multiple():
    cases = [(1, 'hi', 'hi'), (2, 'no', 'nono')]
    for a, b, expected in cases:
        result = b * a
        assert result == expected


@pytest.mark.parametrize('a, b, expected', [(1, 'hi', 'hi'), (2, 'no', 'nono')])
def test_parametrize_multiple(a, b, expected):
    # This test will be run 2 times, with a=1, b='hi', expected='hi'
    # and a=2, b='no', expected='nono'
    result = b * a
    assert result == expected
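A practical difference worth noting between the two styles above: in the for-loop version the first failing case aborts the loop and hides the remaining cases, whereas with `parametrize` every case is collected, run, and reported as its own test. A small sketch (not part of the file above) that exploits this by marking one deliberately broken case:

```python
import pytest


@pytest.mark.parametrize('a, b, expected', [
    (1, 'hi', 'hi'),
    # the expectation below is wrong on purpose; xfail keeps it from
    # hiding the other cases, which still run and pass independently
    pytest.param(2, 'no', 'no', marks=pytest.mark.xfail),
    (3, 'yo', 'yoyoyo'),
])
def test_each_case_is_its_own_test(a, b, expected):
    assert b * a == expected
```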


@@ -0,0 +1 @@
# Your code goes here


@@ -0,0 +1,32 @@
import numpy as np
from logistic import iterate_f


def fit_r(xs):
    """ Takes a population trajectory and returns the value of r that generated it.

    Far from the most efficient method, but it always finds the optimal value
    of r with 1/1000 precision.

    Parameters
    ----------
    xs : list of float
        A population trajectory.

    Returns
    -------
    r : float
        The value of r that generated the population trajectory.
    """
    xs = np.asarray(xs)
    x0 = xs[0]
    it = len(xs) - 1

    def error(r):
        return np.linalg.norm(xs - iterate_f(it, x0, r))

    errors = []
    for r in np.linspace(0, 4, 4001):
        errors.append((r, error(r)))
    return min(errors, key=lambda x: x[1])[0]
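A minimal usage sketch (the trajectory parameters here are illustrative): generate a trajectory with a known r that lies on the 1/1000 grid, then recover it from the data alone.

```python
import numpy as np
from logistic import iterate_f
from logistic_fit import fit_r

xs = iterate_f(it=50, x0=0.3, r=2.5)      # trajectory generated with a known r
assert np.isclose(fit_r(xs), 2.5, atol=1e-3)
```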


@@ -0,0 +1,70 @@
"""Usage:
```
plot_trajectory(100, 3.6, 0.1)
plot_bifurcation(2.5, 4.2, 0.001)
```
"""
import numpy as np
from matplotlib import pyplot as plt

from logistic import iterate_f


def plot_trajectory(n, r, x0, fname="single_trajectory.png"):
    """
    Saves a plot of a single trajectory of the logistic function.

    inputs
        n: int (number of iterations)
        r: float (r value for the logistic function)
        x0: float (between 0 and 1, starting point for the iteration)
        fname: str (filename to which to save the image)

    returns
        fig, ax (matplotlib objects)
    """
    xs = iterate_f(n, x0, r)
    fig, ax = plt.subplots(figsize=(10, 5))
    # iterate_f returns the starting value plus n iterates, i.e. n + 1 points
    ax.plot(range(n + 1), xs)
    fig.suptitle('Logistic Function')
    fig.savefig(fname)
    return fig, ax


def plot_bifurcation(start, end, step, fname="bifurcation.png", it=100000,
                     last=300):
    """
    Saves a plot of the bifurcation diagram of the logistic function. The
    `start`, `end`, and `step` parameters define for which r values to
    calculate the logistic function. If you space them too closely, it might
    take a very long time; if you don't plot enough points, your bifurcation
    diagram won't be informative. Choose wisely!

    inputs
        start, end, step: float (which r values to calculate the logistic
            function for)
        fname: str (filename to which to save the image)
        it: int (how many iterations to run for each r value)
        last: int (how many of the last iterates to plot)

    returns
        fig, ax (matplotlib objects)
    """
    r_range = np.arange(start, end, step)
    x = []
    y = []
    for r in r_range:
        xs = iterate_f(it, 0.1, r)
        all_xs = xs[len(xs) - last::].copy()
        unique_xs = np.unique(all_xs)
        y.extend(unique_xs)
        x.extend(np.ones(len(unique_xs)) * r)

    fig, ax = plt.subplots(figsize=(20, 10))
    ax.scatter(x, y, s=0.1, color='k')
    ax.set_xlabel("r")
    fig.savefig(fname)
    return fig, ax


@@ -0,0 +1,4 @@
# pytest.ini
[pytest]
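# with '*', pytest does not recurse into any subdirectory when collecting tests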
norecursedirs = *


@@ -0,0 +1,28 @@
import numpy as np
import pytest


# add a commandline option to pytest
def pytest_addoption(parser):
    """Add random seed option to py.test.
    """
    parser.addoption('--seed', dest='seed', type=int, action='store',
                     help='set random seed')


# configure pytest to automatically set the rnd seed if not passed on CLI
def pytest_configure(config):
    seed = config.getvalue("seed")
    # if seed was not set by the user, we set one now
    if seed is None or seed == ('NO', 'DEFAULT'):
        config.option.seed = int(np.random.randint(2 ** 31 - 1))


def pytest_report_header(config):
    return f'Using random seed: {config.option.seed}'


@pytest.fixture
def random_state(request):
    random_state = np.random.RandomState(request.config.option.seed)
    return random_state


@@ -0,0 +1,24 @@
import numpy as np


def f(x, r):
    """ Compute the logistic map for a given value of x and r. """
    return r * x * (1 - x)


def iterate_f(it, x0, r):
    """ Generate a population trajectory.

    Takes a number of iterations `it`, a starting value `x0`, and a parameter
    value `r`. It executes f repeatedly (`it` times), each time using the last
    result of f as the new input to f, and collects every value in a list.
    Finally, the list is converted to a numpy array and returned, so the
    result contains the starting value plus `it` iterates.
    """
    x = x0
    xs = [x0]
    for _ in range(it):
        x = f(x, r)
        xs.append(x)
    return np.array(xs)
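For a quick sanity check, the numbers below match the expected values used by the tests later in this material; note that the returned trajectory includes the starting value, so `it=1` yields two entries.

```python
import numpy as np
from logistic import f, iterate_f

# one application of the map: r * x * (1 - x)
assert np.isclose(f(0.1, 2.2), 0.198)
# the trajectory starts with x0 itself, so it=1 gives two values
assert np.allclose(iterate_f(it=1, x0=0.1, r=2.2), [0.1, 0.198])
```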


@@ -0,0 +1,32 @@
import numpy as np
from logistic import iterate_f


def fit_r(xs):
    """ Takes a population trajectory and returns the value of r that generated it.

    Far from the most efficient method, but it always finds the optimal value
    of r with 1/1000 precision.

    Parameters
    ----------
    xs : list of float
        A population trajectory.

    Returns
    -------
    r : float
        The value of r that generated the population trajectory.
    """
    xs = np.asarray(xs)
    x0 = xs[0]
    it = len(xs) - 1

    def error(r):
        return np.linalg.norm(xs - iterate_f(it, x0, r))

    errors = []
    for r in np.linspace(0, 4, 4001):
        errors.append((r, error(r)))
    return min(errors, key=lambda x: x[1])[0]


@@ -0,0 +1,54 @@
import numpy as np
from numpy.testing import assert_allclose
import pytest
from logistic import f, iterate_f


def test_f():
    # Test cases are (x, r, expected)
    cases = [
        (0.1, 2.2, 0.198),
        (0.2, 3.4, 0.544),
        (0.5, 2, 0.5),
    ]
    for x, r, expected in cases:
        result = f(x, r)
        assert_allclose(result, expected)


def test_f_corner_cases():
    # Test cases are (x, r, expected)
    cases = [
        (0, 1.1, 0),
        (1, 3.7, 0),
    ]
    for x, r, expected in cases:
        result = f(x, r)
        assert_allclose(result, expected)


def test_random_convergence():
    SEED = 42
    random_state = np.random.RandomState(SEED)
    r = 1.5
    for _ in range(100):
        x0 = random_state.uniform(0.0000001, 0.9999999)
        xs = iterate_f(it=100, x0=x0, r=r)
        assert np.isclose(xs[-1], 1 / 3)


# SEED = 42
# @pytest.fixture
# def random_state():
#     print(f"Using seed {SEED}")
#     random_state = np.random.RandomState(SEED)
#     return random_state


# @pytest.mark.xfail
def test_random_convergence_decorator(random_state):
    r = 1.5
    for _ in range(100):
        x0 = random_state.uniform(0.0000001, 0.9999999)
        xs = iterate_f(it=100, x0=x0, r=r)
        assert np.isclose(xs[-1], 1 / 3)
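Why these tests expect convergence to 1/3: the non-zero fixed point of the logistic map satisfies x* = r * x* * (1 - x*), i.e. x* = 1 - 1/r, which equals 1/3 for r = 1.5; since |f'(x*)| = |2 - r| = 0.5 < 1 there, the fixed point is attracting, so any trajectory started in (0, 1) settles onto it. A small sketch of that check (the start value here is chosen arbitrarily):

```python
import numpy as np
from logistic import iterate_f

r = 1.5
x_star = 1 - 1 / r                 # non-zero fixed point, 1/3 for r = 1.5
xs = iterate_f(it=100, x0=0.42, r=r)
assert np.isclose(xs[-1], x_star)  # 100 iterations are plenty to converge
```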


@@ -0,0 +1,29 @@
import numpy as np
from numpy.testing import assert_allclose

from logistic import iterate_f
from logistic_fit import fit_r

SEED = 42


def test_logistic_fit():
    r = 3.123
    x0 = 0.322
    xs = iterate_f(it=27, x0=x0, r=r)
    assert_allclose(r, fit_r(xs), atol=1e-3)


def test_logistic_fit_randomized():
    random_state = np.random.RandomState(SEED)
    # We test for 100 random values of x0 and r, to make sure that the
    # function works in general.
    for _ in range(100):
        x0 = random_state.uniform(0.0001, 0.9999)
        # Round `r` to 1/1000 to make sure that it matches the precision of
        # the fit_r function, so that r can be exactly recovered.
        r = round(random_state.uniform(0.001, 3.999), 3)
        xs = iterate_f(it=17, x0=x0, r=r)
        assert_allclose(r, fit_r(xs), atol=1e-3)


@@ -0,0 +1,38 @@
from numpy.testing import assert_allclose
import pytest

from logistic import f, iterate_f


@pytest.mark.parametrize('x, r, expected', [
    (0.1, 2.2, 0.198),
    (0.2, 3.4, 0.544),
    (0.5, 2, 0.5),
])
def test_f(x, r, expected):
    result = f(x, r)
    assert_allclose(result, expected)


@pytest.mark.parametrize('x, r, expected', [
    (0, 1.1, 0),
    (1, 3.7, 0),
])
def test_f_special_x_values(x, r, expected):
    result = f(x, r)
    assert_allclose(result, expected)


@pytest.mark.parametrize(
    'x, r, it, expected',
    [
        (0.1, 2.2, 1, [0.1, 0.198]),
        (0.2, 3.4, 4, [0.2, 0.544, 0.843418, 0.449019, 0.841163]),
        (0.5, 2, 2, [0.5, 0.5, 0.5]),
    ]
)
def test_iterate_f(x, r, it, expected):
    result = iterate_f(it, x, r)
    assert_allclose(result, expected, rtol=1e-5)


@@ -0,0 +1,16 @@
from numpy.testing import assert_allclose

from logistic import f


# Add here your test for the logistic map


def test_f_corner_cases():
    # Test cases are (x, r, expected)
    cases = [
        (0, 1.1, 0),
        (1, 3.7, 0),
    ]
    for x, r, expected in cases:
        result = f(x, r)
        assert_allclose(result, expected)