Material for ASPP 2024
30
.gitignore
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
# OS specific
|
||||
.DS_Store
|
||||
|
||||
# Editors
|
||||
.vscode
|
||||
.idea
|
||||
*.swp
|
||||
|
||||
# Python
|
||||
.venv/
|
||||
build/
|
||||
_build/
|
||||
dist/
|
||||
eggs/
|
||||
.eggs/
|
||||
sdist/
|
||||
*.egg-info/
|
||||
*.egg
|
||||
*.pyc
|
||||
__pycache__/
|
||||
tests-reports/
|
||||
.mypy_cache/
|
||||
.pytest_cache/
|
||||
.env
|
||||
|
||||
# Jupyter
|
||||
.ipynb_checkpoints/
|
||||
|
||||
# Project
|
||||
_archive/
|
6
LICENSE.txt
Normal file
|
@ -0,0 +1,6 @@
|
|||
The material in this repository is released under the
|
||||
CC Attribution-Share Alike 4.0 International
|
||||
license.
|
||||
|
||||
Full license text available at
|
||||
https://creativecommons.org/licenses/by-sa/4.0/
|
10
README.md
Normal file
|
@ -0,0 +1,10 @@
|
|||
# Scientific programming patterns
|
||||
|
||||
Material for the class "Scientific programming patterns", first given at ASPP 2022 in Bilbao
|
||||
|
||||
|
||||
# From this:
|
||||
![](generate_assets/DallE/DALL%C2%B7E%202022-08-11%2020.28.02%20-%20An%20apocalyptic%20monster%20with%20many%20heads%20that%20is%20destroying%20a%20computer.%20Digital%20art.png)
|
||||
|
||||
# To this:
|
||||
![](generate_assets/DallE/DALL%C2%B7E%202022-08-11%2020.29.56%20-%20A%20tame%20happy%20python%20cuddling%20with%20a%20happy%20scientist%20in%20front%20of%20a%20computer%3B%20Digital%20art.png)
|
182
code_snippets/factory_methods.ipynb
Normal file
|
@ -0,0 +1,182 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-08-12T12:14:28.926189Z",
|
||||
"start_time": "2022-08-12T12:14:28.923089Z"
|
||||
},
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import numpy as np"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-08-12T12:21:00.003395Z",
|
||||
"start_time": "2022-08-12T12:20:59.997851Z"
|
||||
},
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"dict_ = {'a': 3.1, 'b': 4.2}\n",
|
||||
"with open('my_class.json', 'w') as f:\n",
|
||||
" json.dump(dict_, f)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-08-12T12:23:03.889266Z",
|
||||
"start_time": "2022-08-12T12:23:03.883050Z"
|
||||
},
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class MyClass:\n",
|
||||
" \n",
|
||||
" def __init__(self, a, b):\n",
|
||||
" \"\"\"The basic constructor takes 'raw' values.\"\"\"\n",
|
||||
" self.a = a\n",
|
||||
" self.b = b\n",
|
||||
" \n",
|
||||
" @classmethod\n",
|
||||
" def from_random_values(cls, random_state=np.random):\n",
|
||||
" \"\"\"Create a MyClass instance with random parameters.\"\"\"\n",
|
||||
" a = random_state.rand()\n",
|
||||
" b = random_state.randn()\n",
|
||||
" return cls(a, b)\n",
|
||||
" \n",
|
||||
" @classmethod\n",
|
||||
" def from_json(cls, json_fname):\n",
|
||||
" \"\"\"Create a MyClass instance with parameters read form a json file.\"\"\"\n",
|
||||
" with open(json_fname, 'r') as f:\n",
|
||||
" dict_ = json.load(f)\n",
|
||||
" a = dict_['a']\n",
|
||||
" b = dict_['b']\n",
|
||||
" return cls(a, b)\n",
|
||||
"\n",
|
||||
"my_class = MyClass.from_random_values()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-08-12T12:23:12.242599Z",
|
||||
"start_time": "2022-08-12T12:23:12.237477Z"
|
||||
},
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'a': 0.842940228048758, 'b': 0.2797222990193814}"
|
||||
]
|
||||
},
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"my_class = MyClass.from_random_values()\n",
|
||||
"my_class.__dict__"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-08-12T12:23:44.439726Z",
|
||||
"start_time": "2022-08-12T12:23:44.432540Z"
|
||||
},
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'a': 3.1, 'b': 4.2}"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"my_class = MyClass.from_json('my_class.json')\n",
|
||||
"my_class.__dict__"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"hide_input": false,
|
||||
"kernelspec": {
|
||||
"display_name": "Python [conda env:bog]",
|
||||
"language": "python",
|
||||
"name": "conda-env-bog-py"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.5"
|
||||
},
|
||||
"toc": {
|
||||
"nav_menu": {
|
||||
"height": "12px",
|
||||
"width": "252px"
|
||||
},
|
||||
"navigate_menu": true,
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"threshold": 4,
|
||||
"toc_cell": false,
|
||||
"toc_section_display": "block",
|
||||
"toc_window_display": false
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
287
code_snippets/walker_initializers.ipynb
Normal file
|
@ -0,0 +1,287 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-09-02T11:13:12.648377Z",
|
||||
"start_time": "2022-09-02T11:13:12.165387Z"
|
||||
},
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-09-02T11:14:10.246402Z",
|
||||
"start_time": "2022-09-02T11:14:10.235135Z"
|
||||
},
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Walker:\n",
|
||||
" \"\"\" The Walker knows how to walk at random on a context map. \"\"\"\n",
|
||||
"\n",
|
||||
" def __init__(self, sigma_i, sigma_j, size, map_type='flat'):\n",
|
||||
" self.sigma_i = sigma_i\n",
|
||||
" self.sigma_j = sigma_j\n",
|
||||
" self.size = size\n",
|
||||
"\n",
|
||||
" if map_type == 'flat':\n",
|
||||
" context_map = np.ones((size, size))\n",
|
||||
" elif map_type == 'hills':\n",
|
||||
" grid_ii, grid_jj = np.mgrid[0:size, 0:size]\n",
|
||||
" i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)\n",
|
||||
" i_waves /= i_waves.max()\n",
|
||||
" j_waves = np.sin(grid_jj / 100) + np.sin(grid_jj / 50) + \\\n",
|
||||
" np.sin(grid_jj / 10)\n",
|
||||
" j_waves /= j_waves.max()\n",
|
||||
" context_map = j_waves + i_waves\n",
|
||||
" elif map_type == 'labyrinth':\n",
|
||||
" context_map = np.ones((size, size))\n",
|
||||
" context_map[50:100, 50:60] = 0\n",
|
||||
" context_map[20:89, 80:90] = 0\n",
|
||||
" context_map[90:120, 0:10] = 0\n",
|
||||
" context_map[120:size, 30:40] = 0\n",
|
||||
" context_map[180:190, 50:60] = 0\n",
|
||||
"\n",
|
||||
" context_map[50:60, 50:200] = 0\n",
|
||||
" context_map[179:189, 80:130] = 0\n",
|
||||
" context_map[110:120, 0:190] = 0\n",
|
||||
" context_map[120:size, 30:40] = 0\n",
|
||||
" context_map[180:190, 50:60] = 0\n",
|
||||
" context_map /= context_map.sum()\n",
|
||||
" self.context_map = context_map\n",
|
||||
"\n",
|
||||
" # Pre-compute a 2D grid of coordinates for efficiency\n",
|
||||
" self._grid_ii, self._grid_jj = np.mgrid[0:size, 0:size]\n",
|
||||
"\n",
|
||||
"walker = Walker(sigma_i=3, sigma_j=4, size=200, map_type='hills')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-09-02T11:15:44.357965Z",
|
||||
"start_time": "2022-09-02T11:15:44.351558Z"
|
||||
},
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Walker:\n",
|
||||
" \"\"\" The Walker knows how to walk at random on a context map. \"\"\"\n",
|
||||
"\n",
|
||||
" def __init__(self, sigma_i, sigma_j, size, context_map):\n",
|
||||
" self.sigma_i = sigma_i\n",
|
||||
" self.sigma_j = sigma_j\n",
|
||||
" self.size = size\n",
|
||||
" self.context_map = context_map\n",
|
||||
" # Pre-compute a 2D grid of coordinates for efficiency\n",
|
||||
" self._grid_ii, self._grid_jj = np.mgrid[0:size, 0:size]\n",
|
||||
"\n",
|
||||
" @classmethod\n",
|
||||
" def from_context_map_type(cls, sigma_i, sigma_j, size, map_type):\n",
|
||||
" \"\"\" Create an instance of Walker with a context map defined by type.\"\"\"\n",
|
||||
" if map_type == 'flat':\n",
|
||||
" context_map = np.ones((size, size))\n",
|
||||
" elif map_type == 'hills':\n",
|
||||
" grid_ii, grid_jj = np.mgrid[0:size, 0:size]\n",
|
||||
" i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)\n",
|
||||
" i_waves /= i_waves.max()\n",
|
||||
" j_waves = np.sin(grid_jj / 100) + np.sin(grid_jj / 50) +\\\n",
|
||||
" np.sin(grid_jj / 10)\n",
|
||||
" j_waves /= j_waves.max()\n",
|
||||
" context_map = j_waves + i_waves\n",
|
||||
" elif map_type == 'labyrinth':\n",
|
||||
" context_map = np.ones((size, size))\n",
|
||||
" context_map[50:100, 50:60] = 0\n",
|
||||
" context_map[20:89, 80:90] = 0\n",
|
||||
" context_map[90:120, 0:10] = 0\n",
|
||||
" context_map[120:size, 30:40] = 0\n",
|
||||
" context_map[180:190, 50:60] = 0\n",
|
||||
"\n",
|
||||
" context_map[50:60, 50:200] = 0\n",
|
||||
" context_map[179:189, 80:130] = 0\n",
|
||||
" context_map[110:120, 0:190] = 0\n",
|
||||
" context_map[120:size, 30:40] = 0\n",
|
||||
" context_map[180:190, 50:60] = 0\n",
|
||||
"\n",
|
||||
" context_map /= context_map.sum()\n",
|
||||
" return cls(sigma_i, sigma_j, size, context_map)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-09-02T11:15:49.092194Z",
|
||||
"start_time": "2022-09-02T11:15:49.086575Z"
|
||||
},
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"walker = Walker.from_context_map_type(sigma_i=3, sigma_j=4, size=200, map_type='hills')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-09-02T11:19:37.723607Z",
|
||||
"start_time": "2022-09-02T11:19:37.717518Z"
|
||||
},
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def flat_context_map_builder(size):\n",
|
||||
" \"\"\" A context map where all positions are equally likely. \"\"\"\n",
|
||||
" return np.ones((size, size))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def hills_context_map_builder(size):\n",
|
||||
" \"\"\" A context map with bumps and valleys. \"\"\"\n",
|
||||
" grid_ii, grid_jj = np.mgrid[0:size, 0:size]\n",
|
||||
" i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)\n",
|
||||
" i_waves /= i_waves.max()\n",
|
||||
" j_waves = np.sin(grid_jj / 100) + np.sin(grid_jj / 50) + \\\n",
|
||||
" np.sin(grid_jj / 10)\n",
|
||||
" j_waves /= j_waves.max()\n",
|
||||
" context_map = j_waves + i_waves\n",
|
||||
" return context_map\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def labyrinth_context_map_builder(size):\n",
|
||||
" \"\"\" A context map that looks like a labyrinth. \"\"\"\n",
|
||||
" context_map = np.ones((size, size))\n",
|
||||
" context_map[50:100, 50:60] = 0\n",
|
||||
" context_map[20:89, 80:90] = 0\n",
|
||||
" context_map[90:120, 0:10] = 0\n",
|
||||
" context_map[120:size, 30:40] = 0\n",
|
||||
" context_map[180:190, 50:60] = 0\n",
|
||||
"\n",
|
||||
" context_map[50:60, 50:200] = 0\n",
|
||||
" context_map[179:189, 80:130] = 0\n",
|
||||
" context_map[110:120, 0:190] = 0\n",
|
||||
" context_map[120:size, 30:40] = 0\n",
|
||||
" context_map[180:190, 50:60] = 0\n",
|
||||
"\n",
|
||||
" return context_map"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-09-02T11:20:25.815725Z",
|
||||
"start_time": "2022-09-02T11:20:25.811489Z"
|
||||
},
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Walker:\n",
|
||||
"\n",
|
||||
" def __init__(self, sigma_i, sigma_j, size, context_map):\n",
|
||||
" self.sigma_i = sigma_i\n",
|
||||
" self.sigma_j = sigma_j\n",
|
||||
" self.size = size\n",
|
||||
" self.context_map = context_map\n",
|
||||
" # Pre-compute a 2D grid of coordinates for efficiency\n",
|
||||
" self._grid_ii, self._grid_jj = np.mgrid[0:size, 0:size]\n",
|
||||
"\n",
|
||||
" @classmethod\n",
|
||||
" def from_context_map_builder(cls, sigma_i, sigma_j, size, context_map_builder):\n",
|
||||
" \"\"\"Initialize the context map from an external builder.\n",
|
||||
"\n",
|
||||
" `builder` is a callable that takes a `size` as input parameter\n",
|
||||
" and outputs a `size` x `size` numpy array of positive values.\n",
|
||||
" \"\"\"\n",
|
||||
" context_map = context_map_builder(size)\n",
|
||||
" context_map /= context_map.sum()\n",
|
||||
" return cls(sigma_i, sigma_j, size, context_map)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-09-02T11:20:26.367914Z",
|
||||
"start_time": "2022-09-02T11:20:26.362287Z"
|
||||
},
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"walker = Walker.from_context_map_builder(\n",
|
||||
" sigma_i=3, \n",
|
||||
" sigma_j=4, \n",
|
||||
" size=200, \n",
|
||||
" context_map_builder=hills_context_map_builder,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"a"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"hide_input": false,
|
||||
"kernelspec": {
|
||||
"display_name": "Python [conda env:bog]",
|
||||
"language": "python",
|
||||
"name": "conda-env-bog-py"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.5"
|
||||
},
|
||||
"toc": {
|
||||
"nav_menu": {
|
||||
"height": "12px",
|
||||
"width": "252px"
|
||||
},
|
||||
"navigate_menu": true,
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"threshold": 4,
|
||||
"toc_cell": false,
|
||||
"toc_section_display": "block",
|
||||
"toc_window_display": false
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
223
code_snippets/walker_next_step.ipynb
Normal file
|
@ -0,0 +1,223 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8ad9fe94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Walker:\n",
|
||||
" # ...\n",
|
||||
" \n",
|
||||
" def sample_next_step(self, current_i, current_j, random_state=np.random):\n",
|
||||
" \"\"\" Sample a new position for the walker. \"\"\"\n",
|
||||
" # Combine the next-step proposal with the context map to get a\n",
|
||||
" # next-step probability map\n",
|
||||
" next_step_map = self._next_step_proposal(current_i, current_j)\n",
|
||||
" selection_map = self._compute_next_step_probability(next_step_map)\n",
|
||||
"\n",
|
||||
" # Draw a new position from the next-step probability map\n",
|
||||
" r = random_state.rand()\n",
|
||||
" cumulative_map = np.cumsum(selection_map)\n",
|
||||
" cumulative_map = cumulative_map.reshape(selection_map.shape)\n",
|
||||
" i_next, j_next = np.argwhere(cumulative_map >= r)[0]\n",
|
||||
"\n",
|
||||
" return i_next, j_next\n",
|
||||
"\n",
|
||||
" def _next_step_proposal(self, current_i, current_j):\n",
|
||||
" \"\"\" Create the 2D proposal map for the next step of the walker. \"\"\"\n",
|
||||
" # 2D Gaussian distribution , centered at current position,\n",
|
||||
" # and with different standard deviations for i and j\n",
|
||||
" grid_ii, grid_jj = self._grid_ii, self._grid_jj\n",
|
||||
" sigma_i, sigma_j = self.sigma_i, self.sigma_j\n",
|
||||
"\n",
|
||||
" rad = (\n",
|
||||
" (((grid_ii - current_i) ** 2) / (sigma_i ** 2))\n",
|
||||
" + (((grid_jj - current_j) ** 2) / (sigma_j ** 2))\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)\n",
|
||||
" return p_next_step / p_next_step.sum()\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "95148e45",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Walker:\n",
|
||||
" # ...\n",
|
||||
"\n",
|
||||
" def _next_step_proposal(self, current_i, current_j):\n",
|
||||
" \"\"\" Create the 2D proposal map for the next step of the walker. \"\"\"\n",
|
||||
" raise NotImplementedError(\"`_next_step_proposal` not implemented\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "da34226f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class GaussianWalker(Walker):\n",
|
||||
" # ...\n",
|
||||
"\n",
|
||||
" def _next_step_proposal(self, current_i, current_j):\n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
"class RectangularWalker(Walker):\n",
|
||||
" # ...\n",
|
||||
"\n",
|
||||
" def _next_step_proposal(self, current_i, current_j):\n",
|
||||
"\n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
"class JumpingWalker(Walker):\n",
|
||||
" # ...\n",
|
||||
"\n",
|
||||
" def _next_step_proposal(self, current_i, current_j):\n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" \n",
|
||||
" "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cfd90d16",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Walker:\n",
|
||||
" # ...\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" def _compute_next_step_probability(self, next_step_map):\n",
|
||||
" \"\"\" Compute the next step probability map from next step proposal and\n",
|
||||
" context map. \"\"\"\n",
|
||||
" next_step_probability = next_step_map * self.context_map\n",
|
||||
" next_step_probability /= next_step_probability.sum()\n",
|
||||
" return next_step_probability\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "72041675",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class GaussianWalkerWithProductInteraction(Walker):\n",
|
||||
" def _next_step_proposal(self, current_i, current_j):\n",
|
||||
" # ...\n",
|
||||
" def _compute_next_step_probability(self, next_step_map):\n",
|
||||
" # ...\n",
|
||||
"\n",
|
||||
" \n",
|
||||
"class GaussianWalkerWithSumInteraction(Walker):\n",
|
||||
" def _next_step_proposal(self, current_i, current_j):\n",
|
||||
" # ...\n",
|
||||
" def _compute_next_step_probability(self, next_step_map):\n",
|
||||
" # ...\n",
|
||||
"\n",
|
||||
" \n",
|
||||
"class RectangularWalkerWithProductInteraction(Walker):\n",
|
||||
" def _next_step_proposal(self, current_i, current_j):\n",
|
||||
" # ...\n",
|
||||
" def _compute_next_step_probability(self, next_step_map):\n",
|
||||
" # ...\n",
|
||||
"\n",
|
||||
" \n",
|
||||
"class RectangularWalkerWithSumInteraction(Walker):\n",
|
||||
" def _next_step_proposal(self, current_i, current_j):\n",
|
||||
" # ...\n",
|
||||
" def _compute_next_step_probability(self, next_step_map):\n",
|
||||
" # ...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5ee2e200",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Walker:\n",
|
||||
" def __init__(self, size, context_map, next_step_proposal, next_step_proposal_arguments):\n",
|
||||
" self.next_step_proposal = next_step_proposal\n",
|
||||
" # ...\n",
|
||||
"\n",
|
||||
" \n",
|
||||
" def sample_next_step(self, current_i, current_j, random_state=np.random):\n",
|
||||
" \"\"\" Sample a new position for the walker. \"\"\"\n",
|
||||
" # Combine the next-step proposal with the context map to get a\n",
|
||||
" # next-step probability map\n",
|
||||
" next_step_map = self.next_step_proposal(current_i, current_j, **next_step_proposal_arguments)\n",
|
||||
" selection_map = self._compute_next_step_probability(next_step_map)\n",
|
||||
"\n",
|
||||
" # Draw a new position from the next-step probability map\n",
|
||||
" r = random_state.rand()\n",
|
||||
" cumulative_map = np.cumsum(selection_map)\n",
|
||||
" cumulative_map = cumulative_map.reshape(selection_map.shape)\n",
|
||||
" i_next, j_next = np.argwhere(cumulative_map >= r)[0]\n",
|
||||
"\n",
|
||||
" return i_next, j_next\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
105
code_snippets/walker_with_plotting.ipynb
Normal file
|
@ -0,0 +1,105 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2120045b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class Walker:\n",
|
||||
" \"\"\" The Walker knows how to walk at random on a context map. \"\"\"\n",
|
||||
"\n",
|
||||
" def __init__(self, sigma_i, sigma_j, size, map_type='flat'):\n",
|
||||
" # ...\n",
|
||||
"\n",
|
||||
" def plot_trajectory(self, trajectory):\n",
|
||||
" \"\"\" Plot a trajectory over a context map. \"\"\"\n",
|
||||
" trajectory = np.asarray(trajectory)\n",
|
||||
" plt.matshow(self.context_map)\n",
|
||||
" plt.plot(trajectory[:, 1], trajectory[:, 0], color='r')\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
" def plot_trajectory_hexbin(self, trajectory):\n",
|
||||
"        \"\"\" Plot a hexagonal density map of a trajectory. \"\"\"\n",
|
||||
" trajectory = np.asarray(trajectory)\n",
|
||||
" with plt.rc_context({'figure.figsize': (4, 4), \n",
|
||||
" 'axes.labelsize': 16, \n",
|
||||
" 'xtick.labelsize': 14, \n",
|
||||
" 'ytick.labelsize': 14}):\n",
|
||||
" plt.hexbin(\n",
|
||||
" trajectory[:, 1], trajectory[:, 0], \n",
|
||||
" gridsize=30, extent=(0, 200, 0, 200), \n",
|
||||
" edgecolors='none', cmap='Reds'\n",
|
||||
" )\n",
|
||||
" plt.gca().invert_yaxis()\n",
|
||||
" plt.xlabel('X')\n",
|
||||
" plt.ylabel('Y')\n",
|
||||
" \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e8b1035c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class Walker:\n",
|
||||
" \"\"\" The Walker knows how to walk at random on a context map. \"\"\"\n",
|
||||
"\n",
|
||||
" def __init__(self, sigma_i, sigma_j, size, map_type='flat'):\n",
|
||||
" # ...\n",
|
||||
"\n",
|
||||
" def plot_trajectory(self, trajectory):\n",
|
||||
" \"\"\" Plot a trajectory over a context map. \"\"\"\n",
|
||||
" trajectory = np.asarray(trajectory)\n",
|
||||
" plt.matshow(self.context_map)\n",
|
||||
" plt.plot(trajectory[:, 1], trajectory[:, 0], color='r')\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
" def plot_trajectory_hexbin(self, trajectory):\n",
|
||||
"        \"\"\" Plot a hexagonal density map of a trajectory. \"\"\"\n",
|
||||
" trajectory = np.asarray(trajectory)\n",
|
||||
" plt.hexbin(\n",
|
||||
" trajectory[:, 1], trajectory[:, 0], \n",
|
||||
" gridsize=30, extent=(0, 200, 0, 200), \n",
|
||||
" edgecolors='none', cmap='Reds'\n",
|
||||
" )\n",
|
||||
" plt.gca().invert_yaxis()\n",
|
||||
" plt.xlabel('X')\n",
|
||||
" plt.ylabel('Y')\n",
|
||||
" \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
After Width: | Height: | Size: 1.8 MiB |
After Width: | Height: | Size: 1.8 MiB |
After Width: | Height: | Size: 1.7 MiB |
After Width: | Height: | Size: 1.7 MiB |
After Width: | Height: | Size: 1.8 MiB |
After Width: | Height: | Size: 1.8 MiB |
After Width: | Height: | Size: 1.8 MiB |
After Width: | Height: | Size: 1.4 MiB |
After Width: | Height: | Size: 1.5 MiB |
After Width: | Height: | Size: 1.7 MiB |
After Width: | Height: | Size: 1.7 MiB |
After Width: | Height: | Size: 1.5 MiB |
After Width: | Height: | Size: 1.5 MiB |
After Width: | Height: | Size: 1.6 MiB |
After Width: | Height: | Size: 1.5 MiB |
After Width: | Height: | Size: 1.6 MiB |
After Width: | Height: | Size: 1.6 MiB |
1511
generate_assets/explain_the_walker.ipynb
Normal file
BIN
generate_assets/walk1.png
Normal file
After Width: | Height: | Size: 8.3 KiB |
BIN
generate_assets/walk2.png
Normal file
After Width: | Height: | Size: 9.3 KiB |
BIN
generate_assets/walk3.png
Normal file
After Width: | Height: | Size: 8.8 KiB |
BIN
generate_assets/walk4.png
Normal file
After Width: | Height: | Size: 9.4 KiB |
BIN
generate_assets/walk5.png
Normal file
After Width: | Height: | Size: 13 KiB |
BIN
generate_assets/walk6.png
Normal file
After Width: | Height: | Size: 11 KiB |
BIN
generate_assets/walk7.png
Normal file
After Width: | Height: | Size: 93 KiB |
590
notebooks/01a_Classes.ipynb
Normal file
|
@ -0,0 +1,590 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"# The smell of classes"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"## The \"data bundle\" smell"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T15:05:51.531289Z",
|
||||
"start_time": "2018-07-27T17:05:51.526519+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def momentum(mass, velocity):\n",
|
||||
" return mass * velocity\n",
|
||||
"\n",
|
||||
"def energy(mass, velocity):\n",
|
||||
" return 0.5 * mass * velocity ** 2\n",
|
||||
"\n",
|
||||
"def update_position(velocity, position, dt):\n",
|
||||
" return position + velocity * dt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T15:05:51.905235Z",
|
||||
"start_time": "2018-07-27T17:05:51.900153+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"9.0\n",
|
||||
"1.2000000000000002\n",
|
||||
"1.0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Naive\n",
|
||||
"mass1 = 10.0\n",
|
||||
"velocity1 = 0.9\n",
|
||||
"\n",
|
||||
"mass2 = 12.0\n",
|
||||
"velocity2 = 0.1\n",
|
||||
"\n",
|
||||
"print(momentum(mass1, velocity1))\n",
|
||||
"print(momentum(mass2, velocity2))\n",
|
||||
"print(momentum(mass1, velocity2)) # ??"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"We have two parameters that will be sent to these functions over and over again: `mass` and `velocity`.\n",
|
||||
"\n",
|
||||
"Moreover, the parameters cannot be mixed up (e.g. the velocity of one particle with the mass of another)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T15:05:52.116795Z",
|
||||
"start_time": "2018-07-27T17:05:52.112569+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"9.0\n",
|
||||
"1.2000000000000002\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"masses = [10.0, 12.0]\n",
|
||||
"velocities = [0.9, 0.1]\n",
|
||||
"\n",
|
||||
"print(momentum(masses[0], velocities[0]))\n",
|
||||
"print(momentum(masses[1], velocities[1]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T15:05:52.548364Z",
|
||||
"start_time": "2018-07-27T17:05:52.544726+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"9.0\n",
|
||||
"1.2000000000000002\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"particle1 = {'mass': 10.0, 'velocity': 0.9}\n",
|
||||
"particle2 = {'mass': 12.0, 'velocity': 0.1}\n",
|
||||
"\n",
|
||||
"print(momentum(particle1['mass'], particle1['velocity']))\n",
|
||||
"print(momentum(particle2['mass'], particle2['velocity']))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"All of the functions above can be rewritten as a function of this particle \"instance\", eliminating the bookkeeping for the individual parameters."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T15:05:53.571400Z",
|
||||
"start_time": "2018-07-27T17:05:53.567192+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"9.0\n",
|
||||
"1.2000000000000002\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def momentum(particle):\n",
|
||||
" return particle['mass'] * particle['velocity']\n",
|
||||
"\n",
|
||||
"print(momentum(particle1))\n",
|
||||
"print(momentum(particle2))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"An annoying thing of this solution is that we have to remember the name of the keys in the dictionary, and the solution is sensitive to typos.\n",
|
||||
"\n",
|
||||
"To solve this, we could write a function to build a particle, a.k.a a \"constructor\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T15:06:20.004037Z",
|
||||
"start_time": "2018-07-27T17:06:19.998500+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"9.0\n",
|
||||
"1.2000000000000002\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def init_particle(mass, velocity):\n",
|
||||
" self = {\n",
|
||||
" 'mass': mass,\n",
|
||||
" 'velocity': velocity\n",
|
||||
" }\n",
|
||||
" return self\n",
|
||||
"\n",
|
||||
"particle1 = init_particle(10.0, 0.9)\n",
|
||||
"particle2 = init_particle(12.0, 0.1)\n",
|
||||
"print(momentum(particle1))\n",
|
||||
"print(momentum(particle2))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"`particle1` and `particle2` are called \"instances\" of the particle \"class\".\n",
|
||||
"\n",
|
||||
"Python classes are a way to formalize this pattern: creating a bundle of data that belongs together. E.g. the parameters of an experiment, the results of a simulation, etc."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"## Introducing classes as a data bundle template"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-08-04T13:04:49.208543Z",
|
||||
"start_time": "2018-08-04T15:04:49.203180+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Particle:\n",
|
||||
" def __init__(self, mass, velocity):\n",
|
||||
" self.mass = mass\n",
|
||||
" self.velocity = velocity\n",
|
||||
"\n",
|
||||
"particle1 = Particle(10.0, 0.9)\n",
|
||||
"particle2 = Particle(12.0, 0.1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T15:07:09.818544Z",
|
||||
"start_time": "2018-07-27T17:07:09.814535+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"0.9"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"particle1.velocity"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T15:07:09.997264Z",
|
||||
"start_time": "2018-07-27T17:07:09.994114+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"12.0"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"particle2.mass"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T15:07:11.559629Z",
|
||||
"start_time": "2018-07-27T17:07:11.555632+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
},
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'mass': 10.0, 'velocity': 0.9}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"particle1.__dict__"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"## Class methods"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T12:13:56.972938Z",
|
||||
"start_time": "2018-07-27T14:13:56.969323+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"9.0\n",
|
||||
"1.2000000000000002\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def momentum(particle):\n",
|
||||
" return particle.mass * particle.velocity\n",
|
||||
"\n",
|
||||
"print(momentum(particle1))\n",
|
||||
"print(momentum(particle2))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"9.0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"class Particle:\n",
|
||||
" def __init__(self, mass, velocity):\n",
|
||||
" self.mass = mass\n",
|
||||
" self.velocity = velocity\n",
|
||||
"\n",
|
||||
" def momentum(self):\n",
|
||||
" return self.mass * self.velocity\n",
|
||||
"\n",
|
||||
"particle1 = Particle(10.0, 0.9)\n",
|
||||
"print(particle1.momentum())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"We have been using class instances and methods all along..."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'A scanner darkly'"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"s = 'A scanner Darkly'\n",
|
||||
"s.capitalize()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'apple', 'banana', 'pineapple'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"x = set(['apple', 'banana', 'apple', 'pineapple'])\n",
|
||||
"x"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'apple', 'banana', 'kiwi', 'pineapple'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"x.union(['banana', 'kiwi'])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"hide_input": false,
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"toc": {
|
||||
"nav_menu": {
|
||||
"height": "174px",
|
||||
"width": "252px"
|
||||
},
|
||||
"navigate_menu": true,
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"threshold": 4,
|
||||
"toc_cell": false,
|
||||
"toc_position": {
|
||||
"height": "953px",
|
||||
"left": "0px",
|
||||
"right": "1253px",
|
||||
"top": "127px",
|
||||
"width": "320px"
|
||||
},
|
||||
"toc_section_display": "block",
|
||||
"toc_window_display": false
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
279
notebooks/01b_Classes_teacher_edition.ipynb
Normal file
|
@ -0,0 +1,279 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# The smell of classes"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## The \"data bundle\" smell"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2021-08-11T12:50:34.342224Z",
|
||||
"start_time": "2021-08-11T14:50:34.336560+02:00"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def momentum(mass, velocity):\n",
|
||||
" return mass * velocity\n",
|
||||
"\n",
|
||||
"def energy(mass, velocity):\n",
|
||||
" return 0.5 * mass * velocity ** 2\n",
|
||||
"\n",
|
||||
"def update_position(velocity, position, dt):\n",
|
||||
" return position + velocity * dt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2021-08-11T12:50:34.776689Z",
|
||||
"start_time": "2021-08-11T14:50:34.769617+02:00"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Naive\n",
|
||||
"mass1 = 10.0\n",
|
||||
"velocity1 = 0.9\n",
|
||||
"\n",
|
||||
"mass2 = 12.0\n",
|
||||
"velocity2 = 0.1\n",
|
||||
"\n",
|
||||
"print(momentum(mass1, velocity1))\n",
|
||||
"print(momentum(mass2, velocity2))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We have two parameters that will be sent to these functions over and over again: `mass` and `velocity`.\n",
|
||||
"\n",
|
||||
"Moreover, the parameters cannot be mixed up (e.g. the velocity of one particle with the mass of another)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T15:05:52.548364Z",
|
||||
"start_time": "2018-07-27T17:05:52.544726+02:00"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introducing classes as a data bundle template"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-08-04T13:04:49.208543Z",
|
||||
"start_time": "2018-08-04T15:04:49.203180+02:00"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Particle:\n",
|
||||
" pass\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Class methods"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T12:13:56.972938Z",
|
||||
"start_time": "2018-07-27T14:13:56.969323+02:00"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def momentum(particle):\n",
|
||||
" return particle.mass * particle.velocity\n",
|
||||
"\n",
|
||||
"print(momentum(particle1))\n",
|
||||
"print(momentum(particle2))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Particle:\n",
|
||||
" def __init__(self, mass, velocity):\n",
|
||||
" self.mass = mass\n",
|
||||
" self.velocity = velocity\n",
|
||||
"\n",
|
||||
" # Method here\n",
|
||||
"\n",
|
||||
"particle1 = Particle(10.0, 0.9, 0.0)\n",
|
||||
"print(particle1.momentum())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"We have been using class instances and methods all along..."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"s = 'A scanner Darkly'\n",
|
||||
"s.capitalize()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x = set(['apple', 'banana', 'apple', 'pineapple'])\n",
|
||||
"x"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x.union(['banana', 'kiwi'])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"hide_input": false,
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"toc": {
|
||||
"nav_menu": {
|
||||
"height": "174px",
|
||||
"width": "252px"
|
||||
},
|
||||
"navigate_menu": true,
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"threshold": 4,
|
||||
"toc_cell": false,
|
||||
"toc_position": {
|
||||
"height": "953px",
|
||||
"left": "0px",
|
||||
"right": "1253px",
|
||||
"top": "127px",
|
||||
"width": "320px"
|
||||
},
|
||||
"toc_section_display": "block",
|
||||
"toc_window_display": false
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
455
notebooks/02a_Serialization.ipynb
Normal file
|
@ -0,0 +1,455 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Serialization demo"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import numpy as np\n",
|
||||
"import pickle"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## pickle is simple but can be dangerous"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class SomethingSimple:\n",
|
||||
" def __init__(self, foo, bar):\n",
|
||||
" self.foo = foo\n",
|
||||
" self.bar = bar\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "(__main__.SomethingSimple, 3, 'two')"
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"simple = SomethingSimple(foo=3, bar='two')\n",
|
||||
"\n",
|
||||
"with open('simple.pickle', 'wb') as f:\n",
|
||||
" pickle.dump(simple, f)\n",
|
||||
"\n",
|
||||
"with open('simple.pickle', 'rb') as f:\n",
|
||||
" simple_bis = pickle.load(f)\n",
|
||||
"\n",
|
||||
"type(simple_bis), simple_bis.foo, simple_bis.bar"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class SomethingSimple:\n",
|
||||
" def __init__(self, foo, bla):\n",
|
||||
" self.foo = foo\n",
|
||||
" self.bla = bla"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "{'foo': 3, 'bar': 'two'}"
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with open('simple.pickle', 'rb') as f:\n",
|
||||
" simple_bis = pickle.load(f)\n",
|
||||
"\n",
|
||||
"simple_bis.__dict__"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Even worse when you have a simple class name change"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "AttributeError",
|
||||
"evalue": "Can't get attribute 'SomethingSimple' on <module '__main__'>",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
|
||||
"\u001B[0;31mAttributeError\u001B[0m Traceback (most recent call last)",
|
||||
"\u001B[0;32m<ipython-input-6-56e3756749ad>\u001B[0m in \u001B[0;36m<module>\u001B[0;34m()\u001B[0m\n\u001B[1;32m 8\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 9\u001B[0m \u001B[0;32mwith\u001B[0m \u001B[0mopen\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m'simple.pickle'\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m'rb'\u001B[0m\u001B[0;34m)\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0mf\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 10\u001B[0;31m \u001B[0msimple_bis\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mpickle\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mload\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mf\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 11\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 12\u001B[0m \u001B[0msimple_bis\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__dict__\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
|
||||
"\u001B[0;31mAttributeError\u001B[0m: Can't get attribute 'SomethingSimple' on <module '__main__'>"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Simulate a name change of the SomethingSimple class\n",
|
||||
"del SomethingSimple\n",
|
||||
"\n",
|
||||
"class Simple:\n",
|
||||
" def __init__(self, foo, bar):\n",
|
||||
" self.foo = foo\n",
|
||||
" self.bar = bar\n",
|
||||
"\n",
|
||||
"with open('simple.pickle', 'rb') as f:\n",
|
||||
" simple_bis = pickle.load(f)\n",
|
||||
"\n",
|
||||
"simple_bis.__dict__"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## JSON is still quite simple, and allows you a closer control"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class SomethingNice:\n",
|
||||
"\n",
|
||||
" def __init__(self, foo, bar):\n",
|
||||
" self.foo = foo\n",
|
||||
" self.bar = bar\n",
|
||||
"\n",
|
||||
" @classmethod\n",
|
||||
" def from_json(cls, fname):\n",
|
||||
" with open(fname, 'r') as f:\n",
|
||||
" dump = json.load(f)\n",
|
||||
" return cls(**dump)\n",
|
||||
"\n",
|
||||
" def to_json(self, fname):\n",
|
||||
" with open(fname, 'w') as f:\n",
|
||||
" json.dump(self.__dict__, f)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "{'foo': 3, 'bar': 'two'}"
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"so_nice = SomethingNice(foo=3, bar='two')\n",
|
||||
"so_nice.__dict__\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\"foo\": 3, \"bar\": \"two\"}"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"so_nice.to_json('nice.json')\n",
|
||||
"!cat ./nice.json"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "{'foo': 3, 'bar': 'two'}"
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"so_nice_again = SomethingNice.from_json('nice.json')\n",
|
||||
"so_nice_again.__dict__"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "{'foo': 3, 'bar': array([1.2, 3.4])}"
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"not_so_nice = SomethingNice(foo=3, bar=np.array([1.2, 3.4]))\n",
|
||||
"not_so_nice.__dict__"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "TypeError",
|
||||
"evalue": "Object of type 'ndarray' is not JSON serializable",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
|
||||
"\u001B[0;31mTypeError\u001B[0m Traceback (most recent call last)",
|
||||
"\u001B[0;32m<ipython-input-12-e860ef19277d>\u001B[0m in \u001B[0;36m<module>\u001B[0;34m()\u001B[0m\n\u001B[0;32m----> 1\u001B[0;31m \u001B[0mnot_so_nice\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mto_json\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m'not_so_nice.json'\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m",
|
||||
"\u001B[0;32m<ipython-input-7-c9b082d8b74f>\u001B[0m in \u001B[0;36mto_json\u001B[0;34m(self, fname)\u001B[0m\n\u001B[1;32m 13\u001B[0m \u001B[0;32mdef\u001B[0m \u001B[0mto_json\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mfname\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 14\u001B[0m \u001B[0;32mwith\u001B[0m \u001B[0mopen\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mfname\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m'w'\u001B[0m\u001B[0;34m)\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0mf\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 15\u001B[0;31m \u001B[0mjson\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mdump\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__dict__\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mf\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m",
|
||||
"\u001B[0;32m~/miniconda3/envs/bog/lib/python3.6/json/__init__.py\u001B[0m in \u001B[0;36mdump\u001B[0;34m(obj, fp, skipkeys, ensure_ascii, check_circular, allow_nan, cls, indent, separators, default, sort_keys, **kw)\u001B[0m\n\u001B[1;32m 177\u001B[0m \u001B[0;31m# could accelerate with writelines in some versions of Python, at\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 178\u001B[0m \u001B[0;31m# a debuggability cost\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 179\u001B[0;31m \u001B[0;32mfor\u001B[0m \u001B[0mchunk\u001B[0m \u001B[0;32min\u001B[0m \u001B[0miterable\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 180\u001B[0m \u001B[0mfp\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mwrite\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mchunk\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 181\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n",
|
||||
"\u001B[0;32m~/miniconda3/envs/bog/lib/python3.6/json/encoder.py\u001B[0m in \u001B[0;36m_iterencode\u001B[0;34m(o, _current_indent_level)\u001B[0m\n\u001B[1;32m 428\u001B[0m \u001B[0;32myield\u001B[0m \u001B[0;32mfrom\u001B[0m \u001B[0m_iterencode_list\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mo\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0m_current_indent_level\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 429\u001B[0m \u001B[0;32melif\u001B[0m \u001B[0misinstance\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mo\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mdict\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 430\u001B[0;31m \u001B[0;32myield\u001B[0m \u001B[0;32mfrom\u001B[0m \u001B[0m_iterencode_dict\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mo\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0m_current_indent_level\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 431\u001B[0m \u001B[0;32melse\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 432\u001B[0m \u001B[0;32mif\u001B[0m \u001B[0mmarkers\u001B[0m \u001B[0;32mis\u001B[0m \u001B[0;32mnot\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
|
||||
"\u001B[0;32m~/miniconda3/envs/bog/lib/python3.6/json/encoder.py\u001B[0m in \u001B[0;36m_iterencode_dict\u001B[0;34m(dct, _current_indent_level)\u001B[0m\n\u001B[1;32m 402\u001B[0m \u001B[0;32melse\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 403\u001B[0m \u001B[0mchunks\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0m_iterencode\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mvalue\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0m_current_indent_level\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 404\u001B[0;31m \u001B[0;32myield\u001B[0m \u001B[0;32mfrom\u001B[0m \u001B[0mchunks\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 405\u001B[0m \u001B[0;32mif\u001B[0m \u001B[0mnewline_indent\u001B[0m \u001B[0;32mis\u001B[0m \u001B[0;32mnot\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 406\u001B[0m \u001B[0m_current_indent_level\u001B[0m \u001B[0;34m-=\u001B[0m \u001B[0;36m1\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
|
||||
"\u001B[0;32m~/miniconda3/envs/bog/lib/python3.6/json/encoder.py\u001B[0m in \u001B[0;36m_iterencode\u001B[0;34m(o, _current_indent_level)\u001B[0m\n\u001B[1;32m 435\u001B[0m \u001B[0;32mraise\u001B[0m \u001B[0mValueError\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m\"Circular reference detected\"\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 436\u001B[0m \u001B[0mmarkers\u001B[0m\u001B[0;34m[\u001B[0m\u001B[0mmarkerid\u001B[0m\u001B[0;34m]\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mo\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 437\u001B[0;31m \u001B[0mo\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0m_default\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mo\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 438\u001B[0m \u001B[0;32myield\u001B[0m \u001B[0;32mfrom\u001B[0m \u001B[0m_iterencode\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mo\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0m_current_indent_level\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 439\u001B[0m \u001B[0;32mif\u001B[0m \u001B[0mmarkers\u001B[0m \u001B[0;32mis\u001B[0m \u001B[0;32mnot\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
|
||||
"\u001B[0;32m~/miniconda3/envs/bog/lib/python3.6/json/encoder.py\u001B[0m in \u001B[0;36mdefault\u001B[0;34m(self, o)\u001B[0m\n\u001B[1;32m 178\u001B[0m \"\"\"\n\u001B[1;32m 179\u001B[0m raise TypeError(\"Object of type '%s' is not JSON serializable\" %\n\u001B[0;32m--> 180\u001B[0;31m o.__class__.__name__)\n\u001B[0m\u001B[1;32m 181\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 182\u001B[0m \u001B[0;32mdef\u001B[0m \u001B[0mencode\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mo\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
|
||||
"\u001B[0;31mTypeError\u001B[0m: Object of type 'ndarray' is not JSON serializable"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"not_so_nice.to_json('not_so_nice.json')"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class SomethingWorking:\n",
|
||||
"\n",
|
||||
" def __init__(self, foo, data):\n",
|
||||
" self.foo = foo\n",
|
||||
" self.data = data\n",
|
||||
"\n",
|
||||
" @classmethod\n",
|
||||
" def from_json(cls, fname):\n",
|
||||
" with open(fname, 'r') as f:\n",
|
||||
" dump = json.load(f)\n",
|
||||
" dump['data'] = np.array(dump['data'])\n",
|
||||
" return cls(**dump)\n",
|
||||
"\n",
|
||||
" def to_json(self, fname):\n",
|
||||
" dump = {\n",
|
||||
" 'foo': self.foo,\n",
|
||||
" 'data': self.data.tolist(),\n",
|
||||
" }\n",
|
||||
" with open(fname, 'w') as f:\n",
|
||||
" json.dump(dump, f)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\"foo\": 3, \"data\": [[1, 2], [3, 4]]}"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"not_so_nice = SomethingWorking(foo=3, data=np.array([[1, 2], [3,4 ]]))\n",
|
||||
"not_so_nice.to_json('not_so_nice.json')\n",
|
||||
"!cat not_so_nice.json\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"hide_input": false,
|
||||
"kernelspec": {
|
||||
"display_name": "Python [default]",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.5.5"
|
||||
},
|
||||
"toc": {
|
||||
"nav_menu": {
|
||||
"height": "12px",
|
||||
"width": "252px"
|
||||
},
|
||||
"navigate_menu": true,
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"threshold": 4,
|
||||
"toc_cell": false,
|
||||
"toc_section_display": "block",
|
||||
"toc_window_display": false
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
207
notebooks/02b_Serialization_teacher_edition.ipynb
Normal file
|
@ -0,0 +1,207 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Serialization demo"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import numpy as np\n",
|
||||
"import pickle"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## pickle is simple but can be dangerous"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class SomethingSimple:\n",
|
||||
" def __init__(self, foo, bar):\n",
|
||||
" self.foo = foo\n",
|
||||
" self.bar = bar\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"simple = SomethingSimple(foo=3, bar='two')"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## JSON is still quite simple, and allows you a closer control"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class SomethingNice:\n",
|
||||
"\n",
|
||||
" def __init__(self, foo, bar):\n",
|
||||
" self.foo = foo\n",
|
||||
" self.bar = bar\n",
|
||||
"\n",
|
||||
" @classmethod\n",
|
||||
" def from_json(cls, fname):\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
" def to_json(self, fname):\n",
|
||||
" pass\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "{'foo': 3, 'bar': 'two'}"
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"so_nice = SomethingNice(foo=3, bar='two')\n",
|
||||
"so_nice.__dict__"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"hide_input": false,
|
||||
"kernelspec": {
|
||||
"display_name": "Python [default]",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.5.5"
|
||||
},
|
||||
"toc": {
|
||||
"nav_menu": {
|
||||
"height": "12px",
|
||||
"width": "252px"
|
||||
},
|
||||
"navigate_menu": true,
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"threshold": 4,
|
||||
"toc_cell": false,
|
||||
"toc_section_display": "block",
|
||||
"toc_window_display": false
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
1
notebooks/SomethingNice.json
Normal file
|
@ -0,0 +1 @@
|
|||
{"foo": 3, "bar": "two"}
|
146
notebooks/exercises/particle_update_position.ipynb
Normal file
|
@ -0,0 +1,146 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"# Exercise: Add the function `update_position` to the `Particle` class\n",
|
||||
"\n",
|
||||
"- Make the function `update_position` into a method of the class `Particle`\n",
|
||||
"- Where do the position `position` of the particle belong? Modify the class constructor if necessary\n",
|
||||
"- Once it is done, create a particle with mass 2.1 with velocity 0.8 at position 8.2 . Update the position with dt=0.1 and print out the new location."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2018-07-27T15:05:51.531289Z",
|
||||
"start_time": "2018-07-27T17:05:51.526519+02:00"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def update_position(velocity, position, dt):\n",
|
||||
" return position + velocity * dt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"1.6800000000000002\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"class Particle:\n",
|
||||
" def __init__(self, mass, velocity):\n",
|
||||
" self.mass = mass\n",
|
||||
" self.velocity = velocity\n",
|
||||
"\n",
|
||||
" def momentum(self):\n",
|
||||
" return self.mass * self.velocity\n",
|
||||
"\n",
|
||||
"particle = Particle(mass=2.1, velocity=0.8)\n",
|
||||
"print(particle.momentum())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"8.28\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"position = 8.2\n",
|
||||
"new_position = update_position(particle.velocity, position, dt=0.1)\n",
|
||||
"print(new_position)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"hide_input": false,
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
},
|
||||
"toc": {
|
||||
"nav_menu": {
|
||||
"height": "174px",
|
||||
"width": "252px"
|
||||
},
|
||||
"navigate_menu": true,
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"threshold": 4,
|
||||
"toc_cell": false,
|
||||
"toc_position": {
|
||||
"height": "953px",
|
||||
"left": "0px",
|
||||
"right": "1253px",
|
||||
"top": "127px",
|
||||
"width": "320px"
|
||||
},
|
||||
"toc_section_display": "block",
|
||||
"toc_window_display": false
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
131
notebooks/exercises/solution/particle_update_position.ipynb
Normal file
|
@ -0,0 +1,131 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"# Exercise: Add the function `update_position` to the `Particle` class\n",
|
||||
"\n",
|
||||
"- Make the function `update_position` into a method of the class `Particle`\n",
|
||||
"- Where do the position `position` of the particle belong? Modify the class constructor if necessary\n",
|
||||
"- Once it is done, create a particle with mass 2.1 with velocity 0.8 at position 8.2 . Update the position with dt=0.1 and print out the new location."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"1.6800000000000002\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"class Particle:\n",
|
||||
" def __init__(self, mass, velocity, position):\n",
|
||||
" self.mass = mass\n",
|
||||
" self.velocity = velocity\n",
|
||||
" self.position = position\n",
|
||||
"\n",
|
||||
" def momentum(self):\n",
|
||||
" return self.mass * self.velocity\n",
|
||||
"\n",
|
||||
" def update_position(self, dt):\n",
|
||||
" self.position = self.position + self.velocity * dt\n",
|
||||
"\n",
|
||||
"particle = Particle(mass=2.1, velocity=0.8, position=8.2)\n",
|
||||
"print(particle.momentum())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"8.28\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"particle.update_position(dt=0.1)\n",
|
||||
"print(particle.position)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"hide_input": false,
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
},
|
||||
"toc": {
|
||||
"nav_menu": {
|
||||
"height": "174px",
|
||||
"width": "252px"
|
||||
},
|
||||
"navigate_menu": true,
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"threshold": 4,
|
||||
"toc_cell": false,
|
||||
"toc_position": {
|
||||
"height": "953px",
|
||||
"left": "0px",
|
||||
"right": "1253px",
|
||||
"top": "127px",
|
||||
"width": "320px"
|
||||
},
|
||||
"toc_section_display": "block",
|
||||
"toc_window_display": false
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
BIN
notebooks/simple.pickle
Normal file
286
notebooks/walker/Step_0_Introduction/Step_0_Introduction.ipynb
Normal file
1
notebooks/walker/Step_0_Introduction/show
Normal file
|
@ -0,0 +1 @@
|
|||
|
81
notebooks/walker/Step_0_Introduction/walker.py
Normal file
|
@ -0,0 +1,81 @@
|
|||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
def sample_next_step(current_i, current_j, sigma_i, sigma_j, context_map,
                     random_state=np.random):
    """ Sample a new position for the walker.

    Builds a next-step probability map around (current_i, current_j) and
    draws a single grid position from it by inverse-transform sampling.
    """
    # The Gaussian proposal around the current position, modulated by the
    # context map, gives the probability of each candidate next cell.
    grid_size = context_map.shape[0]
    proposal = next_step_proposal(current_i, current_j, sigma_i, sigma_j,
                                  grid_size)
    step_probability = compute_next_step_probability(proposal, context_map)

    # Inverse-transform sampling: locate the first cell whose cumulative
    # probability reaches a uniform random draw.
    draw = random_state.rand()
    cdf = np.cumsum(step_probability).reshape(step_probability.shape)
    i_next, j_next = np.argwhere(cdf >= draw)[0]
    return i_next, j_next
|
||||
|
||||
|
||||
def next_step_proposal(current_i, current_j, sigma_i, sigma_j, size):
    """ Create the 2D proposal map for the next step of the walker.

    Returns a (size, size) array holding an axis-aligned 2D Gaussian
    centered at (current_i, current_j), normalized so it sums to 1.
    """
    grid_ii, grid_jj = np.mgrid[0:size, 0:size]
    # Squared, per-axis standardized distance from the current position
    # (independent standard deviations along i and j).
    standardized_sq = ((grid_ii - current_i) / sigma_i) ** 2 \
        + ((grid_jj - current_j) / sigma_j) ** 2
    gaussian = np.exp(-0.5 * standardized_sq) / (2.0 * np.pi * sigma_i * sigma_j)
    # Renormalize so the discretized map is a valid probability distribution.
    return gaussian / gaussian.sum()
|
||||
|
||||
|
||||
def compute_next_step_probability(next_step_map, context_map):
    """ Compute the next step probability map from next step proposal and
    context map.

    Element-wise product of the two maps, renormalized to sum to 1; the
    input arrays are left unmodified.
    """
    combined = next_step_map * context_map
    return combined / combined.sum()
|
||||
|
||||
|
||||
def create_context_map(size, map_type='flat'):
    """ Create a fixed context map.

    Parameters
    ----------
    size : int
        Edge length of the square map.
    map_type : str
        One of 'flat' (uniform), 'hills' (smooth sine-wave bumps), or
        'labyrinth' (walls of zero probability).

    Returns
    -------
    ndarray of shape (size, size), normalized so it sums to 1.

    Raises
    ------
    ValueError
        If `map_type` is not a supported name.  (Previously an unknown
        name crashed with an obscure UnboundLocalError instead.)
    """
    if map_type == 'flat':
        context_map = np.ones((size, size))
    elif map_type == 'hills':
        # Smooth bumps: sums of sine waves along each axis.
        grid_ii, grid_jj = np.mgrid[0:size, 0:size]
        i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)
        i_waves /= i_waves.max()
        j_waves = np.sin(grid_jj / 100) + np.sin(grid_jj / 50) + \
            np.sin(grid_jj / 10)
        j_waves /= j_waves.max()
        context_map = j_waves + i_waves
    elif map_type == 'labyrinth':
        # Walls of zero probability.  Wall indices assume a map of at
        # least ~200x200 cells; smaller maps simply get fewer walls.
        context_map = np.ones((size, size))
        context_map[50:100, 50:60] = 0
        context_map[20:89, 80:90] = 0
        context_map[90:120, 0:10] = 0
        context_map[120:size, 30:40] = 0
        context_map[180:190, 50:60] = 0

        context_map[50:60, 50:200] = 0
        context_map[179:189, 80:130] = 0
        context_map[110:120, 0:190] = 0
        context_map[120:size, 30:40] = 0
        context_map[180:190, 50:60] = 0
    else:
        raise ValueError(f'Unknown map type: {map_type!r}')
    # Normalize to a probability distribution over cells.
    context_map /= context_map.sum()
    return context_map
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory over a context map.

    Parameters
    ----------
    trajectory : sequence of (i, j) positions
        Walker positions; converted to an array so columns can be sliced.
    context_map : 2D array
        Background map displayed with `matshow`.
    """
    trajectory = np.asarray(trajectory)
    plt.matshow(context_map)
    # matshow puts row index i on the vertical axis, so plot (j, i).
    plt.plot(trajectory[:, 1], trajectory[:, 0], color='r')
    plt.show()
|
208
notebooks/walker/Step_1_classes/Step_1_classes_exercise.ipynb
Normal file
1
notebooks/walker/Step_1_classes/exercise
Normal file
|
@ -0,0 +1 @@
|
|||
|
200
notebooks/walker/Step_1_classes/solution/Step_1_classes.ipynb
Normal file
91
notebooks/walker/Step_1_classes/solution/walker.py
Normal file
|
@ -0,0 +1,91 @@
|
|||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
class Walker:
    """ The Walker knows how to walk at random on a context map.

    Parameters
    ----------
    sigma_i, sigma_j : float
        Standard deviations of the Gaussian step proposal along the i
        and j axes.
    size : int
        Edge length of the square context map.
    map_type : str
        One of 'flat', 'hills', or 'labyrinth'.

    Raises
    ------
    ValueError
        If `map_type` is not a supported name.  (Previously an unknown
        name crashed with an obscure UnboundLocalError.)
    """

    def __init__(self, sigma_i, sigma_j, size, map_type='flat'):
        self.sigma_i = sigma_i
        self.sigma_j = sigma_j
        self.size = size
        self.context_map = self._create_context_map(size, map_type)

        # Pre-compute a 2D grid of coordinates for efficiency
        self._grid_ii, self._grid_jj = np.mgrid[0:size, 0:size]

    # --- Walker public interface

    def sample_next_step(self, current_i, current_j, random_state=np.random):
        """ Sample a new position for the walker. """

        # Combine the next-step proposal with the context map to get a
        # next-step probability map
        next_step_map = self._next_step_proposal(current_i, current_j)
        selection_map = self._compute_next_step_probability(next_step_map)

        # Draw a new position by inverse-transform sampling on the
        # flattened cumulative probability map.
        r = random_state.rand()
        cumulative_map = np.cumsum(selection_map)
        cumulative_map = cumulative_map.reshape(selection_map.shape)
        i_next, j_next = np.argwhere(cumulative_map >= r)[0]

        return i_next, j_next

    # --- Walker non-public interface

    @staticmethod
    def _create_context_map(size, map_type):
        """ Build the (size, size) context map, normalized to sum to 1. """
        if map_type == 'flat':
            context_map = np.ones((size, size))
        elif map_type == 'hills':
            # Smooth bumps: sums of sine waves along each axis.
            grid_ii, grid_jj = np.mgrid[0:size, 0:size]
            i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)
            i_waves /= i_waves.max()
            j_waves = np.sin(grid_jj / 100) + np.sin(grid_jj / 50) + \
                np.sin(grid_jj / 10)
            j_waves /= j_waves.max()
            context_map = j_waves + i_waves
        elif map_type == 'labyrinth':
            # Walls of zero probability; wall indices assume a map of at
            # least ~200x200 cells (smaller maps get fewer walls).
            context_map = np.ones((size, size))
            context_map[50:100, 50:60] = 0
            context_map[20:89, 80:90] = 0
            context_map[90:120, 0:10] = 0
            context_map[120:size, 30:40] = 0
            context_map[180:190, 50:60] = 0

            context_map[50:60, 50:200] = 0
            context_map[179:189, 80:130] = 0
            context_map[110:120, 0:190] = 0
            context_map[120:size, 30:40] = 0
            context_map[180:190, 50:60] = 0
        else:
            raise ValueError(f'Unknown map type: {map_type!r}')
        # Normalize to a probability distribution over cells.
        context_map /= context_map.sum()
        return context_map

    def _next_step_proposal(self, current_i, current_j):
        """ Create the 2D proposal map for the next step of the walker. """

        # 2D Gaussian distribution, centered at current position,
        # and with different standard deviations for i and j
        grid_ii, grid_jj = self._grid_ii, self._grid_jj
        sigma_i, sigma_j = self.sigma_i, self.sigma_j

        rad = (
            (((grid_ii - current_i) ** 2) / (sigma_i ** 2))
            + (((grid_jj - current_j) ** 2) / (sigma_j ** 2))
        )

        p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)
        return p_next_step / p_next_step.sum()

    def _compute_next_step_probability(self, next_step_map):
        """ Compute the next step probability map from next step proposal and
        context map. """
        next_step_probability = next_step_map * self.context_map
        next_step_probability /= next_step_probability.sum()
        return next_step_probability
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory over a context map.

    Parameters
    ----------
    trajectory : sequence of (i, j) positions
        Walker positions; converted to an array so columns can be sliced.
    context_map : 2D array
        Background map displayed with `matshow`.
    """
    trajectory = np.asarray(trajectory)
    plt.matshow(context_map)
    # matshow puts row index i on the vertical axis, so plot (j, i).
    plt.plot(trajectory[:, 1], trajectory[:, 0], color='r')
    plt.show()
|
81
notebooks/walker/Step_1_classes/walker.py
Normal file
|
@ -0,0 +1,81 @@
|
|||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
def sample_next_step(current_i, current_j, sigma_i, sigma_j, context_map,
                     random_state=np.random):
    """ Sample a new position for the walker.

    Builds a next-step probability map around (current_i, current_j) and
    draws a single grid position from it by inverse-transform sampling.
    """
    # The Gaussian proposal around the current position, modulated by the
    # context map, gives the probability of each candidate next cell.
    grid_size = context_map.shape[0]
    proposal = next_step_proposal(current_i, current_j, sigma_i, sigma_j,
                                  grid_size)
    step_probability = compute_next_step_probability(proposal, context_map)

    # Inverse-transform sampling: locate the first cell whose cumulative
    # probability reaches a uniform random draw.
    draw = random_state.rand()
    cdf = np.cumsum(step_probability).reshape(step_probability.shape)
    i_next, j_next = np.argwhere(cdf >= draw)[0]
    return i_next, j_next
|
||||
|
||||
|
||||
def next_step_proposal(current_i, current_j, sigma_i, sigma_j, size):
    """ Create the 2D proposal map for the next step of the walker.

    Returns a (size, size) array holding an axis-aligned 2D Gaussian
    centered at (current_i, current_j), normalized so it sums to 1.
    """
    grid_ii, grid_jj = np.mgrid[0:size, 0:size]
    # Squared, per-axis standardized distance from the current position
    # (independent standard deviations along i and j).
    standardized_sq = ((grid_ii - current_i) / sigma_i) ** 2 \
        + ((grid_jj - current_j) / sigma_j) ** 2
    gaussian = np.exp(-0.5 * standardized_sq) / (2.0 * np.pi * sigma_i * sigma_j)
    # Renormalize so the discretized map is a valid probability distribution.
    return gaussian / gaussian.sum()
|
||||
|
||||
|
||||
def compute_next_step_probability(next_step_map, context_map):
    """ Compute the next step probability map from next step proposal and
    context map.

    Element-wise product of the two maps, renormalized to sum to 1; the
    input arrays are left unmodified.
    """
    combined = next_step_map * context_map
    return combined / combined.sum()
|
||||
|
||||
|
||||
def create_context_map(size, map_type='flat'):
    """ Create a fixed context map.

    Parameters
    ----------
    size : int
        Edge length of the square map.
    map_type : str
        One of 'flat' (uniform), 'hills' (smooth sine-wave bumps), or
        'labyrinth' (walls of zero probability).

    Returns
    -------
    ndarray of shape (size, size), normalized so it sums to 1.

    Raises
    ------
    ValueError
        If `map_type` is not a supported name.  (Previously an unknown
        name crashed with an obscure UnboundLocalError instead.)
    """
    if map_type == 'flat':
        context_map = np.ones((size, size))
    elif map_type == 'hills':
        # Smooth bumps: sums of sine waves along each axis.
        grid_ii, grid_jj = np.mgrid[0:size, 0:size]
        i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)
        i_waves /= i_waves.max()
        j_waves = np.sin(grid_jj / 100) + np.sin(grid_jj / 50) + \
            np.sin(grid_jj / 10)
        j_waves /= j_waves.max()
        context_map = j_waves + i_waves
    elif map_type == 'labyrinth':
        # Walls of zero probability.  Wall indices assume a map of at
        # least ~200x200 cells; smaller maps simply get fewer walls.
        context_map = np.ones((size, size))
        context_map[50:100, 50:60] = 0
        context_map[20:89, 80:90] = 0
        context_map[90:120, 0:10] = 0
        context_map[120:size, 30:40] = 0
        context_map[180:190, 50:60] = 0

        context_map[50:60, 50:200] = 0
        context_map[179:189, 80:130] = 0
        context_map[110:120, 0:190] = 0
        context_map[120:size, 30:40] = 0
        context_map[180:190, 50:60] = 0
    else:
        raise ValueError(f'Unknown map type: {map_type!r}')
    # Normalize to a probability distribution over cells.
    context_map /= context_map.sum()
    return context_map
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory over a context map.

    Parameters
    ----------
    trajectory : sequence of (i, j) positions
        Walker positions; converted to an array so columns can be sliced.
    context_map : 2D array
        Background map displayed with `matshow`.
    """
    trajectory = np.asarray(trajectory)
    plt.matshow(context_map)
    # matshow puts row index i on the vertical axis, so plot (j, i).
    plt.plot(trajectory[:, 1], trajectory[:, 0], color='r')
    plt.show()
|
166
notebooks/walker/Step_2_plotting/Step_2_plotting.ipynb
Normal file
1
notebooks/walker/Step_2_plotting/show
Normal file
|
@ -0,0 +1 @@
|
|||
|
167
notebooks/walker/Step_2_plotting/solution/Step_2_plotting.ipynb
Normal file
22
notebooks/walker/Step_2_plotting/solution/plotting.py
Normal file
|
@ -0,0 +1,22 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory over a context map.

    Parameters
    ----------
    trajectory : sequence of (i, j) positions
        Walker positions; converted to an array so columns can be sliced.
    context_map : 2D array
        Background map displayed with `matshow`.
    """
    trajectory = np.asarray(trajectory)
    plt.matshow(context_map)
    # matshow puts row index i on the vertical axis, so plot (j, i).
    plt.plot(trajectory[:, 1], trajectory[:, 0], color='r')
    plt.show()
|
||||
|
||||
|
||||
def plot_trajectory_hexbin(trajectory):
    """ Plot an hexagonal density map of a trajectory.

    Parameters
    ----------
    trajectory : sequence of (i, j) positions
        Walker positions accumulated over a run.

    NOTE(review): the plot extent is hard-coded to (0, 200, 0, 200), so
    this assumes a 200x200 context map — confirm against the callers.
    """
    trajectory = np.asarray(trajectory)
    # Temporarily enlarge the figure and fonts for this plot only.
    with plt.rc_context({'figure.figsize': (4, 4), 'axes.labelsize': 16,
                         'xtick.labelsize': 14, 'ytick.labelsize': 14}):
        # Column 1 is the j (x) coordinate, column 0 is the i (y) coordinate.
        plt.hexbin(trajectory[:, 1], trajectory[:, 0], gridsize=30,
                   extent=(0, 200, 0, 200), edgecolors='none', cmap='Reds')
        # Invert y so the plot matches matshow's row-downward orientation.
        plt.gca().invert_yaxis()
        plt.xlabel('X')
        plt.ylabel('Y')
83
notebooks/walker/Step_2_plotting/solution/walker.py
Normal file
|
@ -0,0 +1,83 @@
|
|||
import numpy as np
|
||||
|
||||
|
||||
class Walker:
    """ The Walker knows how to walk at random on a context map.

    Parameters
    ----------
    sigma_i, sigma_j : float
        Standard deviations of the Gaussian step proposal along the i
        and j axes.
    size : int
        Edge length of the square context map.
    map_type : str
        One of 'flat', 'hills', or 'labyrinth'.

    Raises
    ------
    ValueError
        If `map_type` is not a supported name.  (Previously an unknown
        name crashed with an obscure UnboundLocalError.)
    """

    def __init__(self, sigma_i, sigma_j, size, map_type='flat'):
        self.sigma_i = sigma_i
        self.sigma_j = sigma_j
        self.size = size
        self.context_map = self._create_context_map(size, map_type)

        # Pre-compute a 2D grid of coordinates for efficiency
        self._grid_ii, self._grid_jj = np.mgrid[0:size, 0:size]

    # --- Walker public interface

    def sample_next_step(self, current_i, current_j, random_state=np.random):
        """ Sample a new position for the walker. """

        # Combine the next-step proposal with the context map to get a
        # next-step probability map
        next_step_map = self._next_step_proposal(current_i, current_j)
        selection_map = self._compute_next_step_probability(next_step_map)

        # Draw a new position by inverse-transform sampling on the
        # flattened cumulative probability map.
        r = random_state.rand()
        cumulative_map = np.cumsum(selection_map)
        cumulative_map = cumulative_map.reshape(selection_map.shape)
        i_next, j_next = np.argwhere(cumulative_map >= r)[0]

        return i_next, j_next

    # --- Walker non-public interface

    @staticmethod
    def _create_context_map(size, map_type):
        """ Build the (size, size) context map, normalized to sum to 1. """
        if map_type == 'flat':
            context_map = np.ones((size, size))
        elif map_type == 'hills':
            # Smooth bumps: sums of sine waves along each axis.
            grid_ii, grid_jj = np.mgrid[0:size, 0:size]
            i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)
            i_waves /= i_waves.max()
            j_waves = np.sin(grid_jj / 100) + np.sin(grid_jj / 50) + \
                np.sin(grid_jj / 10)
            j_waves /= j_waves.max()
            context_map = j_waves + i_waves
        elif map_type == 'labyrinth':
            # Walls of zero probability; wall indices assume a map of at
            # least ~200x200 cells (smaller maps get fewer walls).
            context_map = np.ones((size, size))
            context_map[50:100, 50:60] = 0
            context_map[20:89, 80:90] = 0
            context_map[90:120, 0:10] = 0
            context_map[120:size, 30:40] = 0
            context_map[180:190, 50:60] = 0

            context_map[50:60, 50:200] = 0
            context_map[179:189, 80:130] = 0
            context_map[110:120, 0:190] = 0
            context_map[120:size, 30:40] = 0
            context_map[180:190, 50:60] = 0
        else:
            raise ValueError(f'Unknown map type: {map_type!r}')
        # Normalize to a probability distribution over cells.
        context_map /= context_map.sum()
        return context_map

    def _next_step_proposal(self, current_i, current_j):
        """ Create the 2D proposal map for the next step of the walker. """

        # 2D Gaussian distribution, centered at current position,
        # and with different standard deviations for i and j
        grid_ii, grid_jj = self._grid_ii, self._grid_jj
        sigma_i, sigma_j = self.sigma_i, self.sigma_j

        rad = (
            (((grid_ii - current_i) ** 2) / (sigma_i ** 2))
            + (((grid_jj - current_j) ** 2) / (sigma_j ** 2))
        )

        p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)
        return p_next_step / p_next_step.sum()

    def _compute_next_step_probability(self, next_step_map):
        """ Compute the next step probability map from next step proposal and
        context map. """
        next_step_probability = next_step_map * self.context_map
        next_step_probability /= next_step_probability.sum()
        return next_step_probability
|
||||
|
101
notebooks/walker/Step_2_plotting/walker.py
Normal file
|
@ -0,0 +1,101 @@
|
|||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
class Walker:
    """ The Walker knows how to walk at random on a context map.

    The walker proposes a Gaussian step around its current position and
    weights it by a context map ('flat', 'hills' or 'labyrinth') that makes
    some positions more likely than others.
    """

    def __init__(self, sigma_i, sigma_j, size, map_type='flat'):
        self.sigma_i = sigma_i
        self.sigma_j = sigma_j
        self.size = size

        if map_type == 'flat':
            context_map = np.ones((size, size))
        elif map_type == 'hills':
            grid_ii, grid_jj = np.mgrid[0:size, 0:size]
            i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)
            i_waves /= i_waves.max()
            j_waves = np.sin(grid_jj / 100) + np.sin(grid_jj / 50) + \
                np.sin(grid_jj / 10)
            j_waves /= j_waves.max()
            context_map = j_waves + i_waves
        elif map_type == 'labyrinth':
            context_map = np.ones((size, size))
            context_map[50:100, 50:60] = 0
            context_map[20:89, 80:90] = 0
            context_map[90:120, 0:10] = 0
            context_map[120:size, 30:40] = 0
            context_map[180:190, 50:60] = 0

            context_map[50:60, 50:200] = 0
            context_map[179:189, 80:130] = 0
            context_map[110:120, 0:190] = 0
        else:
            # Fail fast: previously an unknown map_type left `context_map`
            # unbound and crashed below with a confusing NameError.
            raise ValueError(f"Unknown map_type: {map_type!r}")
        # Normalize so the context map can act as a probability map.
        context_map /= context_map.sum()
        self.context_map = context_map

        # Pre-compute a 2D grid of coordinates for efficiency
        self._grid_ii, self._grid_jj = np.mgrid[0:size, 0:size]

    # --- Walker public interface

    def sample_next_step(self, current_i, current_j, random_state=np.random):
        """ Sample a new position for the walker. """

        # Combine the next-step proposal with the context map to get a
        # next-step probability map
        next_step_map = self._next_step_proposal(current_i, current_j)
        selection_map = self._compute_next_step_probability(next_step_map)

        # Draw a new position from the next-step probability map by
        # inverse-transform sampling on the flattened cumulative sum.
        r = random_state.rand()
        cumulative_map = np.cumsum(selection_map)
        cumulative_map = cumulative_map.reshape(selection_map.shape)
        i_next, j_next = np.argwhere(cumulative_map >= r)[0]

        return i_next, j_next

    def plot_trajectory(self, trajectory):
        """ Plot a trajectory over a context map. """
        trajectory = np.asarray(trajectory)
        plt.matshow(self.context_map)
        # Columns are x, rows are y in image coordinates.
        plt.plot(trajectory[:, 1], trajectory[:, 0], color='r')
        plt.show()

    def plot_trajectory_hexbin(self, trajectory):
        """ Plot a hexagonal density map of a trajectory. """
        trajectory = np.asarray(trajectory)
        with plt.rc_context({'figure.figsize': (4, 4), 'axes.labelsize': 16,
                             'xtick.labelsize': 14, 'ytick.labelsize': 14}):
            plt.hexbin(trajectory[:, 1], trajectory[:, 0], gridsize=30,
                       extent=(0, 200, 0, 200), edgecolors='none', cmap='Reds')
            plt.gca().invert_yaxis()
            plt.xlabel('X')
            plt.ylabel('Y')

    # --- Walker non-public interface

    def _next_step_proposal(self, current_i, current_j):
        """ Create the 2D proposal map for the next step of the walker. """

        # 2D Gaussian distribution, centered at current position,
        # and with different standard deviations for i and j
        grid_ii, grid_jj = self._grid_ii, self._grid_jj
        sigma_i, sigma_j = self.sigma_i, self.sigma_j

        rad = (
            (((grid_ii - current_i) ** 2) / (sigma_i ** 2))
            + (((grid_jj - current_j) ** 2) / (sigma_j ** 2))
        )

        p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)
        return p_next_step / p_next_step.sum()

    def _compute_next_step_probability(self, next_step_map):
        """ Compute the next step probability map from next step proposal and
        context map. """
        next_step_probability = next_step_map * self.context_map
        next_step_probability /= next_step_probability.sum()
        return next_step_probability
|
|
@ -0,0 +1,173 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"# 1. Take a look at this (working) code\n",
|
||||
"\n",
|
||||
"... and run it. We discussed that the `context_map` varies independently of the walker. Identify the part of the code that will be affected by this change."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2022-08-18T09:50:40.616906Z",
|
||||
"start_time": "2022-08-18T11:50:40.181358+02:00"
|
||||
},
|
||||
"execution": {
|
||||
"iopub.execute_input": "2022-08-20T06:27:54.689Z",
|
||||
"iopub.status.busy": "2022-08-20T06:27:54.685Z",
|
||||
"iopub.status.idle": "2022-08-20T06:27:55.297Z",
|
||||
"shell.execute_reply": "2022-08-20T06:27:55.319Z"
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%matplotlib inline\n",
|
||||
"\n",
|
||||
"from plotting import plot_trajectory, plot_trajectory_hexbin\n",
|
||||
"from walker import Walker\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": true,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false,
|
||||
"source_hidden": false
|
||||
},
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create a Walker instance\n",
|
||||
"walker = Walker(sigma_i=3, sigma_j=4, size=200, map_type='hills')\n",
|
||||
"\n",
|
||||
"# Sample a next step 1000 times\n",
|
||||
"i, j = 100, 50\n",
|
||||
"trajectory = []\n",
|
||||
"for _ in range(1000):\n",
|
||||
" i, j = walker.sample_next_step(i, j)\n",
|
||||
" trajectory.append((i, j))\n",
|
||||
"\n",
|
||||
"plot_trajectory(trajectory, walker.context_map)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"# 2. Modify the above code to reflect the idea of a separate context_map module\n",
|
||||
"\n",
|
||||
"1. how would the import statement change as a result of needing a separate context_map module?\n",
|
||||
"2. what input arguments do the context_map functions need to take?\n",
|
||||
"3. how does the initialization of the walker change?\n",
|
||||
" - i.e. instead of \"map_type\"\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"nteract": {
|
||||
"transient": {
|
||||
"deleting": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"# 3. (optional) Actually break out the context map initialization\n",
|
||||
"1. Move context map initialization to three functions in a separate `context_map.py` module which all return a `context_map` array\n",
|
||||
"2. Modify the constructor of Walker to take a `context_map` array instead of a `map_type`\n",
|
||||
"3. Modify this notebook to use the new code and see if the code you wrote works!\n",
|
||||
"4. Try to run all the types:\n",
|
||||
" - Run one simulation with a flat context map\n",
|
||||
" - Run one simulation with a hill context map\n",
|
||||
" - Run one simulation with a labyrinth context map"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"hide_input": false,
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"nteract": {
|
||||
"version": "0.28.0"
|
||||
},
|
||||
"toc": {
|
||||
"nav_menu": {
|
||||
"height": "12px",
|
||||
"width": "252px"
|
||||
},
|
||||
"navigate_menu": true,
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"threshold": 4,
|
||||
"toc_cell": false,
|
||||
"toc_section_display": "block",
|
||||
"toc_window_display": false
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
|
|
@ -0,0 +1,22 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory as a red line on top of a context map. """
    path = np.asarray(trajectory)
    rows, cols = path[:, 0], path[:, 1]
    plt.matshow(context_map)
    plt.plot(cols, rows, color='r')
    plt.show()
|
||||
|
||||
|
||||
def plot_trajectory_hexbin(trajectory):
    """ Plot a hexagonal density map of a trajectory. """
    path = np.asarray(trajectory)
    style = {'figure.figsize': (4, 4), 'axes.labelsize': 16,
             'xtick.labelsize': 14, 'ytick.labelsize': 14}
    with plt.rc_context(style):
        plt.hexbin(path[:, 1], path[:, 0], gridsize=30,
                   extent=(0, 200, 0, 200), edgecolors='none', cmap='Reds')
        # Match image-style coordinates: row 0 at the top.
        plt.gca().invert_yaxis()
        plt.xlabel('X')
        plt.ylabel('Y')
|
|
@ -0,0 +1,37 @@
|
|||
""" CONTEXT MAP BUILDERS """
|
||||
import numpy as np
|
||||
|
||||
|
||||
def flat_context_map(size):
    """ A context map where all positions are equally likely. """
    return np.full((size, size), 1.0)
|
||||
|
||||
|
||||
def hills_context_map(size):
    """ A context map with bumps and valleys. """
    grid_ii, grid_jj = np.mgrid[0:size, 0:size]
    # Superimposed sine waves along each axis, each normalized to max 1.
    i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)
    j_waves = (np.sin(grid_jj / 100) + np.sin(grid_jj / 50)
               + np.sin(grid_jj / 10))
    return i_waves / i_waves.max() + j_waves / j_waves.max()
|
||||
|
||||
|
||||
def labyrinth_context_map(size):
    """ A context map that looks like a labyrinth. """
    # Walls (probability zero) as (row-slice, column-slice) pairs.
    walls = [
        (slice(50, 100), slice(50, 60)),
        (slice(20, 89), slice(80, 90)),
        (slice(90, 120), slice(0, 10)),
        (slice(120, size), slice(30, 40)),
        (slice(180, 190), slice(50, 60)),
        (slice(50, 60), slice(50, 200)),
        (slice(179, 189), slice(80, 130)),
        (slice(110, 120), slice(0, 190)),
    ]
    context_map = np.ones((size, size))
    for rows, cols in walls:
        context_map[rows, cols] = 0
    return context_map
|
|
@ -0,0 +1,22 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory as a red line on top of a context map. """
    path = np.asarray(trajectory)
    rows, cols = path[:, 0], path[:, 1]
    plt.matshow(context_map)
    plt.plot(cols, rows, color='r')
    plt.show()
|
||||
|
||||
|
||||
def plot_trajectory_hexbin(trajectory):
    """ Plot a hexagonal density map of a trajectory. """
    path = np.asarray(trajectory)
    style = {'figure.figsize': (4, 4), 'axes.labelsize': 16,
             'xtick.labelsize': 14, 'ytick.labelsize': 14}
    with plt.rc_context(style):
        plt.hexbin(path[:, 1], path[:, 0], gridsize=30,
                   extent=(0, 200, 0, 200), edgecolors='none', cmap='Reds')
        # Match image-style coordinates: row 0 at the top.
        plt.gca().invert_yaxis()
        plt.xlabel('X')
        plt.ylabel('Y')
|
|
@ -0,0 +1,60 @@
|
|||
import numpy as np
|
||||
|
||||
|
||||
class Walker:
    """ The Walker knows how to walk at random on a context map. """

    def __init__(self, sigma_i, sigma_j, context_map):
        """Create a walker.

        Parameters
        ----------
        sigma_i, sigma_j : float
            Standard deviations of the Gaussian step along i and j.
        context_map : ndarray of shape (size, size)
            Square map weighting the next-step probabilities.
        """
        self.sigma_i = sigma_i
        self.sigma_j = sigma_j
        self.size = context_map.shape[0]
        # Normalize a *copy* of the context map so that it sums to one.
        # The previous in-place `context_map /= ...` silently mutated the
        # caller's array and raised a casting error on integer arrays.
        self.context_map = context_map / context_map.sum()

        # Pre-compute a 2D grid of coordinates for efficiency
        self._grid_ii, self._grid_jj = np.mgrid[0:self.size, 0:self.size]

    # --- Walker public interface

    def sample_next_step(self, current_i, current_j, random_state=np.random):
        """ Sample a new position for the walker. """

        # Combine the next-step proposal with the context map to get a
        # next-step probability map
        next_step_map = self._next_step_proposal(current_i, current_j)
        selection_map = self._compute_next_step_probability(next_step_map)

        # Draw a new position from the next-step probability map by
        # inverse-transform sampling on the flattened cumulative sum.
        r = random_state.rand()
        cumulative_map = np.cumsum(selection_map)
        cumulative_map = cumulative_map.reshape(selection_map.shape)
        i_next, j_next = np.argwhere(cumulative_map >= r)[0]

        return i_next, j_next

    # --- Walker non-public interface

    def _next_step_proposal(self, current_i, current_j):
        """ Create the 2D proposal map for the next step of the walker. """

        # 2D Gaussian distribution, centered at current position,
        # and with different standard deviations for i and j
        grid_ii, grid_jj = self._grid_ii, self._grid_jj
        sigma_i, sigma_j = self.sigma_i, self.sigma_j

        rad = (
            (((grid_ii - current_i) ** 2) / (sigma_i ** 2))
            + (((grid_jj - current_j) ** 2) / (sigma_j ** 2))
        )

        p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)
        return p_next_step / p_next_step.sum()

    def _compute_next_step_probability(self, next_step_map):
        """ Compute the next step probability map from next step proposal and
        context map. """
        next_step_probability = next_step_map * self.context_map
        next_step_probability /= next_step_probability.sum()
        return next_step_probability
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
import numpy as np
|
||||
|
||||
|
||||
class Walker:
    """ The Walker knows how to walk at random on a context map. """

    def __init__(self, sigma_i, sigma_j, size, map_type='flat'):
        self.sigma_i = sigma_i
        self.sigma_j = sigma_j
        self.size = size

        if map_type == 'flat':
            context_map = np.ones((size, size))
        elif map_type == 'hills':
            grid_ii, grid_jj = np.mgrid[0:size, 0:size]
            i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)
            i_waves /= i_waves.max()
            j_waves = np.sin(grid_jj / 100) + np.sin(grid_jj / 50) + \
                np.sin(grid_jj / 10)
            j_waves /= j_waves.max()
            context_map = j_waves + i_waves
        elif map_type == 'labyrinth':
            context_map = np.ones((size, size))
            context_map[50:100, 50:60] = 0
            context_map[20:89, 80:90] = 0
            context_map[90:120, 0:10] = 0
            context_map[120:size, 30:40] = 0
            context_map[180:190, 50:60] = 0

            context_map[50:60, 50:200] = 0
            context_map[179:189, 80:130] = 0
            context_map[110:120, 0:190] = 0
        else:
            # Fail fast: previously an unknown map_type left `context_map`
            # unbound and crashed below with a confusing NameError.
            raise ValueError(f"Unknown map_type: {map_type!r}")
        # Normalize so the context map can act as a probability map.
        context_map /= context_map.sum()
        self.context_map = context_map

        # Pre-compute a 2D grid of coordinates for efficiency
        self._grid_ii, self._grid_jj = np.mgrid[0:size, 0:size]

    # --- Walker public interface

    def sample_next_step(self, current_i, current_j, random_state=np.random):
        """ Sample a new position for the walker. """

        # Combine the next-step proposal with the context map to get a
        # next-step probability map
        next_step_map = self._next_step_proposal(current_i, current_j)
        selection_map = self._compute_next_step_probability(next_step_map)

        # Draw a new position from the next-step probability map by
        # inverse-transform sampling on the flattened cumulative sum.
        r = random_state.rand()
        cumulative_map = np.cumsum(selection_map)
        cumulative_map = cumulative_map.reshape(selection_map.shape)
        i_next, j_next = np.argwhere(cumulative_map >= r)[0]

        return i_next, j_next

    # --- Walker non-public interface

    def _next_step_proposal(self, current_i, current_j):
        """ Create the 2D proposal map for the next step of the walker. """

        # 2D Gaussian distribution, centered at current position,
        # and with different standard deviations for i and j
        grid_ii, grid_jj = self._grid_ii, self._grid_jj
        sigma_i, sigma_j = self.sigma_i, self.sigma_j

        rad = (
            (((grid_ii - current_i) ** 2) / (sigma_i ** 2))
            + (((grid_jj - current_j) ** 2) / (sigma_j ** 2))
        )

        p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)
        return p_next_step / p_next_step.sum()

    def _compute_next_step_probability(self, next_step_map):
        """ Compute the next step probability map from next step proposal and
        context map. """
        next_step_probability = next_step_map * self.context_map
        next_step_probability /= next_step_probability.sum()
        return next_step_probability
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
""" CONTEXT MAP BUILDERS """
|
||||
import numpy as np
|
||||
|
||||
|
||||
def flat_context_map(size):
    """ A context map where all positions are equally likely. """
    return np.full((size, size), 1.0)
|
||||
|
||||
|
||||
def hills_context_map(size):
    """ A context map with bumps and valleys. """
    grid_ii, grid_jj = np.mgrid[0:size, 0:size]
    # Superimposed sine waves along each axis, each normalized to max 1.
    i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)
    j_waves = (np.sin(grid_jj / 100) + np.sin(grid_jj / 50)
               + np.sin(grid_jj / 10))
    return i_waves / i_waves.max() + j_waves / j_waves.max()
|
||||
|
||||
|
||||
def labyrinth_context_map(size):
    """ A context map that looks like a labyrinth. """
    # Walls (probability zero) as (row-slice, column-slice) pairs.
    walls = [
        (slice(50, 100), slice(50, 60)),
        (slice(20, 89), slice(80, 90)),
        (slice(90, 120), slice(0, 10)),
        (slice(120, size), slice(30, 40)),
        (slice(180, 190), slice(50, 60)),
        (slice(50, 60), slice(50, 200)),
        (slice(179, 189), slice(80, 130)),
        (slice(110, 120), slice(0, 190)),
    ]
    context_map = np.ones((size, size))
    for rows, cols in walls:
        context_map[rows, cols] = 0
    return context_map
|
|
@ -0,0 +1 @@
|
|||
|
|
@ -0,0 +1,22 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory as a red line on top of a context map. """
    path = np.asarray(trajectory)
    rows, cols = path[:, 0], path[:, 1]
    plt.matshow(context_map)
    plt.plot(cols, rows, color='r')
    plt.show()
|
||||
|
||||
|
||||
def plot_trajectory_hexbin(trajectory):
    """ Plot a hexagonal density map of a trajectory. """
    path = np.asarray(trajectory)
    style = {'figure.figsize': (4, 4), 'axes.labelsize': 16,
             'xtick.labelsize': 14, 'ytick.labelsize': 14}
    with plt.rc_context(style):
        plt.hexbin(path[:, 1], path[:, 0], gridsize=30,
                   extent=(0, 200, 0, 200), edgecolors='none', cmap='Reds')
        # Match image-style coordinates: row 0 at the top.
        plt.gca().invert_yaxis()
        plt.xlabel('X')
        plt.ylabel('Y')
|
|
@ -0,0 +1,37 @@
|
|||
""" CONTEXT MAP BUILDERS """
|
||||
import numpy as np
|
||||
|
||||
|
||||
def flat_context_map(size):
    """ A context map where all positions are equally likely. """
    return np.full((size, size), 1.0)
|
||||
|
||||
|
||||
def hills_context_map(size):
    """ A context map with bumps and valleys. """
    grid_ii, grid_jj = np.mgrid[0:size, 0:size]
    # Superimposed sine waves along each axis, each normalized to max 1.
    i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)
    j_waves = (np.sin(grid_jj / 100) + np.sin(grid_jj / 50)
               + np.sin(grid_jj / 10))
    return i_waves / i_waves.max() + j_waves / j_waves.max()
|
||||
|
||||
|
||||
def labyrinth_context_map(size):
    """ A context map that looks like a labyrinth. """
    # Walls (probability zero) as (row-slice, column-slice) pairs.
    walls = [
        (slice(50, 100), slice(50, 60)),
        (slice(20, 89), slice(80, 90)),
        (slice(90, 120), slice(0, 10)),
        (slice(120, size), slice(30, 40)),
        (slice(180, 190), slice(50, 60)),
        (slice(50, 60), slice(50, 200)),
        (slice(179, 189), slice(80, 130)),
        (slice(110, 120), slice(0, 190)),
    ]
    context_map = np.ones((size, size))
    for rows, cols in walls:
        context_map[rows, cols] = 0
    return context_map
|
|
@ -0,0 +1,25 @@
|
|||
""" Functions to compute next step proposal maps. """
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
def gaussian_next_step_proposal(current_i, current_j, size, sigma_i, sigma_j):
    """ Gaussian next step proposal. """
    grid_ii, grid_jj = np.mgrid[0:size, 0:size]

    # Squared, sigma-scaled distance from the current position per axis.
    di2 = ((grid_ii - current_i) ** 2) / (sigma_i ** 2)
    dj2 = ((grid_jj - current_j) ** 2) / (sigma_j ** 2)
    rad = di2 + dj2

    p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)
    # Normalize so the proposal sums to one.
    return p_next_step / p_next_step.sum()
|
||||
|
||||
|
||||
def square_next_step_proposal(current_i, current_j, size, width):
    """ Square next step proposal. """
    grid_ii, grid_jj = np.mgrid[0:size, 0:size]
    # Uniform probability inside a square window centered at the current
    # position, zero outside.
    half = width // 2
    near_i = np.abs(grid_ii - current_i) <= half
    near_j = np.abs(grid_jj - current_j) <= half
    window = near_i & near_j
    return window / window.sum()
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory as a red line on top of a context map. """
    path = np.asarray(trajectory)
    rows, cols = path[:, 0], path[:, 1]
    plt.matshow(context_map)
    plt.plot(cols, rows, color='r')
    plt.show()
|
||||
|
||||
|
||||
def plot_trajectory_hexbin(trajectory):
    """ Plot a hexagonal density map of a trajectory. """
    path = np.asarray(trajectory)
    style = {'figure.figsize': (4, 4), 'axes.labelsize': 16,
             'xtick.labelsize': 14, 'ytick.labelsize': 14}
    with plt.rc_context(style):
        plt.hexbin(path[:, 1], path[:, 0], gridsize=30,
                   extent=(0, 200, 0, 200), edgecolors='none', cmap='Reds')
        # Match image-style coordinates: row 0 at the top.
        plt.gca().invert_yaxis()
        plt.xlabel('X')
        plt.ylabel('Y')
|
|
@ -0,0 +1,59 @@
|
|||
import numpy as np
|
||||
|
||||
|
||||
class Walker:
    """ The Walker knows how to walk at random on a context map.

    The next-step proposal is injected as a strategy: any callable with
    signature ``proposal(current_i, current_j, size, **arguments)`` that
    returns a (size, size) probability map.
    """

    def __init__(self, context_map, next_step_proposal, next_step_proposal_arguments):
        self.size = context_map.shape[0]
        # Normalize a *copy* of the context map so that it sums to one.
        # The previous in-place `context_map /= ...` silently mutated the
        # caller's array and raised a casting error on integer arrays.
        self.context_map = context_map / context_map.sum()

        # Strategy callable and the extra keyword arguments it needs beyond
        # the current position and the map size.
        self.next_step_proposal = next_step_proposal
        self.next_step_proposal_arguments = next_step_proposal_arguments

    # --- Walker public interface

    def sample_next_step(self, current_i, current_j, random_state=np.random):
        """ Sample a new position for the walker. """

        # Combine the next-step proposal with the context map to get a
        # next-step probability map
        next_step_map = self.next_step_proposal(
            current_i, current_j, self.size, **self.next_step_proposal_arguments)
        selection_map = self._compute_next_step_probability(next_step_map)

        # Draw a new position from the next-step probability map by
        # inverse-transform sampling on the flattened cumulative sum.
        r = random_state.rand()
        cumulative_map = np.cumsum(selection_map)
        cumulative_map = cumulative_map.reshape(selection_map.shape)
        i_next, j_next = np.argwhere(cumulative_map >= r)[0]

        return i_next, j_next

    # --- Walker non-public interface

    # NOTE: the leftover Gaussian `_next_step_proposal` method was removed.
    # It referenced attributes (`sigma_i`, `sigma_j`, `_grid_ii`, `_grid_jj`)
    # that this class never defines, so calling it could only raise
    # AttributeError; the Gaussian proposal now lives in the injected
    # `next_step_proposal` callable.

    def _compute_next_step_probability(self, next_step_map):
        """ Compute the next step probability map from next step proposal and
        context map. """
        next_step_probability = next_step_map * self.context_map
        next_step_probability /= next_step_probability.sum()
        return next_step_probability
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
import numpy as np
|
||||
|
||||
|
||||
class Walker:
    """ The Walker knows how to walk at random on a context map. """

    def __init__(self, sigma_i, sigma_j, context_map):
        """Create a walker.

        Parameters
        ----------
        sigma_i, sigma_j : float
            Standard deviations of the Gaussian step along i and j.
        context_map : ndarray of shape (size, size)
            Square map weighting the next-step probabilities.
        """
        self.sigma_i = sigma_i
        self.sigma_j = sigma_j
        self.size = context_map.shape[0]
        # Normalize a *copy* of the context map so that it sums to one.
        # The previous in-place `context_map /= ...` silently mutated the
        # caller's array and raised a casting error on integer arrays.
        self.context_map = context_map / context_map.sum()

        # Pre-compute a 2D grid of coordinates for efficiency
        self._grid_ii, self._grid_jj = np.mgrid[0:self.size, 0:self.size]

    # --- Walker public interface

    def sample_next_step(self, current_i, current_j, random_state=np.random):
        """ Sample a new position for the walker. """

        # Combine the next-step proposal with the context map to get a
        # next-step probability map
        next_step_map = self._next_step_proposal(current_i, current_j)
        selection_map = self._compute_next_step_probability(next_step_map)

        # Draw a new position from the next-step probability map by
        # inverse-transform sampling on the flattened cumulative sum.
        r = random_state.rand()
        cumulative_map = np.cumsum(selection_map)
        cumulative_map = cumulative_map.reshape(selection_map.shape)
        i_next, j_next = np.argwhere(cumulative_map >= r)[0]

        return i_next, j_next

    # --- Walker non-public interface

    def _next_step_proposal(self, current_i, current_j):
        """ Create the 2D proposal map for the next step of the walker. """

        # 2D Gaussian distribution, centered at current position,
        # and with different standard deviations for i and j
        grid_ii, grid_jj = self._grid_ii, self._grid_jj
        sigma_i, sigma_j = self.sigma_i, self.sigma_j

        rad = (
            (((grid_ii - current_i) ** 2) / (sigma_i ** 2))
            + (((grid_jj - current_j) ** 2) / (sigma_j ** 2))
        )

        p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)
        return p_next_step / p_next_step.sum()

    def _compute_next_step_probability(self, next_step_map):
        """ Compute the next step probability map from next step proposal and
        context map. """
        next_step_probability = next_step_map * self.context_map
        next_step_probability /= next_step_probability.sum()
        return next_step_probability
|
||||
|
39
notebooks/walker/Step_5_reproducibility/context_maps.py
Normal file
|
@ -0,0 +1,39 @@
|
|||
""" CONTEXT MAP BUILDERS """
|
||||
import numpy as np
|
||||
|
||||
|
||||
|
||||
def flat_context_map_builder(size):
    """ A context map where all positions are equally likely. """
    return np.full((size, size), 1.0)
|
||||
|
||||
|
||||
def hills_context_map_builder(size):
    """ A context map with bumps and valleys. """
    grid_ii, grid_jj = np.mgrid[0:size, 0:size]
    # Superimposed sine waves along each axis, each normalized to max 1.
    i_waves = np.sin(grid_ii / 130) + np.sin(grid_ii / 10)
    j_waves = (np.sin(grid_jj / 100) + np.sin(grid_jj / 50)
               + np.sin(grid_jj / 10))
    return i_waves / i_waves.max() + j_waves / j_waves.max()
|
||||
|
||||
|
||||
def labyrinth_context_map_builder(size):
    """ A context map that looks like a labyrinth. """
    # Walls (probability zero) as (row-slice, column-slice) pairs.
    walls = [
        (slice(50, 100), slice(50, 60)),
        (slice(20, 89), slice(80, 90)),
        (slice(90, 120), slice(0, 10)),
        (slice(120, size), slice(30, 40)),
        (slice(180, 190), slice(50, 60)),
        (slice(50, 60), slice(50, 200)),
        (slice(179, 189), slice(80, 130)),
        (slice(110, 120), slice(0, 190)),
    ]
    context_map = np.ones((size, size))
    for rows, cols in walls:
        context_map[rows, cols] = 0
    return context_map
|
||||
|
1
notebooks/walker/Step_5_reproducibility/exercise
Normal file
|
@ -0,0 +1 @@
|
|||
|
22
notebooks/walker/Step_5_reproducibility/plotting.py
Normal file
|
@ -0,0 +1,22 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory as a red line on top of a context map. """
    path = np.asarray(trajectory)
    rows, cols = path[:, 0], path[:, 1]
    plt.matshow(context_map)
    plt.plot(cols, rows, color='r')
    plt.show()
|
||||
|
||||
|
||||
def plot_trajectory_hexbin(trajectory):
    """ Plot a hexagonal density map of a trajectory. """
    path = np.asarray(trajectory)
    style = {'figure.figsize': (4, 4), 'axes.labelsize': 16,
             'xtick.labelsize': 14, 'ytick.labelsize': 14}
    with plt.rc_context(style):
        plt.hexbin(path[:, 1], path[:, 0], gridsize=30,
                   extent=(0, 200, 0, 200), edgecolors='none', cmap='Reds')
        # Match image-style coordinates: row 0 at the top.
        plt.gca().invert_yaxis()
        plt.xlabel('X')
        plt.ylabel('Y')
|
42
notebooks/walker/Step_5_reproducibility/run.py
Normal file
|
@ -0,0 +1,42 @@
|
|||
"""Reproducibility exercise: simulate a walker and save the trajectory
together with provenance metadata (timestamp and git commit).

Fill in STEPs 1-4; STEP 5 is already implemented.
"""
import json
import time

import git
import numpy as np

import context_maps
from walker import Walker

# Use the following parameters to simulate and save a trajectory of the walker

seed = 42            # RNG seed so the simulated walk is reproducible
sigma_i = 3          # std. dev. of the Gaussian step along i
sigma_j = 4          # std. dev. of the Gaussian step along j
size = 200           # side length of the square context map
i, j = (50, 100)     # starting position of the walker
n_iterations = 1000  # number of steps to simulate
# USE map_type hills
random_state = np.random.RandomState(seed)

# STEP 1: Create a context map


# STEP 2: Create a Walker


# STEP 3: Simulate the walk


# STEP 4: Save the trajectory
# Timestamp used in the output file names and in the metadata below.
curr_time = time.strftime("%Y%m%d-%H%M%S")
# save the npy file here!

# STEP 5: Save the metadata
# lookup git repository
# NOTE(review): assumes this script runs inside a git checkout — git.Repo
# raises InvalidGitRepositoryError otherwise.
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha

with open('meta.txt', 'w') as f:
    f.write(f'I estimated parameters at {curr_time}.\n')
    f.write(f'The git repo was at commit {sha}')
    # you can add any other information you want here!
|
|
@ -0,0 +1,40 @@
|
|||
""" CONTEXT MAP BUILDERS """
|
||||
import numpy as np
|
||||
|
||||
|
||||
|
||||
def flat_context_map_builder(size):
    """ A context map where all positions are equally likely. """
    return np.full((size, size), 1.0)
|
||||
|
||||
|
||||
def hills_context_map_builder(size):
    """ A context map with bumps and valleys. """
    rows, cols = np.mgrid[0:size, 0:size]
    # Superpose sine waves of different wavelengths along each axis,
    # rescaled so each component peaks at 1.
    waves_along_i = np.sin(rows / 130) + np.sin(rows / 10)
    waves_along_i = waves_along_i / waves_along_i.max()
    waves_along_j = (np.sin(cols / 100) + np.sin(cols / 50)
                     + np.sin(cols / 10))
    waves_along_j = waves_along_j / waves_along_j.max()
    return waves_along_i + waves_along_j
|
||||
|
||||
|
||||
def labyrinth_context_map_builder(size):
    """ A context map that looks like a labyrinth.

    Walls are rectangles of zero probability carved out of an all-ones map.
    """
    context_map = np.ones((size, size))
    # Vertical wall segments (row-span, column-span).
    context_map[50:100, 50:60] = 0
    context_map[20:89, 80:90] = 0
    context_map[90:120, 0:10] = 0
    context_map[120:size, 30:40] = 0
    context_map[180:190, 50:60] = 0

    # Horizontal wall segments.
    context_map[50:60, 50:200] = 0
    context_map[179:189, 80:130] = 0
    context_map[110:120, 0:190] = 0
    # NOTE: the original repeated the [120:size, 30:40] and [180:190, 50:60]
    # assignments here; the duplicates had no effect and were removed.

    return context_map
|
||||
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
I estimated parameters at 20230628-192022.
|
||||
The git repo was at commit 6a26566a46593a650ebfc86ebdbb28ee78ace079
|
22
notebooks/walker/Step_5_reproducibility/solution/plotting.py
Normal file
|
@ -0,0 +1,22 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory over a context map. """
    points = np.asarray(trajectory)
    plt.matshow(context_map)
    # Column index is x, row index is y (image convention).
    rows, cols = points[:, 0], points[:, 1]
    plt.plot(cols, rows, color='r')
    plt.show()
|
||||
|
||||
|
||||
def plot_trajectory_hexbin(trajectory):
    """ Plot an hexagonal density map of a trajectory. """
    points = np.asarray(trajectory)
    # Temporarily enlarge fonts / fix figure size for this plot only.
    style = {'figure.figsize': (4, 4), 'axes.labelsize': 16,
             'xtick.labelsize': 14, 'ytick.labelsize': 14}
    with plt.rc_context(style):
        plt.hexbin(points[:, 1], points[:, 0], gridsize=30,
                   extent=(0, 200, 0, 200), edgecolors='none',
                   cmap='Reds')
        # Match image convention: row index grows downwards.
        plt.gca().invert_yaxis()
        plt.xlabel('X')
        plt.ylabel('Y')
|
46
notebooks/walker/Step_5_reproducibility/solution/run.py
Normal file
|
@ -0,0 +1,46 @@
|
|||
""" Simulate a random walk of a Walker and save the trajectory + metadata. """
import json
import time

import git
import numpy as np

import context_maps
from walker import Walker

# Use the following parameters to simulate and save a trajectory of the walker

seed = 42  # seed for the pseudo-random number generator (reproducibility)
sigma_i = 3  # std. dev. of the next-step proposal along the i (row) axis
sigma_j = 4  # std. dev. of the next-step proposal along the j (column) axis
size = 200  # side length of the (square) context map
i, j = (50, 100)  # starting position of the walker
n_iterations = 1000  # number of steps to simulate
# USE map_type hills
random_state = np.random.RandomState(seed)

# STEP 1: Create a context map
context_map = context_maps.hills_context_map_builder(size)

# STEP 2: Create a Walker
walker = Walker(sigma_i, sigma_j, context_map)

# STEP 3: Simulate the walk
# Each step is sampled starting from the previous position.
trajectory = []
for _ in range(n_iterations):
    i, j = walker.sample_next_step(i, j, random_state)
    trajectory.append((i, j))

# STEP 4: Save the trajectory
curr_time = time.strftime("%Y%m%d-%H%M%S")  # timestamp reused in file names
np.save(f"sim_{curr_time}", trajectory)


# STEP 5: Save the metadata
# lookup git repository
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha  # commit hash of the current checkout

with open('meta.txt', 'w') as f:
    f.write(f'I estimated parameters at {curr_time}.\n')
    f.write(f'The git repo was at commit {sha}')
|
60
notebooks/walker/Step_5_reproducibility/solution/walker.py
Normal file
|
@ -0,0 +1,60 @@
|
|||
import numpy as np
|
||||
|
||||
|
||||
class Walker:
    """ The Walker knows how to walk at random on a context map.

    The context map gives the relative probability of stepping on each
    position of a square grid; next steps are drawn from a 2D Gaussian
    proposal centered at the current position, weighted by the context map.
    """

    def __init__(self, sigma_i, sigma_j, context_map):
        """ Initialize the walker.

        Parameters
        ----------
        sigma_i, sigma_j : float
            Standard deviations of the Gaussian next-step proposal along
            the i (row) and j (column) axes.
        context_map : ndarray of shape (size, size)
            Relative probability of stepping on each grid position.
        """
        self.sigma_i = sigma_i
        self.sigma_j = sigma_j
        self.size = context_map.shape[0]
        # Normalize a *copy* of the context map so it sums to 1.
        # (Bug fix: the original used an in-place `/=`, silently mutating
        # the caller's array and failing on integer arrays.)
        self.context_map = context_map / context_map.sum()

        # Pre-compute a 2D grid of coordinates for efficiency
        self._grid_ii, self._grid_jj = np.mgrid[0:self.size, 0:self.size]

    # --- Walker public interface

    def sample_next_step(self, current_i, current_j, random_state=np.random):
        """ Sample a new position for the walker.

        Parameters
        ----------
        current_i, current_j : int
            Current position of the walker.
        random_state : RandomState-like, optional
            Source of randomness; pass a seeded instance for
            reproducibility.

        Returns
        -------
        i_next, j_next : int
            The sampled next position.
        """
        # Combine the next-step proposal with the context map to get a
        # next-step probability map
        next_step_map = self._next_step_proposal(current_i, current_j)
        selection_map = self._compute_next_step_probability(next_step_map)

        # Draw a new position from the next-step probability map by
        # inverse-transform sampling on the flattened CDF.
        r = random_state.rand()
        cumulative_map = np.cumsum(selection_map)
        cumulative_map = cumulative_map.reshape(selection_map.shape)
        i_next, j_next = np.argwhere(cumulative_map >= r)[0]

        return i_next, j_next

    # --- Walker non-public interface

    def _next_step_proposal(self, current_i, current_j):
        """ Create the 2D proposal map for the next step of the walker. """
        # 2D Gaussian distribution, centered at current position,
        # and with different standard deviations for i and j
        grid_ii, grid_jj = self._grid_ii, self._grid_jj
        sigma_i, sigma_j = self.sigma_i, self.sigma_j

        rad = (
            (((grid_ii - current_i) ** 2) / (sigma_i ** 2))
            + (((grid_jj - current_j) ** 2) / (sigma_j ** 2))
        )

        p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)
        return p_next_step / p_next_step.sum()

    def _compute_next_step_probability(self, next_step_map):
        """ Compute the next step probability map from next step proposal
        and context map. """
        next_step_probability = next_step_map * self.context_map
        # In-place here is safe: the product above is a fresh array.
        next_step_probability /= next_step_probability.sum()
        return next_step_probability
|
||||
|
60
notebooks/walker/Step_5_reproducibility/walker.py
Normal file
|
@ -0,0 +1,60 @@
|
|||
import numpy as np
|
||||
|
||||
|
||||
class Walker:
    """ The Walker knows how to walk at random on a context map.

    The context map gives the relative probability of stepping on each
    position of a square grid; next steps are drawn from a 2D Gaussian
    proposal centered at the current position, weighted by the context map.
    """

    def __init__(self, sigma_i, sigma_j, context_map):
        """ Initialize the walker.

        Parameters
        ----------
        sigma_i, sigma_j : float
            Standard deviations of the Gaussian next-step proposal along
            the i (row) and j (column) axes.
        context_map : ndarray of shape (size, size)
            Relative probability of stepping on each grid position.
        """
        self.sigma_i = sigma_i
        self.sigma_j = sigma_j
        self.size = context_map.shape[0]
        # Normalize a *copy* of the context map so it sums to 1.
        # (Bug fix: the original used an in-place `/=`, silently mutating
        # the caller's array and failing on integer arrays.)
        self.context_map = context_map / context_map.sum()

        # Pre-compute a 2D grid of coordinates for efficiency
        self._grid_ii, self._grid_jj = np.mgrid[0:self.size, 0:self.size]

    # --- Walker public interface

    def sample_next_step(self, current_i, current_j, random_state=np.random):
        """ Sample a new position for the walker.

        Parameters
        ----------
        current_i, current_j : int
            Current position of the walker.
        random_state : RandomState-like, optional
            Source of randomness; pass a seeded instance for
            reproducibility.

        Returns
        -------
        i_next, j_next : int
            The sampled next position.
        """
        # Combine the next-step proposal with the context map to get a
        # next-step probability map
        next_step_map = self._next_step_proposal(current_i, current_j)
        selection_map = self._compute_next_step_probability(next_step_map)

        # Draw a new position from the next-step probability map by
        # inverse-transform sampling on the flattened CDF.
        r = random_state.rand()
        cumulative_map = np.cumsum(selection_map)
        cumulative_map = cumulative_map.reshape(selection_map.shape)
        i_next, j_next = np.argwhere(cumulative_map >= r)[0]

        return i_next, j_next

    # --- Walker non-public interface

    def _next_step_proposal(self, current_i, current_j):
        """ Create the 2D proposal map for the next step of the walker. """
        # 2D Gaussian distribution, centered at current position,
        # and with different standard deviations for i and j
        grid_ii, grid_jj = self._grid_ii, self._grid_jj
        sigma_i, sigma_j = self.sigma_i, self.sigma_j

        rad = (
            (((grid_ii - current_i) ** 2) / (sigma_i ** 2))
            + (((grid_jj - current_j) ** 2) / (sigma_j ** 2))
        )

        p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)
        return p_next_step / p_next_step.sum()

    def _compute_next_step_probability(self, next_step_map):
        """ Compute the next step probability map from next step proposal
        and context map. """
        next_step_probability = next_step_map * self.context_map
        # In-place here is safe: the product above is a fresh array.
        next_step_probability /= next_step_probability.sum()
        return next_step_probability
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
""" CONTEXT MAP BUILDERS """
|
||||
import numpy as np
|
||||
|
||||
|
||||
|
||||
def flat_context_map_builder(size):
    """ A context map where all positions are equally likely. """
    return np.full((size, size), 1.0)
|
||||
|
||||
|
||||
def hills_context_map_builder(size):
    """ A context map with bumps and valleys. """
    rows, cols = np.mgrid[0:size, 0:size]
    # Superpose sine waves of different wavelengths along each axis,
    # rescaled so each component peaks at 1.
    waves_along_i = np.sin(rows / 130) + np.sin(rows / 10)
    waves_along_i = waves_along_i / waves_along_i.max()
    waves_along_j = (np.sin(cols / 100) + np.sin(cols / 50)
                     + np.sin(cols / 10))
    waves_along_j = waves_along_j / waves_along_j.max()
    return waves_along_i + waves_along_j
|
||||
|
||||
|
||||
def labyrinth_context_map_builder(size):
    """ A context map that looks like a labyrinth.

    Walls are rectangles of zero probability carved out of an all-ones map.
    """
    context_map = np.ones((size, size))
    # Vertical wall segments (row-span, column-span).
    context_map[50:100, 50:60] = 0
    context_map[20:89, 80:90] = 0
    context_map[90:120, 0:10] = 0
    context_map[120:size, 30:40] = 0
    context_map[180:190, 50:60] = 0

    # Horizontal wall segments.
    context_map[50:60, 50:200] = 0
    context_map[179:189, 80:130] = 0
    context_map[110:120, 0:190] = 0
    # NOTE: the original repeated the [120:size, 30:40] and [180:190, 50:60]
    # assignments here; the duplicates had no effect and were removed.

    return context_map
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory over a context map. """
    points = np.asarray(trajectory)
    plt.matshow(context_map)
    # Column index is x, row index is y (image convention).
    rows, cols = points[:, 0], points[:, 1]
    plt.plot(cols, rows, color='r')
    plt.show()
|
||||
|
||||
|
||||
def plot_trajectory_hexbin(trajectory):
    """ Plot an hexagonal density map of a trajectory. """
    points = np.asarray(trajectory)
    # Temporarily enlarge fonts / fix figure size for this plot only.
    style = {'figure.figsize': (4, 4), 'axes.labelsize': 16,
             'xtick.labelsize': 14, 'ytick.labelsize': 14}
    with plt.rc_context(style):
        plt.hexbin(points[:, 1], points[:, 0], gridsize=30,
                   extent=(0, 200, 0, 200), edgecolors='none',
                   cmap='Reds')
        # Match image convention: row index grows downwards.
        plt.gca().invert_yaxis()
        plt.xlabel('X')
        plt.ylabel('Y')
|
46
notebooks/walker/Step_6_loading_parameters_from_file/run.py
Normal file
|
@ -0,0 +1,46 @@
|
|||
""" Simulate a random walk of a Walker and save the trajectory + metadata. """
import json
import time

import git
import numpy as np

import context_maps
from walker import Walker

# Use the following parameters to simulate and save a trajectory of the walker

seed = 42  # seed for the pseudo-random number generator (reproducibility)
sigma_i = 3  # std. dev. of the next-step proposal along the i (row) axis
sigma_j = 4  # std. dev. of the next-step proposal along the j (column) axis
size = 200  # side length of the (square) context map
i, j = (50, 100)  # starting position of the walker
n_iterations = 1000  # number of steps to simulate
# USE map_type hills
random_state = np.random.RandomState(seed)

# STEP 1: Create a context map
context_map = context_maps.hills_context_map_builder(size)

# STEP 2: Create a Walker
walker = Walker(sigma_i, sigma_j, context_map)

# STEP 3: Simulate the walk
# Each step is sampled starting from the previous position.
trajectory = []
for _ in range(n_iterations):
    i, j = walker.sample_next_step(i, j, random_state)
    trajectory.append((i, j))

# STEP 4: Save the trajectory
curr_time = time.strftime("%Y%m%d-%H%M%S")  # timestamp reused in file names
np.save(f"sim_{curr_time}", trajectory)


# STEP 5: Save the metadata
# lookup git repository
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha  # commit hash of the current checkout

with open('meta.txt', 'w') as f:
    f.write(f'I estimated parameters at {curr_time}.\n')
    f.write(f'The git repo was at commit {sha}')
|
|
@ -0,0 +1,46 @@
|
|||
""" CONTEXT MAP BUILDERS """
|
||||
import numpy as np
|
||||
|
||||
|
||||
|
||||
def flat_context_map_builder(size):
    """ A context map where all positions are equally likely. """
    return np.full((size, size), 1.0)
|
||||
|
||||
|
||||
def hills_context_map_builder(size):
    """ A context map with bumps and valleys. """
    rows, cols = np.mgrid[0:size, 0:size]
    # Superpose sine waves of different wavelengths along each axis,
    # rescaled so each component peaks at 1.
    waves_along_i = np.sin(rows / 130) + np.sin(rows / 10)
    waves_along_i = waves_along_i / waves_along_i.max()
    waves_along_j = (np.sin(cols / 100) + np.sin(cols / 50)
                     + np.sin(cols / 10))
    waves_along_j = waves_along_j / waves_along_j.max()
    return waves_along_i + waves_along_j
|
||||
|
||||
|
||||
def labyrinth_context_map_builder(size):
    """ A context map that looks like a labyrinth.

    Walls are rectangles of zero probability carved out of an all-ones map.
    """
    context_map = np.ones((size, size))
    # Vertical wall segments (row-span, column-span).
    context_map[50:100, 50:60] = 0
    context_map[20:89, 80:90] = 0
    context_map[90:120, 0:10] = 0
    context_map[120:size, 30:40] = 0
    context_map[180:190, 50:60] = 0

    # Horizontal wall segments.
    context_map[50:60, 50:200] = 0
    context_map[179:189, 80:130] = 0
    context_map[110:120, 0:190] = 0
    # NOTE: the original repeated the [120:size, 30:40] and [180:190, 50:60]
    # assignments here; the duplicates had no effect and were removed.

    return context_map
|
||||
|
||||
|
||||
# Register map builders
|
||||
# Registry of available context-map builders, keyed by map-type name
# (used by run.py to pick a builder from the "map_type" input).
map_builders = dict(
    flat=flat_context_map_builder,
    hills=hills_context_map_builder,
    labyrinth=labyrinth_context_map_builder,
)
|
|
@ -0,0 +1,10 @@
|
|||
{
|
||||
"seed": 42,
|
||||
"sigma_i": 3,
|
||||
"sigma_j": 4,
|
||||
"size": 200,
|
||||
"map_type": "hills",
|
||||
"start_i": 50,
|
||||
"start_j": 100,
|
||||
"n_iterations": 1000
|
||||
}
|
|
@ -0,0 +1,22 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
|
||||
def plot_trajectory(trajectory, context_map):
    """ Plot a trajectory over a context map. """
    points = np.asarray(trajectory)
    plt.matshow(context_map)
    # Column index is x, row index is y (image convention).
    rows, cols = points[:, 0], points[:, 1]
    plt.plot(cols, rows, color='r')
    plt.show()
|
||||
|
||||
|
||||
def plot_trajectory_hexbin(trajectory):
    """ Plot an hexagonal density map of a trajectory. """
    points = np.asarray(trajectory)
    # Temporarily enlarge fonts / fix figure size for this plot only.
    style = {'figure.figsize': (4, 4), 'axes.labelsize': 16,
             'xtick.labelsize': 14, 'ytick.labelsize': 14}
    with plt.rc_context(style):
        plt.hexbin(points[:, 1], points[:, 0], gridsize=30,
                   extent=(0, 200, 0, 200), edgecolors='none',
                   cmap='Reds')
        # Match image convention: row index grows downwards.
        plt.gca().invert_yaxis()
        plt.xlabel('X')
        plt.ylabel('Y')
|
|
@ -0,0 +1,42 @@
|
|||
""" Simulate a random walk of a Walker, with parameters loaded from JSON,
and save the trajectory + metadata. """
import json
import time

import git
import numpy as np

from walker import Walker
from context_maps import map_builders


# Load the simulation parameters from file.
with open("inputs.json", 'r') as f:
    inputs = json.load(f)

random_state = np.random.RandomState(inputs["seed"])
n_iterations = inputs["n_iterations"]


# Build the context map and the walker from the loaded parameters.
context_map_builder = map_builders[inputs["map_type"]]
context_map = context_map_builder(inputs["size"])
walker = Walker(inputs["sigma_i"], inputs["sigma_j"], context_map)


# Simulate the walk: each step starts from the previous position.
# (BUG FIX: the original sampled every step from the fixed start position
# `inputs["start_i"], inputs["start_j"]`, so the walker never advanced;
# the Step_5 solution shows the intended position update.)
i, j = inputs["start_i"], inputs["start_j"]
trajectory = []
for _ in range(n_iterations):
    i, j = walker.sample_next_step(i, j, random_state)
    trajectory.append((i, j))

# STEP 4: Save the trajectory
curr_time = time.strftime("%Y%m%d-%H%M%S")  # timestamp reused in file names
np.save(f"sim_{curr_time}", trajectory)


# STEP 5: Save the metadata
# lookup git repository
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha  # commit hash of the current checkout

with open('meta.txt', 'w') as f:
    f.write(f'I estimated parameters at {curr_time}.\n')
    f.write(f'The git repo was at commit {sha}')
|
|
@ -0,0 +1 @@
|
|||
|
|
@ -0,0 +1,60 @@
|
|||
import numpy as np
|
||||
|
||||
|
||||
class Walker:
    """ The Walker knows how to walk at random on a context map.

    The context map gives the relative probability of stepping on each
    position of a square grid; next steps are drawn from a 2D Gaussian
    proposal centered at the current position, weighted by the context map.
    """

    def __init__(self, sigma_i, sigma_j, context_map):
        """ Initialize the walker.

        Parameters
        ----------
        sigma_i, sigma_j : float
            Standard deviations of the Gaussian next-step proposal along
            the i (row) and j (column) axes.
        context_map : ndarray of shape (size, size)
            Relative probability of stepping on each grid position.
        """
        self.sigma_i = sigma_i
        self.sigma_j = sigma_j
        self.size = context_map.shape[0]
        # Normalize a *copy* of the context map so it sums to 1.
        # (Bug fix: the original used an in-place `/=`, silently mutating
        # the caller's array and failing on integer arrays.)
        self.context_map = context_map / context_map.sum()

        # Pre-compute a 2D grid of coordinates for efficiency
        self._grid_ii, self._grid_jj = np.mgrid[0:self.size, 0:self.size]

    # --- Walker public interface

    def sample_next_step(self, current_i, current_j, random_state=np.random):
        """ Sample a new position for the walker.

        Parameters
        ----------
        current_i, current_j : int
            Current position of the walker.
        random_state : RandomState-like, optional
            Source of randomness; pass a seeded instance for
            reproducibility.

        Returns
        -------
        i_next, j_next : int
            The sampled next position.
        """
        # Combine the next-step proposal with the context map to get a
        # next-step probability map
        next_step_map = self._next_step_proposal(current_i, current_j)
        selection_map = self._compute_next_step_probability(next_step_map)

        # Draw a new position from the next-step probability map by
        # inverse-transform sampling on the flattened CDF.
        r = random_state.rand()
        cumulative_map = np.cumsum(selection_map)
        cumulative_map = cumulative_map.reshape(selection_map.shape)
        i_next, j_next = np.argwhere(cumulative_map >= r)[0]

        return i_next, j_next

    # --- Walker non-public interface

    def _next_step_proposal(self, current_i, current_j):
        """ Create the 2D proposal map for the next step of the walker. """
        # 2D Gaussian distribution, centered at current position,
        # and with different standard deviations for i and j
        grid_ii, grid_jj = self._grid_ii, self._grid_jj
        sigma_i, sigma_j = self.sigma_i, self.sigma_j

        rad = (
            (((grid_ii - current_i) ** 2) / (sigma_i ** 2))
            + (((grid_jj - current_j) ** 2) / (sigma_j ** 2))
        )

        p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)
        return p_next_step / p_next_step.sum()

    def _compute_next_step_probability(self, next_step_map):
        """ Compute the next step probability map from next step proposal
        and context map. """
        next_step_probability = next_step_map * self.context_map
        # In-place here is safe: the product above is a fresh array.
        next_step_probability /= next_step_probability.sum()
        return next_step_probability
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
import numpy as np
|
||||
|
||||
|
||||
class Walker:
    """ The Walker knows how to walk at random on a context map.

    The context map gives the relative probability of stepping on each
    position of a square grid; next steps are drawn from a 2D Gaussian
    proposal centered at the current position, weighted by the context map.
    """

    def __init__(self, sigma_i, sigma_j, context_map):
        """ Initialize the walker.

        Parameters
        ----------
        sigma_i, sigma_j : float
            Standard deviations of the Gaussian next-step proposal along
            the i (row) and j (column) axes.
        context_map : ndarray of shape (size, size)
            Relative probability of stepping on each grid position.
        """
        self.sigma_i = sigma_i
        self.sigma_j = sigma_j
        self.size = context_map.shape[0]
        # Normalize a *copy* of the context map so it sums to 1.
        # (Bug fix: the original used an in-place `/=`, silently mutating
        # the caller's array and failing on integer arrays.)
        self.context_map = context_map / context_map.sum()

        # Pre-compute a 2D grid of coordinates for efficiency
        self._grid_ii, self._grid_jj = np.mgrid[0:self.size, 0:self.size]

    # --- Walker public interface

    def sample_next_step(self, current_i, current_j, random_state=np.random):
        """ Sample a new position for the walker.

        Parameters
        ----------
        current_i, current_j : int
            Current position of the walker.
        random_state : RandomState-like, optional
            Source of randomness; pass a seeded instance for
            reproducibility.

        Returns
        -------
        i_next, j_next : int
            The sampled next position.
        """
        # Combine the next-step proposal with the context map to get a
        # next-step probability map
        next_step_map = self._next_step_proposal(current_i, current_j)
        selection_map = self._compute_next_step_probability(next_step_map)

        # Draw a new position from the next-step probability map by
        # inverse-transform sampling on the flattened CDF.
        r = random_state.rand()
        cumulative_map = np.cumsum(selection_map)
        cumulative_map = cumulative_map.reshape(selection_map.shape)
        i_next, j_next = np.argwhere(cumulative_map >= r)[0]

        return i_next, j_next

    # --- Walker non-public interface

    def _next_step_proposal(self, current_i, current_j):
        """ Create the 2D proposal map for the next step of the walker. """
        # 2D Gaussian distribution, centered at current position,
        # and with different standard deviations for i and j
        grid_ii, grid_jj = self._grid_ii, self._grid_jj
        sigma_i, sigma_j = self.sigma_i, self.sigma_j

        rad = (
            (((grid_ii - current_i) ** 2) / (sigma_i ** 2))
            + (((grid_jj - current_j) ** 2) / (sigma_j ** 2))
        )

        p_next_step = np.exp(-(rad / 2.0)) / (2.0 * np.pi * sigma_i * sigma_j)
        return p_next_step / p_next_step.sum()

    def _compute_next_step_probability(self, next_step_map):
        """ Compute the next step probability map from next step proposal
        and context map. """
        next_step_probability = next_step_map * self.context_map
        # In-place here is safe: the product above is a fresh array.
        next_step_probability /= next_step_probability.sum()
        return next_step_probability
|
||||
|