cleanup-backend #13

Merged
remoll merged 8 commits from cleanup-backend into main 2024-07-27 12:09:54 +00:00
39 changed files with 1987 additions and 2440 deletions

View File

@ -14,13 +14,13 @@ jobs:
steps:
- uses: https://gitea.com/actions/checkout@v4
- name: Login to Docker Registry
uses: docker/login-action@v3
with:
registry: git.kluster.moll.re
username: ${{ gitea.repository_owner }}
password: ${{ secrets.DOCKER_PUSH_TOKEN }}
password: ${{ secrets.PACKAGE_REGISTRY_ACCESS }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@ -29,5 +29,5 @@ jobs:
uses: docker/build-push-action@v5
with:
context: backend
tags: git.kluster.moll.re/remoll/fast_network_navigation/backend:latest
tags: git.kluster.moll.re/anydev/anyway-backend:latest
push: true

View File

@ -44,7 +44,7 @@ jobs:
- name: Add required secrets
run: |
echo ${{ secrets.ANDROID_SECRETS_BASE64 }} | base64 -d > ./android/secrets.properties
echo ${{ secrets.ANDROID_SECRETS_PROPERTIES }} > ./android/secrets.properties
working-directory: ./frontend
- name: Sanity check

View File

@ -1,34 +1,34 @@
on:
pull_request:
branches:
- main
paths:
- frontend/**
# on:
# pull_request:
# branches:
# - main
# paths:
# - frontend/**
name: Build web
# name: Build web
jobs:
build:
name: Build Web
runs-on: ubuntu-latest
steps:
# jobs:
# build:
# name: Build Web
# runs-on: ubuntu-latest
# steps:
- name: Install prerequisites
run: |
sudo apt-get update
sudo apt-get install -y xz-utils
# - name: Install prerequisites
# run: |
# sudo apt-get update
# sudo apt-get install -y xz-utils
- uses: actions/checkout@v4
# - uses: actions/checkout@v4
- uses: https://github.com/subosito/flutter-action@v2
with:
channel: stable
flutter-version: 3.19.6
cache: true
# - uses: https://github.com/subosito/flutter-action@v2
# with:
# channel: stable
# flutter-version: 3.19.6
# cache: true
- run: flutter pub get
working-directory: ./frontend
# - run: flutter pub get
# working-directory: ./frontend
- run: flutter build web
working-directory: ./frontend
# - run: flutter build web
# working-directory: ./frontend

.vscode/launch.json (vendored, new file, 52 lines)
View File

@ -0,0 +1,52 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
// backend - python using fastapi
{
"name": "Backend - debug",
"type": "debugpy",
"request": "launch",
"module": "uvicorn",
"env": {
"DEBUG": "true"
},
"args": [
"--app-dir",
"src",
"main:app",
"--reload",
],
"jinja": true,
"cwd": "${workspaceFolder}/backend"
},
{
"name": "Backend - tester",
"type": "debugpy",
"request": "launch",
"program": "src/tester.py",
"env": {
"DEBUG": "true"
},
"cwd": "${workspaceFolder}/backend"
},
// frontend - flutter app
{
"name": "Frontend - debug",
"type": "dart",
"request": "launch",
"program": "lib/main.dart",
"cwd": "${workspaceFolder}/frontend"
},
{
"name": "Frontend - profile",
"type": "dart",
"request": "launch",
"program": "lib/main.dart",
"flutterMode": "profile",
"cwd": "${workspaceFolder}/frontend"
}
]
}
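For readers not using VS Code: the "Backend - debug" configuration above boils down to running uvicorn as a module from the backend folder with DEBUG=true in the environment, --app-dir src, the target main:app and --reload; "Backend - tester" simply runs src/tester.py with the same DEBUG flag. A rough programmatic equivalent is sketched below; it is not part of this PR and it assumes uvicorn's app_dir keyword argument:

# debug_run.py - hypothetical helper, not part of this PR; mirrors the
# "Backend - debug" launch configuration when executed from ./backend.
import os

import uvicorn

os.environ["DEBUG"] = "true"  # same env var the launch configuration sets
uvicorn.run("main:app", app_dir="src", reload=True)  # serve src/main.py with auto-reload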

View File

@ -1,4 +1,4 @@
FROM python:3
FROM python:3.11-slim
WORKDIR /app
COPY Pipfile Pipfile.lock .
@ -14,4 +14,4 @@ EXPOSE 8000
ENV NUM_WORKERS=1
ENV OSM_CACHE_DIR=/cache
CMD ["pipenv", "run", "fastapi", "run", "src/main.py", '--port 8000', '--workers $NUM_WORKERS']
CMD fastapi run src/main.py --port 8000 --workers $NUM_WORKERS

View File

@ -3,17 +3,14 @@ url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[dev-packages]
[packages]
numpy = "*"
scipy = "*"
fastapi = "*"
pydantic = "*"
shapely = "*"
networkx = "*"
geopy = "*"
requests = ">=2.20.1"
mwparserfromhell = ">=0.5.2"
packaging = "*"
shapely = "*"
scipy = "*"
osmpythontools = "*"
pywikibot = "*"
[dev-packages]

backend/Pipfile.lock (generated, 631 lines changed)
View File

@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
"sha256": "b089081defe09c4ffad85fd22c5de336c76b31e7f6f67e5969b731bdaa58e528"
"sha256": "f0de801038593d42d8b780d14c2c72bb4f5f5e66df02f72244917ede5d5ebce6"
},
"pipfile-spec": 6,
"requires": {},
@ -30,6 +30,14 @@
"markers": "python_version >= '3.8'",
"version": "==4.4.0"
},
"beautifulsoup4": {
"hashes": [
"sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051",
"sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"
],
"markers": "python_full_version >= '3.6.0'",
"version": "==4.12.3"
},
"certifi": {
"hashes": [
"sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b",
@ -134,102 +142,6 @@
"markers": "python_full_version >= '3.7.0'",
"version": "==3.3.2"
},
"charset-normalizer": {
"hashes": [
"sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027",
"sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087",
"sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786",
"sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8",
"sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09",
"sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185",
"sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574",
"sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e",
"sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519",
"sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898",
"sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269",
"sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3",
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f",
"sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6",
"sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8",
"sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a",
"sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73",
"sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc",
"sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714",
"sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2",
"sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc",
"sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce",
"sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d",
"sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e",
"sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6",
"sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269",
"sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96",
"sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d",
"sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a",
"sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4",
"sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77",
"sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d",
"sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0",
"sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed",
"sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068",
"sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac",
"sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25",
"sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8",
"sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab",
"sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26",
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2",
"sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db",
"sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f",
"sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5",
"sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99",
"sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c",
"sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d",
"sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811",
"sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa",
"sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a",
"sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03",
"sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b",
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04",
"sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c",
"sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001",
"sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458",
"sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389",
"sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99",
"sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985",
"sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537",
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238",
"sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f",
"sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d",
"sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796",
"sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a",
"sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143",
"sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8",
"sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c",
"sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5",
"sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5",
"sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711",
"sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4",
"sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6",
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c",
"sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7",
"sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4",
"sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b",
"sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae",
"sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12",
"sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c",
"sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae",
"sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8",
"sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887",
"sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b",
"sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4",
"sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f",
"sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5",
"sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33",
"sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519",
"sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"
],
"markers": "python_full_version >= '3.7.0'",
"version": "==3.3.2"
},
"click": {
"hashes": [
"sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
@ -238,6 +150,64 @@
"markers": "python_version >= '3.7'",
"version": "==8.1.7"
},
"contourpy": {
"hashes": [
"sha256:00e5388f71c1a0610e6fe56b5c44ab7ba14165cdd6d695429c5cd94021e390b2",
"sha256:10a37ae557aabf2509c79715cd20b62e4c7c28b8cd62dd7d99e5ed3ce28c3fd9",
"sha256:11959f0ce4a6f7b76ec578576a0b61a28bdc0696194b6347ba3f1c53827178b9",
"sha256:187fa1d4c6acc06adb0fae5544c59898ad781409e61a926ac7e84b8f276dcef4",
"sha256:1a07fc092a4088ee952ddae19a2b2a85757b923217b7eed584fdf25f53a6e7ce",
"sha256:1cac0a8f71a041aa587410424ad46dfa6a11f6149ceb219ce7dd48f6b02b87a7",
"sha256:1d59e739ab0e3520e62a26c60707cc3ab0365d2f8fecea74bfe4de72dc56388f",
"sha256:2855c8b0b55958265e8b5888d6a615ba02883b225f2227461aa9127c578a4922",
"sha256:2e785e0f2ef0d567099b9ff92cbfb958d71c2d5b9259981cd9bee81bd194c9a4",
"sha256:309be79c0a354afff9ff7da4aaed7c3257e77edf6c1b448a779329431ee79d7e",
"sha256:39f3ecaf76cd98e802f094e0d4fbc6dc9c45a8d0c4d185f0f6c2234e14e5f75b",
"sha256:457499c79fa84593f22454bbd27670227874cd2ff5d6c84e60575c8b50a69619",
"sha256:49e70d111fee47284d9dd867c9bb9a7058a3c617274900780c43e38d90fe1205",
"sha256:4c75507d0a55378240f781599c30e7776674dbaf883a46d1c90f37e563453480",
"sha256:4c863140fafc615c14a4bf4efd0f4425c02230eb8ef02784c9a156461e62c965",
"sha256:4d8908b3bee1c889e547867ca4cdc54e5ab6be6d3e078556814a22457f49423c",
"sha256:5b9eb0ca724a241683c9685a484da9d35c872fd42756574a7cfbf58af26677fd",
"sha256:6022cecf8f44e36af10bd9118ca71f371078b4c168b6e0fab43d4a889985dbb5",
"sha256:6150ffa5c767bc6332df27157d95442c379b7dce3a38dff89c0f39b63275696f",
"sha256:62828cada4a2b850dbef89c81f5a33741898b305db244904de418cc957ff05dc",
"sha256:7b4182299f251060996af5249c286bae9361fa8c6a9cda5efc29fe8bfd6062ec",
"sha256:94b34f32646ca0414237168d68a9157cb3889f06b096612afdd296003fdd32fd",
"sha256:9ce6889abac9a42afd07a562c2d6d4b2b7134f83f18571d859b25624a331c90b",
"sha256:9cffe0f850e89d7c0012a1fb8730f75edd4320a0a731ed0c183904fe6ecfc3a9",
"sha256:a12a813949e5066148712a0626895c26b2578874e4cc63160bb007e6df3436fe",
"sha256:a1eea9aecf761c661d096d39ed9026574de8adb2ae1c5bd7b33558af884fb2ce",
"sha256:a31f94983fecbac95e58388210427d68cd30fe8a36927980fab9c20062645609",
"sha256:ac58bdee53cbeba2ecad824fa8159493f0bf3b8ea4e93feb06c9a465d6c87da8",
"sha256:af3f4485884750dddd9c25cb7e3915d83c2db92488b38ccb77dd594eac84c4a0",
"sha256:b33d2bc4f69caedcd0a275329eb2198f560b325605810895627be5d4b876bf7f",
"sha256:b59c0ffceff8d4d3996a45f2bb6f4c207f94684a96bf3d9728dbb77428dd8cb8",
"sha256:bb6834cbd983b19f06908b45bfc2dad6ac9479ae04abe923a275b5f48f1a186b",
"sha256:bd3db01f59fdcbce5b22afad19e390260d6d0222f35a1023d9adc5690a889364",
"sha256:bd7c23df857d488f418439686d3b10ae2fbf9bc256cd045b37a8c16575ea1040",
"sha256:c2528d60e398c7c4c799d56f907664673a807635b857df18f7ae64d3e6ce2d9f",
"sha256:d31a63bc6e6d87f77d71e1abbd7387ab817a66733734883d1fc0021ed9bfa083",
"sha256:d4492d82b3bc7fbb7e3610747b159869468079fe149ec5c4d771fa1f614a14df",
"sha256:ddcb8581510311e13421b1f544403c16e901c4e8f09083c881fab2be80ee31ba",
"sha256:e1d59258c3c67c865435d8fbeb35f8c59b8bef3d6f46c1f29f6123556af28445",
"sha256:eb3315a8a236ee19b6df481fc5f997436e8ade24a9f03dfdc6bd490fea20c6da",
"sha256:ef2b055471c0eb466033760a521efb9d8a32b99ab907fc8358481a1dd29e3bd3",
"sha256:ef5adb9a3b1d0c645ff694f9bca7702ec2c70f4d734f9922ea34de02294fdf72",
"sha256:f32c38afb74bd98ce26de7cc74a67b40afb7b05aae7b42924ea990d51e4dac02",
"sha256:fe0ccca550bb8e5abc22f530ec0466136379c01321fd94f30a22231e8a48d985"
],
"markers": "python_version >= '3.9'",
"version": "==1.2.1"
},
"cycler": {
"hashes": [
"sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30",
"sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"
],
"markers": "python_version >= '3.8'",
"version": "==0.12.1"
},
"dnspython": {
"hashes": [
"sha256:5ef3b9680161f6fa89daf8ad451b5f1a33b18ae8a1c6778cdf4b43f08c0a6e50",
@ -254,14 +224,6 @@
"markers": "python_version >= '3.8'",
"version": "==2.2.0"
},
"exceptiongroup": {
"hashes": [
"sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b",
"sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"
],
"markers": "python_version < '3.11'",
"version": "==1.2.2"
},
"fastapi": {
"hashes": [
"sha256:4f51cfa25d72f9fbc3280832e84b32494cf186f50158d364a8765aabf22587bf",
@ -279,7 +241,7 @@
"markers": "python_version >= '3.8'",
"version": "==0.0.4"
},
"geopandas": {
"fonttools": {
"hashes": [
"sha256:02569e9a810f9d11f4ae82c391ebc6fb5730d95a0657d24d754ed7763fb2d122",
"sha256:0679a30b59d74b6242909945429dbddb08496935b82f91ea9bf6ad240ec23397",
@ -433,6 +395,264 @@
"markers": "python_version >= '3.7'",
"version": "==3.1.4"
},
"kiwisolver": {
"hashes": [
"sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf",
"sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e",
"sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af",
"sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f",
"sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046",
"sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3",
"sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5",
"sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71",
"sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee",
"sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3",
"sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9",
"sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b",
"sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985",
"sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea",
"sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16",
"sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89",
"sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c",
"sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9",
"sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712",
"sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342",
"sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a",
"sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958",
"sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d",
"sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a",
"sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130",
"sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff",
"sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898",
"sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b",
"sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f",
"sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265",
"sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93",
"sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929",
"sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635",
"sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709",
"sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b",
"sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb",
"sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a",
"sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920",
"sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e",
"sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544",
"sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45",
"sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390",
"sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77",
"sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355",
"sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff",
"sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4",
"sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7",
"sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20",
"sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c",
"sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162",
"sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228",
"sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437",
"sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc",
"sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a",
"sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901",
"sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4",
"sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770",
"sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525",
"sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad",
"sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a",
"sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29",
"sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90",
"sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250",
"sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d",
"sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3",
"sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54",
"sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f",
"sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1",
"sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da",
"sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238",
"sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa",
"sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523",
"sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0",
"sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205",
"sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3",
"sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4",
"sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac",
"sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9",
"sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb",
"sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced",
"sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd",
"sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0",
"sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da",
"sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18",
"sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9",
"sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276",
"sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333",
"sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b",
"sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db",
"sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126",
"sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9",
"sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09",
"sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0",
"sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec",
"sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7",
"sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff",
"sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9",
"sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192",
"sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8",
"sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d",
"sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6",
"sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797",
"sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892",
"sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"
],
"markers": "python_version >= '3.7'",
"version": "==1.4.5"
},
"lxml": {
"hashes": [
"sha256:02437fb7308386867c8b7b0e5bc4cd4b04548b1c5d089ffb8e7b31009b961dc3",
"sha256:02f6a8eb6512fdc2fd4ca10a49c341c4e109aa6e9448cc4859af5b949622715a",
"sha256:05f8757b03208c3f50097761be2dea0aba02e94f0dc7023ed73a7bb14ff11eb0",
"sha256:06668e39e1f3c065349c51ac27ae430719d7806c026fec462e5693b08b95696b",
"sha256:07542787f86112d46d07d4f3c4e7c760282011b354d012dc4141cc12a68cef5f",
"sha256:08ea0f606808354eb8f2dfaac095963cb25d9d28e27edcc375d7b30ab01abbf6",
"sha256:0969e92af09c5687d769731e3f39ed62427cc72176cebb54b7a9d52cc4fa3b73",
"sha256:0a028b61a2e357ace98b1615fc03f76eb517cc028993964fe08ad514b1e8892d",
"sha256:0b3f5016e00ae7630a4b83d0868fca1e3d494c78a75b1c7252606a3a1c5fc2ad",
"sha256:13e69be35391ce72712184f69000cda04fc89689429179bc4c0ae5f0b7a8c21b",
"sha256:16a8326e51fcdffc886294c1e70b11ddccec836516a343f9ed0f82aac043c24a",
"sha256:19b4e485cd07b7d83e3fe3b72132e7df70bfac22b14fe4bf7a23822c3a35bff5",
"sha256:1a2569a1f15ae6c8c64108a2cd2b4a858fc1e13d25846be0666fc144715e32ab",
"sha256:1a7aca7964ac4bb07680d5c9d63b9d7028cace3e2d43175cb50bba8c5ad33316",
"sha256:1b590b39ef90c6b22ec0be925b211298e810b4856909c8ca60d27ffbca6c12e6",
"sha256:1d8a701774dfc42a2f0b8ccdfe7dbc140500d1049e0632a611985d943fcf12df",
"sha256:1e275ea572389e41e8b039ac076a46cb87ee6b8542df3fff26f5baab43713bca",
"sha256:2304d3c93f2258ccf2cf7a6ba8c761d76ef84948d87bf9664e14d203da2cd264",
"sha256:23441e2b5339bc54dc949e9e675fa35efe858108404ef9aa92f0456929ef6fe8",
"sha256:23cfafd56887eaed93d07bc4547abd5e09d837a002b791e9767765492a75883f",
"sha256:28bf95177400066596cdbcfc933312493799382879da504633d16cf60bba735b",
"sha256:2eb2227ce1ff998faf0cd7fe85bbf086aa41dfc5af3b1d80867ecfe75fb68df3",
"sha256:2fb0ba3e8566548d6c8e7dd82a8229ff47bd8fb8c2da237607ac8e5a1b8312e5",
"sha256:303f540ad2dddd35b92415b74b900c749ec2010e703ab3bfd6660979d01fd4ed",
"sha256:339ee4a4704bc724757cd5dd9dc8cf4d00980f5d3e6e06d5847c1b594ace68ab",
"sha256:33ce9e786753743159799fdf8e92a5da351158c4bfb6f2db0bf31e7892a1feb5",
"sha256:343ab62e9ca78094f2306aefed67dcfad61c4683f87eee48ff2fd74902447726",
"sha256:34e17913c431f5ae01d8658dbf792fdc457073dcdfbb31dc0cc6ab256e664a8d",
"sha256:364d03207f3e603922d0d3932ef363d55bbf48e3647395765f9bfcbdf6d23632",
"sha256:38b67afb0a06b8575948641c1d6d68e41b83a3abeae2ca9eed2ac59892b36706",
"sha256:3a745cc98d504d5bd2c19b10c79c61c7c3df9222629f1b6210c0368177589fb8",
"sha256:3b019d4ee84b683342af793b56bb35034bd749e4cbdd3d33f7d1107790f8c472",
"sha256:3b6a30a9ab040b3f545b697cb3adbf3696c05a3a68aad172e3fd7ca73ab3c835",
"sha256:3d1e35572a56941b32c239774d7e9ad724074d37f90c7a7d499ab98761bd80cf",
"sha256:3d98de734abee23e61f6b8c2e08a88453ada7d6486dc7cdc82922a03968928db",
"sha256:453d037e09a5176d92ec0fd282e934ed26d806331a8b70ab431a81e2fbabf56d",
"sha256:45f9494613160d0405682f9eee781c7e6d1bf45f819654eb249f8f46a2c22545",
"sha256:4820c02195d6dfb7b8508ff276752f6b2ff8b64ae5d13ebe02e7667e035000b9",
"sha256:49095a38eb333aaf44c06052fd2ec3b8f23e19747ca7ec6f6c954ffea6dbf7be",
"sha256:4aefd911793b5d2d7a921233a54c90329bf3d4a6817dc465f12ffdfe4fc7b8fe",
"sha256:4bc6cb140a7a0ad1f7bc37e018d0ed690b7b6520ade518285dc3171f7a117905",
"sha256:4c30a2f83677876465f44c018830f608fa3c6a8a466eb223535035fbc16f3438",
"sha256:50127c186f191b8917ea2fb8b206fbebe87fd414a6084d15568c27d0a21d60db",
"sha256:50ccb5d355961c0f12f6cf24b7187dbabd5433f29e15147a67995474f27d1776",
"sha256:519895c99c815a1a24a926d5b60627ce5ea48e9f639a5cd328bda0515ea0f10c",
"sha256:54401c77a63cc7d6dc4b4e173bb484f28a5607f3df71484709fe037c92d4f0ed",
"sha256:546cf886f6242dff9ec206331209db9c8e1643ae642dea5fdbecae2453cb50fd",
"sha256:55ce6b6d803890bd3cc89975fca9de1dff39729b43b73cb15ddd933b8bc20484",
"sha256:56793b7a1a091a7c286b5f4aa1fe4ae5d1446fe742d00cdf2ffb1077865db10d",
"sha256:57f0a0bbc9868e10ebe874e9f129d2917750adf008fe7b9c1598c0fbbfdde6a6",
"sha256:5b8c041b6265e08eac8a724b74b655404070b636a8dd6d7a13c3adc07882ef30",
"sha256:5e097646944b66207023bc3c634827de858aebc226d5d4d6d16f0b77566ea182",
"sha256:60499fe961b21264e17a471ec296dcbf4365fbea611bf9e303ab69db7159ce61",
"sha256:610b5c77428a50269f38a534057444c249976433f40f53e3b47e68349cca1425",
"sha256:625e3ef310e7fa3a761d48ca7ea1f9d8718a32b1542e727d584d82f4453d5eeb",
"sha256:657a972f46bbefdbba2d4f14413c0d079f9ae243bd68193cb5061b9732fa54c1",
"sha256:69ab77a1373f1e7563e0fb5a29a8440367dec051da6c7405333699d07444f511",
"sha256:6a520b4f9974b0a0a6ed73c2154de57cdfd0c8800f4f15ab2b73238ffed0b36e",
"sha256:6d68ce8e7b2075390e8ac1e1d3a99e8b6372c694bbe612632606d1d546794207",
"sha256:6dcc3d17eac1df7859ae01202e9bb11ffa8c98949dcbeb1069c8b9a75917e01b",
"sha256:6dfdc2bfe69e9adf0df4915949c22a25b39d175d599bf98e7ddf620a13678585",
"sha256:739e36ef7412b2bd940f75b278749106e6d025e40027c0b94a17ef7968d55d56",
"sha256:7429e7faa1a60cad26ae4227f4dd0459efde239e494c7312624ce228e04f6391",
"sha256:74da9f97daec6928567b48c90ea2c82a106b2d500f397eeb8941e47d30b1ca85",
"sha256:74e4f025ef3db1c6da4460dd27c118d8cd136d0391da4e387a15e48e5c975147",
"sha256:75a9632f1d4f698b2e6e2e1ada40e71f369b15d69baddb8968dcc8e683839b18",
"sha256:76acba4c66c47d27c8365e7c10b3d8016a7da83d3191d053a58382311a8bf4e1",
"sha256:79d1fb9252e7e2cfe4de6e9a6610c7cbb99b9708e2c3e29057f487de5a9eaefa",
"sha256:7ce7ad8abebe737ad6143d9d3bf94b88b93365ea30a5b81f6877ec9c0dee0a48",
"sha256:7ed07b3062b055d7a7f9d6557a251cc655eed0b3152b76de619516621c56f5d3",
"sha256:7ff762670cada8e05b32bf1e4dc50b140790909caa8303cfddc4d702b71ea184",
"sha256:8268cbcd48c5375f46e000adb1390572c98879eb4f77910c6053d25cc3ac2c67",
"sha256:875a3f90d7eb5c5d77e529080d95140eacb3c6d13ad5b616ee8095447b1d22e7",
"sha256:89feb82ca055af0fe797a2323ec9043b26bc371365847dbe83c7fd2e2f181c34",
"sha256:8a7e24cb69ee5f32e003f50e016d5fde438010c1022c96738b04fc2423e61706",
"sha256:8ab6a358d1286498d80fe67bd3d69fcbc7d1359b45b41e74c4a26964ca99c3f8",
"sha256:8b8df03a9e995b6211dafa63b32f9d405881518ff1ddd775db4e7b98fb545e1c",
"sha256:8cf85a6e40ff1f37fe0f25719aadf443686b1ac7652593dc53c7ef9b8492b115",
"sha256:8e8d351ff44c1638cb6e980623d517abd9f580d2e53bfcd18d8941c052a5a009",
"sha256:9164361769b6ca7769079f4d426a41df6164879f7f3568be9086e15baca61466",
"sha256:96e85aa09274955bb6bd483eaf5b12abadade01010478154b0ec70284c1b1526",
"sha256:981a06a3076997adf7c743dcd0d7a0415582661e2517c7d961493572e909aa1d",
"sha256:9cd5323344d8ebb9fb5e96da5de5ad4ebab993bbf51674259dbe9d7a18049525",
"sha256:9d6c6ea6a11ca0ff9cd0390b885984ed31157c168565702959c25e2191674a14",
"sha256:a02d3c48f9bb1e10c7788d92c0c7db6f2002d024ab6e74d6f45ae33e3d0288a3",
"sha256:a233bb68625a85126ac9f1fc66d24337d6e8a0f9207b688eec2e7c880f012ec0",
"sha256:a2f6a1bc2460e643785a2cde17293bd7a8f990884b822f7bca47bee0a82fc66b",
"sha256:a6d17e0370d2516d5bb9062c7b4cb731cff921fc875644c3d751ad857ba9c5b1",
"sha256:a6d2092797b388342c1bc932077ad232f914351932353e2e8706851c870bca1f",
"sha256:ab67ed772c584b7ef2379797bf14b82df9aa5f7438c5b9a09624dd834c1c1aaf",
"sha256:ac6540c9fff6e3813d29d0403ee7a81897f1d8ecc09a8ff84d2eea70ede1cdbf",
"sha256:ae4073a60ab98529ab8a72ebf429f2a8cc612619a8c04e08bed27450d52103c0",
"sha256:ae791f6bd43305aade8c0e22f816b34f3b72b6c820477aab4d18473a37e8090b",
"sha256:aef5474d913d3b05e613906ba4090433c515e13ea49c837aca18bde190853dff",
"sha256:b0b3f2df149efb242cee2ffdeb6674b7f30d23c9a7af26595099afaf46ef4e88",
"sha256:b128092c927eaf485928cec0c28f6b8bead277e28acf56800e972aa2c2abd7a2",
"sha256:b16db2770517b8799c79aa80f4053cd6f8b716f21f8aca962725a9565ce3ee40",
"sha256:b336b0416828022bfd5a2e3083e7f5ba54b96242159f83c7e3eebaec752f1716",
"sha256:b47633251727c8fe279f34025844b3b3a3e40cd1b198356d003aa146258d13a2",
"sha256:b537bd04d7ccd7c6350cdaaaad911f6312cbd61e6e6045542f781c7f8b2e99d2",
"sha256:b5e4ef22ff25bfd4ede5f8fb30f7b24446345f3e79d9b7455aef2836437bc38a",
"sha256:b74b9ea10063efb77a965a8d5f4182806fbf59ed068b3c3fd6f30d2ac7bee734",
"sha256:bb2dc4898180bea79863d5487e5f9c7c34297414bad54bcd0f0852aee9cfdb87",
"sha256:bbc4b80af581e18568ff07f6395c02114d05f4865c2812a1f02f2eaecf0bfd48",
"sha256:bcc98f911f10278d1daf14b87d65325851a1d29153caaf146877ec37031d5f36",
"sha256:be49ad33819d7dcc28a309b86d4ed98e1a65f3075c6acd3cd4fe32103235222b",
"sha256:bec4bd9133420c5c52d562469c754f27c5c9e36ee06abc169612c959bd7dbb07",
"sha256:c2faf60c583af0d135e853c86ac2735ce178f0e338a3c7f9ae8f622fd2eb788c",
"sha256:c689d0d5381f56de7bd6966a4541bff6e08bf8d3871bbd89a0c6ab18aa699573",
"sha256:c7079d5eb1c1315a858bbf180000757db8ad904a89476653232db835c3114001",
"sha256:cb3942960f0beb9f46e2a71a3aca220d1ca32feb5a398656be934320804c0df9",
"sha256:cd9e78285da6c9ba2d5c769628f43ef66d96ac3085e59b10ad4f3707980710d3",
"sha256:cf2a978c795b54c539f47964ec05e35c05bd045db5ca1e8366988c7f2fe6b3ce",
"sha256:d14a0d029a4e176795cef99c056d58067c06195e0c7e2dbb293bf95c08f772a3",
"sha256:d237ba6664b8e60fd90b8549a149a74fcc675272e0e95539a00522e4ca688b04",
"sha256:d26a618ae1766279f2660aca0081b2220aca6bd1aa06b2cf73f07383faf48927",
"sha256:d28cb356f119a437cc58a13f8135ab8a4c8ece18159eb9194b0d269ec4e28083",
"sha256:d4ed0c7cbecde7194cd3228c044e86bf73e30a23505af852857c09c24e77ec5d",
"sha256:d83e2d94b69bf31ead2fa45f0acdef0757fa0458a129734f59f67f3d2eb7ef32",
"sha256:d8bbcd21769594dbba9c37d3c819e2d5847656ca99c747ddb31ac1701d0c0ed9",
"sha256:d9b342c76003c6b9336a80efcc766748a333573abf9350f4094ee46b006ec18f",
"sha256:dc911208b18842a3a57266d8e51fc3cfaccee90a5351b92079beed912a7914c2",
"sha256:dfa7c241073d8f2b8e8dbc7803c434f57dbb83ae2a3d7892dd068d99e96efe2c",
"sha256:e282aedd63c639c07c3857097fc0e236f984ceb4089a8b284da1c526491e3f3d",
"sha256:e290d79a4107d7d794634ce3e985b9ae4f920380a813717adf61804904dc4393",
"sha256:e3d9d13603410b72787579769469af730c38f2f25505573a5888a94b62b920f8",
"sha256:e481bba1e11ba585fb06db666bfc23dbe181dbafc7b25776156120bf12e0d5a6",
"sha256:e49b052b768bb74f58c7dda4e0bdf7b79d43a9204ca584ffe1fb48a6f3c84c66",
"sha256:eb00b549b13bd6d884c863554566095bf6fa9c3cecb2e7b399c4bc7904cb33b5",
"sha256:ec87c44f619380878bd49ca109669c9f221d9ae6883a5bcb3616785fa8f94c97",
"sha256:edcfa83e03370032a489430215c1e7783128808fd3e2e0a3225deee278585196",
"sha256:f11ae142f3a322d44513de1018b50f474f8f736bc3cd91d969f464b5bfef8836",
"sha256:f2a09f6184f17a80897172863a655467da2b11151ec98ba8d7af89f17bf63dae",
"sha256:f5b65529bb2f21ac7861a0e94fdbf5dc0daab41497d18223b46ee8515e5ad297",
"sha256:f60fdd125d85bf9c279ffb8e94c78c51b3b6a37711464e1f5f31078b45002421",
"sha256:f61efaf4bed1cc0860e567d2ecb2363974d414f7f1f124b1df368bbf183453a6",
"sha256:f90e552ecbad426eab352e7b2933091f2be77115bb16f09f78404861c8322981",
"sha256:f956196ef61369f1685d14dad80611488d8dc1ef00be57c0c5a03064005b0f30",
"sha256:fb91819461b1b56d06fa4bcf86617fac795f6a99d12239fb0c68dbeba41a0a30",
"sha256:fbc9d316552f9ef7bba39f4edfad4a734d3d6f93341232a9dddadec4f15d425f",
"sha256:ff69a9a0b4b17d78170c73abe2ab12084bdf1691550c5629ad1fe7849433f324",
"sha256:ffb2be176fed4457e445fe540617f0252a72a8bc56208fd65a690fdb1f57660b"
],
"markers": "python_version >= '3.6'",
"version": "==5.2.2"
},
"markdown-it-py": {
"hashes": [
"sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1",
@ -579,86 +799,73 @@
"sha256:fd05481adc0806f4b8f8f8cb309ec56924b17ce386cb1c2f73919d8a012e6b16",
"sha256:fff66e97f7c02aa0fd57ff8f702977a9c5a1d72ef55b64ee9b146291e4c41057"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==0.6.6"
},
"networkx": {
"hashes": [
"sha256:0c127d8b2f4865f59ae9cb8aafcd60b5c70f3241ebd66f7defad7c4ab90126c9",
"sha256:28575580c6ebdaf4505b22c6256a2b9de86b316dc63ba9e93abde3d78dfdbcf2"
],
"index": "pypi",
"markers": "python_version >= '3.10'",
"version": "==3.3"
},
"numpy": {
"hashes": [
"sha256:04494f6ec467ccb5369d1808570ae55f6ed9b5809d7f035059000a37b8d7e86f",
"sha256:0a43f0974d501842866cc83471bdb0116ba0dffdbaac33ec05e6afed5b615238",
"sha256:0e50842b2295ba8414c8c1d9d957083d5dfe9e16828b37de883f51fc53c4016f",
"sha256:0ec84b9ba0654f3b962802edc91424331f423dcf5d5f926676e0150789cb3d95",
"sha256:17067d097ed036636fa79f6a869ac26df7db1ba22039d962422506640314933a",
"sha256:1cde1753efe513705a0c6d28f5884e22bdc30438bf0085c5c486cdaff40cd67a",
"sha256:1e72728e7501a450288fc8e1f9ebc73d90cfd4671ebbd631f3e7857c39bd16f2",
"sha256:2635dbd200c2d6faf2ef9a0d04f0ecc6b13b3cad54f7c67c61155138835515d2",
"sha256:2ce46fd0b8a0c947ae047d222f7136fc4d55538741373107574271bc00e20e8f",
"sha256:34f003cb88b1ba38cb9a9a4a3161c1604973d7f9d5552c38bc2f04f829536609",
"sha256:354f373279768fa5a584bac997de6a6c9bc535c482592d7a813bb0c09be6c76f",
"sha256:38ecb5b0582cd125f67a629072fed6f83562d9dd04d7e03256c9829bdec027ad",
"sha256:3e8e01233d57639b2e30966c63d36fcea099d17c53bf424d77f088b0f4babd86",
"sha256:3f6bed7f840d44c08ebdb73b1825282b801799e325bcbdfa6bc5c370e5aecc65",
"sha256:4554eb96f0fd263041baf16cf0881b3f5dafae7a59b1049acb9540c4d57bc8cb",
"sha256:46e161722e0f619749d1cd892167039015b2c2817296104487cd03ed4a955995",
"sha256:49d9f7d256fbc804391a7f72d4a617302b1afac1112fac19b6c6cec63fe7fe8a",
"sha256:4d2f62e55a4cd9c58c1d9a1c9edaedcd857a73cb6fda875bf79093f9d9086f85",
"sha256:5f64641b42b2429f56ee08b4f427a4d2daf916ec59686061de751a55aafa22e4",
"sha256:63b92c512d9dbcc37f9d81b123dec99fdb318ba38c8059afc78086fe73820275",
"sha256:6d7696c615765091cc5093f76fd1fa069870304beaccfd58b5dcc69e55ef49c1",
"sha256:79e843d186c8fb1b102bef3e2bc35ef81160ffef3194646a7fdd6a73c6b97196",
"sha256:821eedb7165ead9eebdb569986968b541f9908979c2da8a4967ecac4439bae3d",
"sha256:84554fc53daa8f6abf8e8a66e076aff6ece62de68523d9f665f32d2fc50fd66e",
"sha256:8d83bb187fb647643bd56e1ae43f273c7f4dbcdf94550d7938cfc32566756514",
"sha256:903703372d46bce88b6920a0cd86c3ad82dae2dbef157b5fc01b70ea1cfc430f",
"sha256:9416a5c2e92ace094e9f0082c5fd473502c91651fb896bc17690d6fc475128d6",
"sha256:9a1712c015831da583b21c5bfe15e8684137097969c6d22e8316ba66b5baabe4",
"sha256:9c27f0946a3536403efb0e1c28def1ae6730a72cd0d5878db38824855e3afc44",
"sha256:a356364941fb0593bb899a1076b92dfa2029f6f5b8ba88a14fd0984aaf76d0df",
"sha256:a7039a136017eaa92c1848152827e1424701532ca8e8967fe480fe1569dae581",
"sha256:acd3a644e4807e73b4e1867b769fbf1ce8c5d80e7caaef0d90dcdc640dfc9787",
"sha256:ad0c86f3455fbd0de6c31a3056eb822fc939f81b1618f10ff3406971893b62a5",
"sha256:b4c76e3d4c56f145d41b7b6751255feefae92edbc9a61e1758a98204200f30fc",
"sha256:b6f6a8f45d0313db07d6d1d37bd0b112f887e1369758a5419c0370ba915b3871",
"sha256:c5a59996dc61835133b56a32ebe4ef3740ea5bc19b3983ac60cc32be5a665d54",
"sha256:c73aafd1afca80afecb22718f8700b40ac7cab927b8abab3c3e337d70e10e5a2",
"sha256:cee6cc0584f71adefe2c908856ccc98702baf95ff80092e4ca46061538a2ba98",
"sha256:cef04d068f5fb0518a77857953193b6bb94809a806bd0a14983a8f12ada060c9",
"sha256:cf5d1c9e6837f8af9f92b6bd3e86d513cdc11f60fd62185cc49ec7d1aba34864",
"sha256:e61155fae27570692ad1d327e81c6cf27d535a5d7ef97648a17d922224b216de",
"sha256:e7f387600d424f91576af20518334df3d97bc76a300a755f9a8d6e4f5cadd289",
"sha256:ed08d2703b5972ec736451b818c2eb9da80d66c3e84aed1deeb0c345fefe461b",
"sha256:fbd6acc766814ea6443628f4e6751d0da6593dae29c08c0b2606164db026970c",
"sha256:feff59f27338135776f6d4e2ec7aeeac5d5f7a08a83e80869121ef8164b74af9"
"sha256:08458fbf403bff5e2b45f08eda195d4b0c9b35682311da5a5a0a0925b11b9bd8",
"sha256:0fbb536eac80e27a2793ffd787895242b7f18ef792563d742c2d673bfcb75134",
"sha256:12f5d865d60fb9734e60a60f1d5afa6d962d8d4467c120a1c0cda6eb2964437d",
"sha256:15eb4eca47d36ec3f78cde0a3a2ee24cf05ca7396ef808dda2c0ddad7c2bde67",
"sha256:173a00b9995f73b79eb0191129f2455f1e34c203f559dd118636858cc452a1bf",
"sha256:1b902ce0e0a5bb7704556a217c4f63a7974f8f43e090aff03fcf262e0b135e02",
"sha256:1f682ea61a88479d9498bf2091fdcd722b090724b08b31d63e022adc063bad59",
"sha256:1f87fec1f9bc1efd23f4227becff04bd0e979e23ca50cc92ec88b38489db3b55",
"sha256:24a0e1befbfa14615b49ba9659d3d8818a0f4d8a1c5822af8696706fbda7310c",
"sha256:2c3a346ae20cfd80b6cfd3e60dc179963ef2ea58da5ec074fd3d9e7a1e7ba97f",
"sha256:36d3a9405fd7c511804dc56fc32974fa5533bdeb3cd1604d6b8ff1d292b819c4",
"sha256:3fdabe3e2a52bc4eff8dc7a5044342f8bd9f11ef0934fcd3289a788c0eb10018",
"sha256:4127d4303b9ac9f94ca0441138acead39928938660ca58329fe156f84b9f3015",
"sha256:4658c398d65d1b25e1760de3157011a80375da861709abd7cef3bad65d6543f9",
"sha256:485b87235796410c3519a699cfe1faab097e509e90ebb05dcd098db2ae87e7b3",
"sha256:529af13c5f4b7a932fb0e1911d3a75da204eff023ee5e0e79c1751564221a5c8",
"sha256:5a3d94942c331dd4e0e1147f7a8699a4aa47dffc11bf8a1523c12af8b2e91bbe",
"sha256:5daab361be6ddeb299a918a7c0864fa8618af66019138263247af405018b04e1",
"sha256:61728fba1e464f789b11deb78a57805c70b2ed02343560456190d0501ba37b0f",
"sha256:6790654cb13eab303d8402354fabd47472b24635700f631f041bd0b65e37298a",
"sha256:69ff563d43c69b1baba77af455dd0a839df8d25e8590e79c90fcbe1499ebde42",
"sha256:6bf4e6f4a2a2e26655717a1983ef6324f2664d7011f6ef7482e8c0b3d51e82ac",
"sha256:6e4eeb6eb2fced786e32e6d8df9e755ce5be920d17f7ce00bc38fcde8ccdbf9e",
"sha256:72dc22e9ec8f6eaa206deb1b1355eb2e253899d7347f5e2fae5f0af613741d06",
"sha256:75b4e316c5902d8163ef9d423b1c3f2f6252226d1aa5cd8a0a03a7d01ffc6268",
"sha256:7b9853803278db3bdcc6cd5beca37815b133e9e77ff3d4733c247414e78eb8d1",
"sha256:7d6fddc5fe258d3328cd8e3d7d3e02234c5d70e01ebe377a6ab92adb14039cb4",
"sha256:81b0893a39bc5b865b8bf89e9ad7807e16717f19868e9d234bdaf9b1f1393868",
"sha256:8efc84f01c1cd7e34b3fb310183e72fcdf55293ee736d679b6d35b35d80bba26",
"sha256:8fae4ebbf95a179c1156fab0b142b74e4ba4204c87bde8d3d8b6f9c34c5825ef",
"sha256:99d0d92a5e3613c33a5f01db206a33f8fdf3d71f2912b0de1739894668b7a93b",
"sha256:9adbd9bb520c866e1bfd7e10e1880a1f7749f1f6e5017686a5fbb9b72cf69f82",
"sha256:a1e01dcaab205fbece13c1410253a9eea1b1c9b61d237b6fa59bcc46e8e89343",
"sha256:a8fc2de81ad835d999113ddf87d1ea2b0f4704cbd947c948d2f5513deafe5a7b",
"sha256:b83e16a5511d1b1f8a88cbabb1a6f6a499f82c062a4251892d9ad5d609863fb7",
"sha256:bb2124fdc6e62baae159ebcfa368708867eb56806804d005860b6007388df171",
"sha256:bfc085b28d62ff4009364e7ca34b80a9a080cbd97c2c0630bb5f7f770dae9414",
"sha256:cbab9fc9c391700e3e1287666dfd82d8666d10e69a6c4a09ab97574c0b7ee0a7",
"sha256:e5eeca8067ad04bc8a2a8731183d51d7cbaac66d86085d5f4766ee6bf19c7f87",
"sha256:e9e81fa9017eaa416c056e5d9e71be93d05e2c3c2ab308d23307a8bc4443c368",
"sha256:ea2326a4dca88e4a274ba3a4405eb6c6467d3ffbd8c7d38632502eaae3820587",
"sha256:eacf3291e263d5a67d8c1a581a8ebbcfd6447204ef58828caf69a5e3e8c75990",
"sha256:ec87f5f8aca726117a1c9b7083e7656a9d0d606eec7299cc067bb83d26f16e0c",
"sha256:f1659887361a7151f89e79b276ed8dff3d75877df906328f14d8bb40bb4f5101",
"sha256:f9cf5ea551aec449206954b075db819f52adc1638d46a6738253a712d553c7b4"
],
"index": "pypi",
"markers": "python_version >= '3.9'",
"version": "==2.0.0"
"version": "==2.0.1"
},
"osmpythontools": {
"hashes": [
"sha256:22548d86d68d36edff3cf9ab76c45745cda86a4ea0b28442e107d6b42992a426",
"sha256:ac67bea77b521941af648ef641ae1d006101948d1112475c256ea23ef31b426a"
"sha256:13ff721f760fdad5dd78b4d1461d286b78bba96ee151a7301ee8c11a0c258be9"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==1.9.3"
"version": "==0.3.5"
},
"packaging": {
"hashes": [
"sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002",
"sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==24.1"
},
@ -895,70 +1102,13 @@
"markers": "python_version >= '3.8'",
"version": "==2.18.0"
},
"pyogrio": {
"pyparsing": {
"hashes": [
"sha256:019731a856a9abfe909e86f50eb13f8362f6742337caf757c54b7c8acfe75b89",
"sha256:083351b258b3e08b6c6085dac560bd321b68de5cb4a66229095da68d5f3d696b",
"sha256:13642608a1cd67797ae8b5d792b0518d8ef3eb76506c8232ab5eaa1ea1159dff",
"sha256:17420febc17651876d5140b54b24749aa751d482b5f9ef6267b8053e6e962876",
"sha256:1a495ca4fb77c69595747dd688f8f17bb7d2ea9cd86603aa71c7fc98cc8b4174",
"sha256:2829615cf58b1b24a9f96fea42abedaa1a800dd351c67374cc2f6341138608f3",
"sha256:2e98913fa183f7597c609e774820a149e9329fd2a0f8d33978252fbd00ae87e6",
"sha256:2f2ec57ab74785db9c2bf47c0a6731e5175595a13f8253f06fa84136adb310a9",
"sha256:30cbeeaedb9bced7012487e7438919aa0c7dfba18ac3d4315182b46eb3139b9d",
"sha256:3a2fcaa269031dbbc8ebd91243c6452c5d267d6df939c008ab7533413c9cf92d",
"sha256:3f964002d445521ad5b8e732a6b5ef0e2d2be7fe566768e5075c1d71398da64a",
"sha256:4a289584da6df7ca318947301fe0ba9177e7f863f63110e087c80ac5f3658de8",
"sha256:4da0b9deb380bd9a200fee13182c4f95b02b4c554c923e2e0032f32aaf1439ed",
"sha256:4e0f90a6c3771ee1f1fea857778b4b6a1b64000d851b819f435f9091b3c38c60",
"sha256:6a6fa2e8cf95b3d4a7c0fac48bce6e5037579e28d3eb33b53349d6e11f15e5a8",
"sha256:6dc94a67163218581c7df275223488ac9b31dc582ccd756da607c3338908566c",
"sha256:796e4f6a4e769b2eb6fea9a10546ea4bdee16182d1e29802b4d6349363c3c1d7",
"sha256:7fcafed24371fe6e23bcf5abebbb29269f8d79915f1dd818ac85453657ea714a",
"sha256:9440466c0211ac81f3417f274da5903f15546b486f76b2f290e74a56aaf0e737",
"sha256:959022f3ad04053f8072dc9a2ad110c46edd9e4f92352061ba835fc91df3ca96",
"sha256:d668cb10f2bf6ccd7c402f91e8b06290722dd09dbe265ae95b2c13db29ebeba0",
"sha256:e38c3c6d37cf2cc969407e4d051dcb507cfd948eb26c7b0840c4f7d7d4a71bd4",
"sha256:f47c9b6818cc0f420015b672d5dcc488530a5ee63e5ba35a184957b21ea3922a",
"sha256:f5d80eb846be4fc4e642cbedc1ed0c143e8d241653382ecc76a7620bbd2a5c3a",
"sha256:f8bf193269ea9d347ac3ddada960a59f1ab2e4a5c009be95dc70e6505346b2fc",
"sha256:fb04bd80964428491951766452f0071b0bc37c7d38c45ef02502dbd83e5d74a0"
"sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad",
"sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"
],
"markers": "python_version >= '3.8'",
"version": "==0.9.0"
},
"pyproj": {
"hashes": [
"sha256:18faa54a3ca475bfe6255156f2f2874e9a1c8917b0004eee9f664b86ccc513d3",
"sha256:1e9fbaf920f0f9b4ee62aab832be3ae3968f33f24e2e3f7fbb8c6728ef1d9746",
"sha256:2d6ff73cc6dbbce3766b6c0bce70ce070193105d8de17aa2470009463682a8eb",
"sha256:36b64c2cb6ea1cc091f329c5bd34f9c01bb5da8c8e4492c709bda6a09f96808f",
"sha256:38a3361941eb72b82bd9a18f60c78b0df8408416f9340521df442cebfc4306e2",
"sha256:447db19c7efad70ff161e5e46a54ab9cc2399acebb656b6ccf63e4bc4a04b97a",
"sha256:44aa7c704c2b7d8fb3d483bbf75af6cb2350d30a63b144279a09b75fead501bf",
"sha256:4ba1f9b03d04d8cab24d6375609070580a26ce76eaed54631f03bab00a9c737b",
"sha256:4bc0472302919e59114aa140fd7213c2370d848a7249d09704f10f5b062031fe",
"sha256:50100b2726a3ca946906cbaa789dd0749f213abf0cbb877e6de72ca7aa50e1ae",
"sha256:5279586013b8d6582e22b6f9e30c49796966770389a9d5b85e25a4223286cd3f",
"sha256:6420ea8e7d2a88cb148b124429fba8cd2e0fae700a2d96eab7083c0928a85110",
"sha256:65ad699e0c830e2b8565afe42bd58cc972b47d829b2e0e48ad9638386d994915",
"sha256:6d227a865356f225591b6732430b1d1781e946893789a609bb34f59d09b8b0f8",
"sha256:7a27151ddad8e1439ba70c9b4b2b617b290c39395fa9ddb7411ebb0eb86d6fb0",
"sha256:80fafd1f3eb421694857f254a9bdbacd1eb22fc6c24ca74b136679f376f97d35",
"sha256:83039e5ae04e5afc974f7d25ee0870a80a6bd6b7957c3aca5613ccbe0d3e72bf",
"sha256:8b8acc31fb8702c54625f4d5a2a6543557bec3c28a0ef638778b7ab1d1772132",
"sha256:9274880263256f6292ff644ca92c46d96aa7e57a75c6df3f11d636ce845a1877",
"sha256:ab7aa4d9ff3c3acf60d4b285ccec134167a948df02347585fdd934ebad8811b4",
"sha256:c41e80ddee130450dcb8829af7118f1ab69eaf8169c4bf0ee8d52b72f098dc2f",
"sha256:db3aedd458e7f7f21d8176f0a1d924f1ae06d725228302b872885a1c34f3119e",
"sha256:e7e13c40183884ec7f94eb8e0f622f08f1d5716150b8d7a134de48c6110fee85",
"sha256:ebfbdbd0936e178091309f6cd4fcb4decd9eab12aa513cdd9add89efa3ec2882",
"sha256:fd43bd9a9b9239805f406fd82ba6b106bf4838d9ef37c167d3ed70383943ade1",
"sha256:fd93c1a0c6c4aedc77c0fe275a9f2aba4d59b8acf88cebfc19fe3c430cfabf4f",
"sha256:fffb059ba3bced6f6725961ba758649261d85ed6ce670d3e3b0a26e81cf1aa8d"
],
"markers": "python_version >= '3.9'",
"version": "==3.6.1"
"markers": "python_full_version >= '3.6.8'",
"version": "==3.1.2"
},
"python-dateutil": {
"hashes": [
@ -1060,7 +1210,6 @@
"sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760",
"sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==2.32.3"
},
@ -1171,6 +1320,14 @@
"markers": "python_version >= '3.7'",
"version": "==1.3.1"
},
"soupsieve": {
"hashes": [
"sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690",
"sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"
],
"markers": "python_version >= '3.8'",
"version": "==2.5"
},
"starlette": {
"hashes": [
"sha256:6fe59f29268538e5d0d182f2791a479a0c64638e6935d1c6989e63fb2699c6ee",
@ -1300,11 +1457,11 @@
"standard"
],
"hashes": [
"sha256:cd17daa7f3b9d7a24de3617820e634d0933b69eed8e33a516071174427238c81",
"sha256:d46cd8e0fd80240baffbcd9ec1012a712938754afcf81bce56c024c1656aece8"
"sha256:0d114d0831ff1adbf231d358cbf42f17333413042552a624ea6a9b4c33dcfd81",
"sha256:94a3608da0e530cea8f69683aa4126364ac18e3826b6630d1a65f4638aade503"
],
"markers": "python_version >= '3.8'",
"version": "==0.30.1"
"version": "==0.30.3"
},
"uvloop": {
"hashes": [
@ -1498,6 +1655,14 @@
"sha256:ffefa1374cd508d633646d51a8e9277763a9b78ae71324183693959cf94635a7"
],
"version": "==12.0"
},
"xarray": {
"hashes": [
"sha256:0b91e0bc4dc0296947947640fe31ec6e867ce258d2f7cbc10bedf4a6d68340c7",
"sha256:721a7394e8ec3d592b2d8ebe21eed074ac077dc1bb1bd777ce00e41700b4866c"
],
"markers": "python_version >= '3.9'",
"version": "==2024.6.0"
}
},
"develop": {}

View File

@ -1,12 +0,0 @@
'leisure'='park'
geological
'natural'='geyser'
'natural'='hot_spring'
'natural'='arch'
'natural'='volcano'
'natural'='stone'
'tourism'='alpine_hut'
'tourism'='viewpoint'
'tourism'='zoo'
'tourism'='artwork'
'waterway'='waterfall'

View File

@ -1,11 +0,0 @@
'tourism'='museum'
'tourism'='attraction'
'tourism'='gallery'
'tourism'='artwork'
historic
'amenity'='planetarium'
'amenity'='place_of_worship'
'amenity'='fountain'
'water'='reflecting_pool'
# 'tourism'='attraction' might be a bit too broad
# historic as well

View File

@ -1,8 +1,9 @@
import logging.config
from pathlib import Path
import os
import logging
PARAMETERS_DIR = Path('src/parameters')
LOCATION_PREFIX = Path('src')
PARAMETERS_DIR = LOCATION_PREFIX / 'parameters'
AMENITY_SELECTORS_PATH = PARAMETERS_DIR / 'amenity_selectors.yaml'
LANDMARK_PARAMETERS_PATH = PARAMETERS_DIR / 'landmark_parameters.yaml'
OPTIMIZER_PARAMETERS_PATH = PARAMETERS_DIR / 'optimizer_parameters.yaml'
@ -12,8 +13,15 @@ OPTIMIZER_PARAMETERS_PATH = PARAMETERS_DIR / 'optimizer_parameters.yaml'
cache_dir_string = os.getenv('OSM_CACHE_DIR', './cache')
OSM_CACHE_DIR = Path(cache_dir_string)
logger = logging.getLogger(__name__)
logging.basicConfig(
level = logging.INFO,
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
import logging
import yaml
LOGGING_CONFIG = LOCATION_PREFIX / 'log_config.yaml'
config = yaml.safe_load(LOGGING_CONFIG.read_text())
logging.config.dictConfig(config)
# if we are in a debug session, set the log level to debug
if os.getenv('DEBUG', False):
logging.getLogger().setLevel(logging.DEBUG)
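The new constants.py hands logging setup to logging.config.dictConfig, loading the configuration from src/log_config.yaml, and only raises the root level to DEBUG when the DEBUG environment variable is set (exactly the variable the launch configurations above provide). The YAML file itself is not part of this excerpt; a minimal configuration reusing the format string from the removed basicConfig call, written as a Python dict purely for illustration, could look like this:

import logging
import logging.config

# Hypothetical minimal equivalent of src/log_config.yaml (the real file is not
# shown in this diff); the keys follow the standard dictConfig schema.
LOG_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "simple": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"},
    },
    "handlers": {
        "console": {"class": "logging.StreamHandler", "formatter": "simple"},
    },
    "root": {"level": "INFO", "handlers": ["console"]},
}

logging.config.dictConfig(LOG_CONFIG)
logging.getLogger(__name__).info("logging configured")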

View File

@ -1,151 +0,0 @@
import yaml
import logging
from OSMPythonTools import cachingStrategy, overpass
from structs.landmarks import Landmark, LandmarkType
from structs.preferences import Preferences, Preference
import constants
SIGHTSEEING = LandmarkType(landmark_type='sightseeing')
NATURE = LandmarkType(landmark_type='nature')
SHOPPING = LandmarkType(landmark_type='shopping')
class LandmarkManager:
logger = logging.getLogger(__name__)
def __init__(self) -> None:
strategy = cachingStrategy.JSON(cacheDir=constants.OSM_CACHE_DIR)
self.query_builder = overpass.Overpass()
with constants.AMENITY_SELECTORS_PATH.open('r') as f:
self.amenity_selectors = yaml.safe_load(f)
with constants.LANDMARK_PARAMETERS_PATH.open('r') as f:
self.parameters = yaml.safe_load(f)
# max_distance = parameters['city_bbox_side']
def get_landmark_lists(self, preferences: Preferences, center_coordinates: tuple[float, float]) -> tuple[list[Landmark], list[Landmark]]:
'''
Generate a list of landmarks based on the preferences of the user and the center (ie. start) coordinates.
The list is then used by the pathfinding algorithm to generate a path that goes through the most interesting landmarks.
:param preferences: the preferences specified by the user
:param center_coordinates: the coordinates of the starting point
'''
L = []
# List for sightseeing
if preferences.sightseeing.score != 0:
score_func = lambda loc, n_tags: int((10 + n_tags * self.parameters['tag_coeff']) * self.parameters['church_coeff'])
L1 = self.fetch_landmarks(self.amenity_selectors['sightseeing'], SIGHTSEEING, center_coordinates, self.parameters['city_bbox_side'], score_func)
self.correct_score(L1, preferences.sightseeing)
L += L1
# List for nature
if preferences.nature.score != 0:
score_func = lambda loc, n_tags: int((10 + n_tags * self.parameters['tag_coeff']) * self.parameters['park_coeff'])
L2 = self.fetch_landmarks(self.amenity_selectors['nature'], NATURE, center_coordinates, self.parameters['city_bbox_side'], score_func)
self.correct_score(L2, preferences.nature)
L += L2
# List for shopping
if preferences.shopping.score != 0:
score_func = lambda loc, n_tags: int((10 + n_tags * self.parameters['tag_coeff']))
L3 = self.fetch_landmarks(self.amenity_selectors['shopping'], SHOPPING, center_coordinates, self.parameters['city_bbox_side'], score_func)
self.correct_score(L3, preferences.shopping)
L += L3
# remove duplicates
L = list(set(L))
L_constrained = self.take_most_important(L, self.parameters['N_important'])
self.logger.info(f'Generated {len(L)} landmarks around {center_coordinates}, and constrained to {len(L_constrained)} most important ones.')
return L, L_constrained
# Take the most important landmarks from the list
def take_most_important(self, landmarks: list[Landmark], n_max: int) -> list[Landmark]:
landmarks_sorted = sorted(landmarks, key=lambda x: x.attractiveness, reverse=True)
return landmarks_sorted[:n_max]
# Correct the score of a list of landmarks by taking into account preference settings
def correct_score(self, L: list[Landmark], preference: Preference):
if len(L) == 0 :
return
if L[0].type != preference.type :
raise TypeError(f"LandmarkType {preference.type} does not match the type of Landmark {L[0].name}")
for elem in L :
elem.attractiveness = int(elem.attractiveness*preference.score/500) # arbitrary computation
# Function to count elements within a certain radius of a location
def count_elements_within_radius(self, point: Point, radius: int) -> int:
center_coordinates = (point.x, point.y)
try:
landmarks = ox.features_from_point(
center_point = center_coordinates,
dist = radius,
tags = {'building': True} # this is a common tag to give an estimation of the number of elements in the area
)
return len(landmarks)
except ox._errors.InsufficientResponseError:
return 0
def fetch_landmarks(
self,
amenity_selectors: list[dict],
landmarktype: LandmarkType,
center_coordinates: tuple[float, float],
distance: int,
score_function: callable
) -> list[Landmark]:
landmarks = ox.features_from_point(
center_point = center_coordinates,
dist = distance,
tags = amenity_selectors
)
self.logger.info(f'Fetched {len(landmarks)} landmarks around {center_coordinates}.')
# cleanup the list
# remove rows where name is None
landmarks = landmarks[landmarks['name'].notna()]
# TODO: remove rows that are part of another building
ret_landmarks = []
for element, description in landmarks.iterrows():
osm_type = element[0]
osm_id = element[1]
location = description['geometry']
n_tags = len(description['nodes']) if type(description['nodes']) == list else 1
if type(location) == Point:
location = location
elif type(location) == Polygon or type(location) == MultiPolygon:
location = location.centroid
elif type(location) == LineString:
location = location.interpolate(location.length/2)
score = score_function(location, n_tags)
landmark = Landmark(
name = description['name'],
type = landmarktype,
location = (location.x, location.y),
osm_type = osm_type,
osm_id = osm_id,
attractiveness = score,
must_do = False,
n_tags = n_tags
)
ret_landmarks.append(landmark)
return ret_landmarks
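Aside: the removed module references ox (osmnx) and the shapely geometry types Point, Polygon, MultiPolygon and LineString without visible imports in this excerpt. Its scoring is plain arithmetic: a base of 10 plus the tag count times tag_coeff, scaled by a category coefficient, then rescaled against the preference score in correct_score. A worked example with made-up numbers (tag_coeff, church_coeff and preference_score below are assumptions, not values from landmark_parameters.yaml, which is not shown here):

# Illustrative numbers only; tag_coeff, church_coeff and preference_score are
# assumed, the formulas are the ones from the removed LandmarkManager.
tag_coeff = 10
church_coeff = 0.6
n_tags = 20

attractiveness = int((10 + n_tags * tag_coeff) * church_coeff)  # int(210 * 0.6) = 126

preference_score = 300
attractiveness = int(attractiveness * preference_score / 500)   # int(75.6) = 75
print(attractiveness)  # 75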

View File

@ -1,419 +0,0 @@
import numpy as np
import yaml
from typing import List, Tuple
from scipy.optimize import linprog
from math import radians, sin, cos, acos
from structs.landmarks import Landmark
import constants
# Function to print the result
def print_res(L: List[Landmark], L_tot):
if len(L) == L_tot:
print('\nAll landmarks can be visited within max_steps, the following order is suggested : ')
else :
print('Could not visit all the landmarks, the following order is suggested : ')
dist = 0
for elem in L :
if elem.time_to_reach_next is not None :
print('- ' + elem.name + ', time to reach next = ' + str(elem.time_to_reach_next))
dist += elem.time_to_reach_next
else :
print('- ' + elem.name)
print("\nMinutes walked : " + str(dist))
print(f"Visited {len(L)-2} out of {L_tot-2} landmarks")
# Prevent the use of a particular solution
def prevent_config(resx, A_ub, b_ub) -> bool:
for i, elem in enumerate(resx):
resx[i] = round(elem)
N = len(resx) # Number of edges
L = int(np.sqrt(N)) # Number of landmarks
nonzeroind = np.nonzero(resx)[0] # the return is a little funky so I use the [0]
nonzero_tup = np.unravel_index(nonzeroind, (L,L))
ind_a = nonzero_tup[0].tolist()
vertices_visited = ind_a
vertices_visited.remove(0)
ones = [1]*L
h = [0]*N
for i in range(L) :
if i in vertices_visited :
h[i*L:i*L+L] = ones
A_ub = np.vstack((A_ub, h))
b_ub.append(len(vertices_visited)-1)
return A_ub, b_ub
# Prevent a detected circular sub-tour from reappearing in the solution
def break_circle(circle_vertices: list, L: int, A_ub: list, b_ub: list) -> bool:
if L-1 in circle_vertices :
circle_vertices.remove(L-1)
h = [0]*L*L
for i in range(L) :
if i in circle_vertices :
h[i*L:i*L+L] = [1]*L
A_ub = np.vstack((A_ub, h))
b_ub.append(len(circle_vertices)-1)
return A_ub, b_ub
# Checks whether the path is connected; returns the visited vertices and, if the path is broken, the edges of the offending circle
def is_connected(resx) -> bool:
# first round the results to have only 0-1 values
for i, elem in enumerate(resx):
resx[i] = round(elem)
N = len(resx) # length of res
L = int(np.sqrt(N)) # number of landmarks. CAST INTO INT but should not be a problem because N = L**2 by def.
n_edges = resx.sum() # number of edges
nonzeroind = np.nonzero(resx)[0] # the return is a little funny so I use the [0]
nonzero_tup = np.unravel_index(nonzeroind, (L,L))
ind_a = nonzero_tup[0].tolist()
ind_b = nonzero_tup[1].tolist()
edges = []
edges_visited = []
vertices_visited = []
edge1 = (ind_a[0], ind_b[0])
edges_visited.append(edge1)
vertices_visited.append(edge1[0])
for i, a in enumerate(ind_a) :
edges.append((a, ind_b[i])) # Create the list of edges
remaining = edges
remaining.remove(edge1)
break_flag = False
while len(remaining) > 0 and not break_flag:
for edge2 in remaining :
if edge2[0] == edge1[1] :
if edge1[1] in vertices_visited :
edges_visited.append(edge2)
break_flag = True
break
else :
vertices_visited.append(edge1[1])
edges_visited.append(edge2)
remaining.remove(edge2)
edge1 = edge2
elif edge1[1] == L-1 or edge1[1] in vertices_visited:
break_flag = True
break
vertices_visited.append(edge1[1])
if len(vertices_visited) == n_edges +1 :
return vertices_visited, []
else:
return vertices_visited, edges_visited
# Function that returns the distance in meters from one location to another
def get_distance(p1: Tuple[float, float], p2: Tuple[float, float], detour: float, speed: float) :
# Compute the straight-line distance in km
if p1 == p2 :
return 0, 0
else:
dist = 6371.01 * acos(sin(radians(p1[0]))*sin(radians(p2[0])) + cos(radians(p1[0]))*cos(radians(p2[0]))*cos(radians(p1[1]) - radians(p2[1])))
# Apply the detour factor to estimate the actual walking distance in an average city (in km)
walk_dist = dist*detour
# Time to walk this distance (in minutes)
walk_time = walk_dist/speed*60
if walk_time > 15 :
walk_time = 5*round(walk_time/5)
else :
walk_time = round(walk_time)
return round(walk_dist, 1), walk_time
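A small worked example of the numbers above (illustrative, not from the original file), using the repo's default detour factor 1.4 and walking speed 4.8 km/h:
detour, speed = 1.4, 4.8                # values from the optimizer parameters
straight_km = 1.0                       # assumed straight-line distance
walk_dist = straight_km * detour        # 1.4 km actually walked
walk_time = walk_dist / speed * 60      # 17.5 minutes
rounded = 5 * round(walk_time / 5) if walk_time > 15 else round(walk_time)
print(round(walk_dist, 1), walk_time, rounded)   # 1.4 17.5 20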
# Initialize A and c. Compute the distances from all landmarks to each other and store attractiveness
# We want to maximize the sightseeing : max c*x s.t. A_ub*x <= b_ub and A_eq*x = b_eq
def init_ub_dist(landmarks: List[Landmark], max_steps: int):
# Read the parameters from the file
with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
detour = parameters['detour_factor']
speed = parameters['average_walking_speed']
# Objective function coefficients. a*x1 + b*x2 + c*x3 + ...
c = []
# Coefficients of inequality constraints (left-hand side)
A_ub = []
for spot1 in landmarks :
dist_table = [0]*len(landmarks)
c.append(-spot1.attractiveness)
for j, spot2 in enumerate(landmarks) :
t = get_distance(spot1.location, spot2.location, detour, speed)[1]
dist_table[j] = t
A_ub += dist_table
c = c*len(landmarks)
return c, A_ub, [max_steps]
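For orientation (an illustration, not part of the original file): the decision vector x has L*L entries laid out row-major, where x[i*L + j] = 1 means "walk from landmark i to landmark j".
import numpy as np

L = 3
x = np.zeros(L * L)
x[0 * L + 2] = 1   # walk from landmark 0 to landmark 2
x[2 * L + 1] = 1   # then from landmark 2 to landmark 1
# With A_ub as the flat list of pairwise walking times built above, np.dot(A_ub, x)
# is the walking time of exactly these two legs, and b_ub = [max_steps] caps it.
# c tiles the negated attractiveness vector L times, so each chosen edge i -> j
# contributes the attractiveness of its destination j to the maximized objective.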
# Constraint to respect only one travel per landmark. Also caps the total number of visited landmarks
def respect_number(L:int, A_ub, b_ub):
ones = [1]*L
zeros = [0]*L
for i in range(L) :
h = zeros*i + ones + zeros*(L-1-i)
A_ub = np.vstack((A_ub, h))
b_ub.append(1)
# Read the parameters from the file
with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
max_landmarks = parameters['max_landmarks']
A_ub = np.vstack((A_ub, ones*L))
b_ub.append(max_landmarks+1)
return A_ub, b_ub
# Constraint to not have d14 and d41 simultaneously. Does not prevent circular symmetry with more elements
def break_sym(L, A_ub, b_ub):
upper_ind = np.triu_indices(L,0,L)
up_ind_x = upper_ind[0]
up_ind_y = upper_ind[1]
for i, _ in enumerate(up_ind_x) :
l = [0]*L*L
if up_ind_x[i] != up_ind_y[i] :
l[up_ind_x[i]*L + up_ind_y[i]] = 1
l[up_ind_y[i]*L + up_ind_x[i]] = 1
A_ub = np.vstack((A_ub,l))
b_ub.append(1)
return A_ub, b_ub
# Constraint to not stay in position. Removes d11, d22, d33, etc.
def init_eq_not_stay(L: int):
l = [0]*L*L
for i in range(L) :
for j in range(L) :
if j == i :
l[j + i*L] = 1
l = np.array(np.array(l))
return [l], [0]
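A quick sanity check one could run against this helper (for L = 3; illustrative only):
rows, rhs = init_eq_not_stay(3)
print(rows[0].reshape(3, 3))
# [[1 0 0]
#  [0 1 0]
#  [0 0 1]]
# rhs == [0], so x00 + x11 + x22 = 0: the tour can never "stay" at a landmark.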
# Go through the landmarks and force the optimizer to use landmarks where attractiveness is set to -1
def respect_user_mustsee(landmarks: List[Landmark], A_eq: list, b_eq: list) :
L = len(landmarks)
for i, elem in enumerate(landmarks) :
if elem.must_do is True and elem.name not in ['finish', 'start']:
l = [0]*L*L
for j in range(L) : # sets the horizontal ones (go from)
l[j +i*L] = 1 # sets the vertical ones (go to) double check if good
for k in range(L-1) :
l[k*L+L-1] = 1
A_eq = np.vstack((A_eq,l))
b_eq.append(2)
return A_eq, b_eq
# Constraint to ensure start at start and finish at goal
def respect_start_finish(L: int, A_eq: list, b_eq: list):
ls = [1]*L + [0]*L*(L-1) # sets only horizontal ones for start (go from)
ljump = [0]*L*L
ljump[L-1] = 1 # Prevent start finish jump
lg = [0]*L*L
ll = [0]*L*(L-1) + [1]*L
for k in range(L-1) : # sets only vertical ones for goal (go to)
ll[k*L] = 1
if k != 0 : # Prevent the shortcut start -> finish
lg[k*L+L-1] = 1
A_eq = np.vstack((A_eq,ls))
A_eq = np.vstack((A_eq,ljump))
A_eq = np.vstack((A_eq,lg))
A_eq = np.vstack((A_eq,ll))
b_eq.append(1)
b_eq.append(0)
b_eq.append(1)
b_eq.append(0)
return A_eq, b_eq
# Constraint to tie the problem together. Necessary but not sufficient to avoid circles
def respect_order(N: int, A_eq, b_eq):
for i in range(N-1) : # Prevent stacked ones
if i == 0 or i == N-1: # Don't touch start or finish
continue
else :
l = [0]*N
l[i] = -1
l = l*N
for j in range(N) :
l[i*N + j] = 1
A_eq = np.vstack((A_eq,l))
b_eq.append(0)
return A_eq, b_eq
# Computes the time to reach from each landmark to the next
def link_list(order: List[int], landmarks: List[Landmark]) -> List[Landmark]:
# Read the parameters from the file
with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
detour_factor = parameters['detour_factor']
speed = parameters['average_walking_speed']
L = []
j = 0
total_dist = 0
while j < len(order)-1 :
elem = landmarks[order[j]]
next = landmarks[order[j+1]]
d = get_distance(elem.location, next.location, detour_factor, speed)[1]
elem.time_to_reach_next = d
L.append(elem)
j += 1
total_dist += d
L.append(next)
return L, total_dist
def link_list_simple(ordered_visit: List[Landmark])-> List[Landmark] :
# Read the parameters from the file
with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
detour_factor = parameters['detour_factor']
speed = parameters['average_walking_speed']
L = []
j = 0
total_dist = 0
while j < len(ordered_visit)-1 :
elem = ordered_visit[j]
next = ordered_visit[j+1]
elem.next_uuid = next.uuid
d = get_distance(elem.location, next.location, detour_factor, speed)[1]
elem.time_to_reach_next = d
if elem.name not in ['start', 'finish'] :
elem.must_do = True
L.append(elem)
j += 1
total_dist += d
L.append(next)
return L, total_dist
# Main optimization pipeline
def solve_optimization (landmarks :List[Landmark], max_steps: int, printing_details: bool) :
L = len(landmarks)
# SET CONSTRAINTS FOR INEQUALITY
c, A_ub, b_ub = init_ub_dist(landmarks, max_steps) # Add the distances from each landmark to the other
A_ub, b_ub = respect_number(L, A_ub, b_ub) # Respect max number of visits (no more possible stops than landmarks).
A_ub, b_ub = break_sym(L, A_ub, b_ub) # break the 'zig-zag' symmetry
# SET CONSTRAINTS FOR EQUALITY
A_eq, b_eq = init_eq_not_stay(L) # Force solution not to stay in same place
A_eq, b_eq = respect_user_mustsee(landmarks, A_eq, b_eq) # Check if there are user_defined must_see. Also takes care of start/goal
A_eq, b_eq = respect_start_finish(L, A_eq, b_eq) # Force start and finish positions
A_eq, b_eq = respect_order(L, A_eq, b_eq) # Respect order of visit (only works when max_steps is limiting factor)
# SET BOUNDS FOR DECISION VARIABLE (x can only be 0 or 1)
x_bounds = [(0, 1)]*L*L
# Solve linear programming problem
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3)
# Raise error if no solution is found
if not res.success :
raise ArithmeticError("No solution could be found, the problem is overconstrained. Please adapt your must_dos")
# If there is a solution, we're good to go, just check for connectedness
else :
order, circle = is_connected(res.x)
i = 0
timeout = 80
while len(circle) != 0 and i < timeout:
A_ub, b_ub = prevent_config(res.x, A_ub, b_ub)
A_ub, b_ub = break_circle(order, len(landmarks), A_ub, b_ub)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3)
order, circle = is_connected(res.x)
if len(circle) == 0 :
break
print(i)
i += 1
if i == timeout :
raise TimeoutError(f"Optimization took too long. No solution found after {timeout} iterations.")
# Add the times to reach and stop optimizing
L, total_dist = link_list(order, landmarks)
if printing_details is True :
if i != 0 :
print(f"Neded to recompute paths {i} times because of unconnected loops...")
print_res(L, len(landmarks))
print("\nTotal score : " + str(int(-res.fun)))
return L

View File

@ -1,303 +0,0 @@
from collections import defaultdict
from heapq import heappop, heappush
from itertools import permutations
import os
import yaml
from shapely import buffer, LineString, Point, Polygon, MultiPoint, concave_hull
from typing import List, Tuple
from math import pi
from structs.landmarks import Landmark
from landmarks_manager import take_most_important
from backend.src.example_optimizer import solve_optimization, link_list_simple, print_res, get_distance
import constants
# Create corridor from tour
def create_corridor(landmarks: List[Landmark], width: float) :
corrected_width = (180*width)/(6371000*pi)
path = create_linestring(landmarks)
obj = buffer(path, corrected_width, join_style="mitre", cap_style="square", mitre_limit=2)
return obj
# Create linestring from tour
def create_linestring(landmarks: List[Landmark])->List[Point] :
points = []
for landmark in landmarks :
points.append(Point(landmark.location))
return LineString(points)
# Check if some coordinates are in area. Used for the corridor
def is_in_area(area: Polygon, coordinates) -> bool :
point = Point(coordinates)
return point.within(area)
# Function to determine if two landmarks are close to each other
def is_close_to(location1: Tuple[float], location2: Tuple[float]):
"""Determine if two locations are close by rounding their coordinates to 3 decimals."""
absx = abs(location1[0] - location2[0])
absy = abs(location1[1] - location2[1])
return absx < 0.001 and absy < 0.001
#return (round(location1[0], 3), round(location1[1], 3)) == (round(location2[0], 3), round(location2[1], 3))
# Rearrange some landmarks in the order of visit to group visit
def rearrange(landmarks: List[Landmark]) -> List[Landmark]:
i = 1
while i < len(landmarks):
j = i+1
while j < len(landmarks):
if is_close_to(landmarks[i].location, landmarks[j].location) and landmarks[i].name not in ['start', 'finish'] and landmarks[j].name not in ['start', 'finish']:
# If they are not adjacent, move the j-th element to be adjacent to the i-th element
if j != i + 1:
landmarks.insert(i + 1, landmarks.pop(j))
break # Move to the next i-th element after rearrangement
j += 1
i += 1
return landmarks
# Step 1: Build the graph
graph = defaultdict(list)
for i in range(len(landmarks)):
for j in range(len(landmarks)):
if i != j:
distance = get_distance(landmarks[i].location, landmarks[j].location, detour, speed)[1]
graph[i].append((distance, j))
# Step 2: Dijkstra's algorithm to find the shortest path from start to finish
start_idx = next(i for i, lm in enumerate(landmarks) if lm.name == 'start')
finish_idx = next(i for i, lm in enumerate(landmarks) if lm.name == 'finish')
distances = {i: float('inf') for i in range(len(landmarks))}
previous_nodes = {i: None for i in range(len(landmarks))}
distances[start_idx] = 0
priority_queue = [(0, start_idx)]
while priority_queue:
current_distance, current_index = heappop(priority_queue)
if current_distance > distances[current_index]:
continue
for neighbor_distance, neighbor_index in graph[current_index]:
distance = current_distance + neighbor_distance
if distance < distances[neighbor_index]:
distances[neighbor_index] = distance
previous_nodes[neighbor_index] = current_index
heappush(priority_queue, (distance, neighbor_index))
# Step 3: Backtrack from finish to start to find the path
path = []
current_index = finish_idx
while current_index is not None:
path.append(landmarks[current_index])
current_index = previous_nodes[current_index]
path.reverse()
return path
"""
def total_path_distance(path: List[Landmark], detour, speed) -> float:
total_distance = 0
for i in range(len(path) - 1):
total_distance += get_distance(path[i].location, path[i + 1].location, detour, speed)[1]
return total_distance
"""
def find_shortest_path_through_all_landmarks(landmarks: List[Landmark]) -> List[Landmark]:
# Read the parameters from the file
with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
detour = parameters['detour_factor']
speed = parameters['average_walking_speed']
# Step 1: Find 'start' and 'finish' landmarks
start_idx = next(i for i, lm in enumerate(landmarks) if lm.name == 'start')
finish_idx = next(i for i, lm in enumerate(landmarks) if lm.name == 'finish')
start_landmark = landmarks[start_idx]
finish_landmark = landmarks[finish_idx]
# Step 2: Create a list of unvisited landmarks excluding 'start' and 'finish'
unvisited_landmarks = [lm for i, lm in enumerate(landmarks) if i not in [start_idx, finish_idx]]
# Step 3: Initialize the path with the 'start' landmark
path = [start_landmark]
coordinates = [landmarks[start_idx].location]
current_landmark = start_landmark
# Step 4: Use nearest neighbor heuristic to visit all landmarks
while unvisited_landmarks:
nearest_landmark = min(unvisited_landmarks, key=lambda lm: get_time(current_landmark.location, lm.location, detour, speed))
path.append(nearest_landmark)
coordinates.append(nearest_landmark.location)
current_landmark = nearest_landmark
unvisited_landmarks.remove(nearest_landmark)
# Step 5: Finally add the 'finish' landmark to the path
path.append(finish_landmark)
coordinates.append(landmarks[finish_idx].location)
path_poly = Polygon(coordinates)
return path, path_poly
# Returns a list of minor landmarks around the planned path to enhance experience
def get_minor_landmarks(all_landmarks: List[Landmark], visited_landmarks: List[Landmark], width: float) -> List[Landmark] :
second_order_landmarks = []
visited_names = []
area = create_corridor(visited_landmarks, width)
for visited in visited_landmarks :
visited_names.append(visited.name)
for landmark in all_landmarks :
if is_in_area(area, landmark.location) and landmark.name not in visited_names:
second_order_landmarks.append(landmark)
with constants.LANDMARK_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
return take_most_important(second_order_landmarks, parameters, len(visited_landmarks))
# Try fix the shortest path using shapely
def fix_using_polygon(tour: List[Landmark])-> List[Landmark] :
coords = []
coords_dict = {}
for landmark in tour :
coords.append(landmark.location)
if landmark.name != 'finish' :
coords_dict[landmark.location] = landmark
tour_poly = Polygon(coords)
better_tour_poly = tour_poly.buffer(0)
try :
xs, ys = better_tour_poly.exterior.xy
if len(xs) != len(tour) :
better_tour_poly = concave_hull(MultiPoint(coords)) # Create concave hull with "core" of tour leaving out start and finish
xs, ys = better_tour_poly.exterior.xy
except :
better_tour_poly = concave_hull(MultiPoint(coords)) # Create concave hull with "core" of tour leaving out start and finish
xs, ys = better_tour_poly.exterior.xy
# reverse the xs and ys
xs.reverse()
ys.reverse()
better_tour = [] # List of ordered visit
name_index = {} # Maps the name of a landmark to its index in the concave polygon
# Loop through the polygon and generate the better (ordered) tour
for i,x in enumerate(xs[:-1]) :
y = ys[i]
better_tour.append(coords_dict[tuple((x,y))])
name_index[coords_dict[tuple((x,y))].name] = i
# Scroll the list to have start in front again
start_index = name_index['start']
better_tour = better_tour[start_index:] + better_tour[:start_index]
# Append the finish back and correct the time to reach
better_tour.append(tour[-1])
# Rearrange only if polygon still not simple
if not better_tour_poly.is_simple :
better_tour = rearrange(better_tour)
return better_tour
# Second stage of the optimization. Use linear programming again to refine the path
def refine_optimization(landmarks: List[Landmark], base_tour: List[Landmark], max_time: int, detour: int, print_infos: bool) -> List[Landmark] :
# Read the parameters from the file
with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
max_landmarks = parameters['max_landmarks']
if len(base_tour)-2 >= max_landmarks :
return base_tour
# No need to refine if no detour is taken
# if detour == 0 :
if False :
new_tour = base_tour
else :
minor_landmarks = get_minor_landmarks(landmarks, base_tour, 200)
if print_infos : print("Using " + str(len(minor_landmarks)) + " minor landmarks around the predicted path")
# full set of visitable landmarks
full_set = base_tour[:-1] + minor_landmarks # create full set of possible landmarks (without finish)
full_set.append(base_tour[-1]) # add finish back
# get a new tour
new_tour = solve_optimization(full_set, max_time+detour, False, max_landmarks)
if new_tour is None :
new_tour = base_tour
# Link the new tour
new_tour, new_dist = link_list_simple(new_tour)
# If the tour contains at most one landmark besides start and finish, return it as is
if len(new_tour) < 4 :
return new_tour
# Find shortest path using the nearest neighbor heuristic
better_tour, better_poly = find_shortest_path_through_all_landmarks(new_tour)
# Fix the tour using Polygons if the path looks weird
if base_tour[0].location == base_tour[-1].location and not better_poly.is_valid :
better_tour = fix_using_polygon(better_tour)
# Link the tour again
better_tour, better_dist = link_list_simple(better_tour)
# Choose the better tour depending on walked distance
if new_dist < better_dist :
final_tour = new_tour
else :
final_tour = better_tour
if print_infos :
print("\n\n\nRefined tour (result of second stage optimization): ")
print_res(final_tour)
total_score = 0
for elem in final_tour :
total_score += elem.attractiveness
print("\nTotal score : " + str(total_score))
return final_tour

View File

@ -1,80 +0,0 @@
import pandas as pd
from typing import List
from landmarks_manager import LandmarkManager
from fastapi.encoders import jsonable_encoder
from backend.src.example_optimizer import solve_optimization
# from refiner import refine_optimization
from structs.landmarks import Landmark
from structs.landmarktype import LandmarkType
from structs.preferences import Preferences, Preference
# Helper function to create a .txt file with results
def write_data(L: List[Landmark], file_name: str):
data = pd.DataFrame()
i = 0
for landmark in L :
data[i] = jsonable_encoder(landmark)
i += 1
data.to_json(file_name, indent = 2, force_ascii=False)
def main(coordinates: tuple[float, float]) -> List[Landmark]:
manager = LandmarkManager()
preferences = Preferences(
sightseeing=Preference(
name='sightseeing',
type=LandmarkType(landmark_type='sightseeing'),
score = 5
),
nature=Preference(
name='nature',
type=LandmarkType(landmark_type='nature'),
score = 5
),
shopping=Preference(
name='shopping',
type=LandmarkType(landmark_type='shopping'),
score = 5
)
)
# Create start and finish
start = Landmark(name='start', type=LandmarkType(landmark_type='start'), location=coordinates, osm_type='start', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
finish = Landmark(name='finish', type=LandmarkType(landmark_type='finish'), location=coordinates, osm_type='finish', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
# Generate the landmarks from the start location
landmarks, landmarks_short = manager.get_landmark_lists(preferences=preferences, center_coordinates=start.location)
print([l.name for l in landmarks_short])
#write_data(landmarks, "landmarks.txt")
# Insert start and finish to the landmarks list
landmarks_short.insert(0, start)
landmarks_short.append(finish)
# TODO use these parameters in another way
max_walking_time = 3 # hours
detour = 30 # minutes
# First stage optimization
base_tour = solve_optimization(landmarks_short, max_walking_time*60, True)
# Second stage optimization
# refined_tour = refine_optimization(landmarks, base_tour, max_walking_time*60+detour, True)
return base_tour
if __name__ == '__main__':
start = (48.847132, 2.312359) # Café Chez César
# start = (47.377859, 8.540585) # Zurich HB
main(start)

View File

@ -1,308 +0,0 @@
import math as m
import json, os
from typing import List, Tuple, Optional
from OSMPythonTools.overpass import Overpass, overpassQueryBuilder
from pywikibot import ItemPage, Site
from pywikibot import config
config.put_throttle = 0
config.maxlag = 0
from structs.landmarks import Landmark, LandmarkType
from structs.preferences import Preferences, Preference
SIGHTSEEING = LandmarkType(landmark_type='sightseeing')
NATURE = LandmarkType(landmark_type='nature')
SHOPPING = LandmarkType(landmark_type='shopping')
# Include the json here
# Create a list of all things to visit given some preferences and a city. Ready for the optimizer
def generate_landmarks(preferences: Preferences, coordinates: Tuple[float, float]) :
l_sights, l_nature, l_shop = get_amenities()
L = []
# List for sightseeing
if preferences.sightseeing.score != 0 :
L1 = get_landmarks(l_sights, SIGHTSEEING, coordinates=coordinates)
correct_score(L1, preferences.sightseeing)
L += L1
# List for nature
if preferences.nature.score != 0 :
L2 = get_landmarks(l_nature, NATURE, coordinates=coordinates)
correct_score(L2, preferences.nature)
L += L2
# List for shopping
if preferences.shopping.score != 0 :
L3 = get_landmarks(l_shop, SHOPPING, coordinates=coordinates)
correct_score(L3, preferences.shopping)
L += L3
L = remove_duplicates(L)
return L, take_most_important(L)
# Helper function to gather the amenities list
def get_amenities() -> List[List[str]] :
# Get the list of amenities from the files
sightseeing = get_list('/amenities/sightseeing.am')
nature = get_list('/amenities/nature.am')
shopping = get_list('/amenities/shopping.am')
return sightseeing, nature, shopping
# Helper function to read a .am file and generate the corresponding list
def get_list(path: str) -> List[str] :
with open(os.path.dirname(os.path.abspath(__file__)) + path) as f :
content = f.readlines()
amenities = []
for line in content :
if not line.startswith('#') :
amenities.append(line.strip('\n'))
return amenities
# Take the most important landmarks from the list
def take_most_important(L: List[Landmark], N = 0) -> List[Landmark] :
# Read the parameters from the file
with open (os.path.dirname(os.path.abspath(__file__)) + '/parameters/landmarks_manager.params', "r") as f :
parameters = json.loads(f.read())
N_important = parameters['N important']
L_copy = []
L_clean = []
scores = [0]*len(L)
names = []
name_id = {}
for i, elem in enumerate(L) :
if elem.name not in names :
names.append(elem.name)
name_id[elem.name] = [i]
L_copy.append(elem)
else :
name_id[elem.name] += [i]
scores = []
for j in name_id[elem.name] :
scores.append(L[j].attractiveness)
best_id = max(range(len(scores)), key=scores.__getitem__)
t = name_id[elem.name][best_id]
if t == i :
for old in L_copy :
if old.name == elem.name :
old.attractiveness = L[t].attractiveness
scores = [0]*len(L_copy)
for i, elem in enumerate(L_copy) :
scores[i] = elem.attractiveness
res = sorted(range(len(scores)), key = lambda sub: scores[sub])[-(N_important-N):]
for i, elem in enumerate(L_copy) :
if i in res :
L_clean.append(elem)
return L_clean
# Remove duplicate elements and elements with low score
def remove_duplicates(L: List[Landmark]) -> List[Landmark] :
"""
Removes duplicate landmarks based on their names from the given list.
Parameters:
L (List[Landmark]): A list of Landmark objects.
Returns:
List[Landmark]: A list of unique Landmark objects based on their names.
"""
L_clean = []
names = []
for landmark in L :
if landmark.name in names:
continue
else :
names.append(landmark.name)
L_clean.append(landmark)
return L_clean
# Correct the score of a list of landmarks by taking into account preference settings
def correct_score(L: List[Landmark], preference: Preference) :
if len(L) == 0 :
return
if L[0].type != preference.type :
raise TypeError(f"LandmarkType {preference.type} does not match the type of Landmark {L[0].name}")
for elem in L :
elem.attractiveness = int(elem.attractiveness*preference.score/5) # arbitrary computation
# Function to count elements within a certain radius of a location
def count_elements_within_radius(coordinates: Tuple[float, float], radius: int) -> int:
lat = coordinates[0]
lon = coordinates[1]
alpha = (180*radius)/(6371000*m.pi)
bbox = {'latLower':lat-alpha,'lonLower':lon-alpha,'latHigher':lat+alpha,'lonHigher': lon+alpha}
# Build the query to find elements within the radius
radius_query = overpassQueryBuilder(bbox=[bbox['latLower'],bbox['lonLower'],bbox['latHigher'],bbox['lonHigher']],
elementType=['node', 'way', 'relation'])
try :
overpass = Overpass()
radius_result = overpass.query(radius_query)
N_elem = radius_result.countWays() + radius_result.countRelations()
#print(f"There are {N_elem} ways/relations within 50m")
if N_elem is None :
return 0
return N_elem
except :
return 0
# Creates a bounding box around given coordinates, side_length in meters
def create_bbox(coordinates: Tuple[float, float], side_length: int) -> Tuple[float, float, float, float]:
lat = coordinates[0]
lon = coordinates[1]
# Half the side length in km (since it's a square bbox)
half_side_length_km = side_length / 2 / 1000
# Convert distance to degrees
lat_diff = half_side_length_km / 111 # 1 degree latitude is approximately 111 km
lon_diff = half_side_length_km / (111 * m.cos(m.radians(lat))) # Adjust for longitude based on latitude
# Calculate bbox
min_lat = lat - lat_diff
max_lat = lat + lat_diff
min_lon = lon - lon_diff
max_lon = lon + lon_diff
return min_lat, min_lon, max_lat, max_lon
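A worked example of the conversion above (illustrative, not from the original file), for a 5000 m bounding box around Zurich HB:
import math

lat, lon = 47.3779, 8.5403                  # Zurich HB, assumed centre
half_km = 5000 / 2 / 1000                   # 2.5 km
lat_diff = half_km / 111                    # ~0.0225 degrees of latitude
lon_diff = half_km / (111 * math.cos(math.radians(lat)))   # ~0.0333 degrees of longitude
print(lat - lat_diff, lon - lon_diff, lat + lat_diff, lon + lon_diff)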
def get_landmarks(list_amenity: list, landmarktype: LandmarkType, coordinates: Tuple[float, float]) -> List[Landmark] :
# Read the parameters from the file
with open (os.path.dirname(os.path.abspath(__file__)) + '/parameters/landmarks_manager.params', "r") as f :
parameters = json.loads(f.read())
tag_coeff = parameters['tag coeff']
park_coeff = parameters['park coeff']
church_coeff = parameters['church coeff']
radius = parameters['radius close to']
bbox_side = parameters['city bbox side']
# Create bbox around start location
bbox = create_bbox(coordinates, bbox_side)
# Initialize some variables
N = 0
L = []
overpass = Overpass()
for amenity in list_amenity :
query = overpassQueryBuilder(bbox=bbox, elementType=['way', 'relation'], selector=amenity, includeCenter=True, out='body')
result = overpass.query(query)
N += result.countElements()
for elem in result.elements():
name = elem.tag('name') # Add name
location = (elem.centerLat(), elem.centerLon()) # Add coordinates (lat, lon)
# skip if the location is imprecise
if name is None or location[0] is None:
continue
# skip if unused
if 'disused:leisure' in elem.tags().keys():
continue
# skip if part of another building
if 'building:part' in elem.tags().keys() and elem.tag('building:part') == 'yes':
continue
else :
osm_type = elem.type() # Add type : 'way' or 'relation'
osm_id = elem.id() # Add OSM id
elem_type = landmarktype # Add the landmark type, e.g. 'sightseeing'
n_tags = len(elem.tags().keys()) # Add number of tags
# remove specific tags
skip = False
for tag in elem.tags().keys() :
if "pay" in tag :
n_tags -= 1 # discard payment options for tags
if "disused" in tag :
skip = True # skip disused amenities
break
if "wikipedia" in tag :
n_tags += 3 # wikipedia entries count more
if tag == "wikidata" :
Q = elem.tag('wikidata')
site = Site("wikidata", "wikidata")
item = ItemPage(site, Q)
item.get()
n_languages = len(item.labels)
n_tags += n_languages/10
if elem_type != LandmarkType(landmark_type="nature") :
if "leisure" in tag and elem.tag('leisure') == "park":
elem_type = LandmarkType(landmark_type="nature")
if amenity not in ["'shop'='department_store'", "'shop'='mall'"] :
if "shop" in tag :
skip = True
break
if tag == "building" and elem.tag('building') in ['retail', 'supermarket', 'parking']:
skip = True
break
if skip:
continue
# Add score of given landmark based on the number of surrounding elements. Penalty for churches as there are A LOT
if amenity == "'amenity'='place_of_worship'" :
#score = int((count_elements_within_radius(location, radius) + (n_tags*tag_coeff) )*church_coeff)
score = int((count_elements_within_radius(location, radius) + ((n_tags**1.2)*tag_coeff) )*church_coeff)
elif amenity == "'leisure'='park'" :
score = int((count_elements_within_radius(location, radius) + ((n_tags**1.2)*tag_coeff) )*park_coeff)
else :
score = int(count_elements_within_radius(location, radius) + ((n_tags**1.2)*tag_coeff))
if score is not None :
# Generate the landmark and append it to the list
#print(f"There are {n_tags} tags on this Landmark. Total score : {score}\n")
landmark = Landmark(name=name, type=elem_type, location=location, osm_type=osm_type, osm_id=osm_id, attractiveness=score, must_do=False, n_tags=int(n_tags))
L.append(landmark)
return L

View File

@ -0,0 +1,34 @@
version: 1
disable_existing_loggers: False
formatters:
simple:
format: '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
handlers:
console:
class: rich.logging.RichHandler
formatter: simple
# access:
# class: logging.FileHandler
# filename: logs/access.log
# level: INFO
# formatter: simple
loggers:
uvicorn.error:
level: INFO
handlers:
- console
propagate: no
# uvicorn.access:
# level: INFO
# handlers:
# - access
# propagate: no
root:
level: INFO
handlers:
- console
propagate: yes
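One way this configuration might be consumed (a sketch, not part of the change; the file name is an assumption, and rich plus PyYAML must be installed for the RichHandler and the loader):
import logging
import logging.config
import yaml

with open('log_config.yaml') as f:          # assumed filename
    config = yaml.safe_load(f)
logging.config.dictConfig(config)

logging.getLogger('uvicorn.error').info('logging configured')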

View File

@ -1,20 +1,24 @@
from backend.src.example_optimizer import solve_optimization
# from refiner import refine_optimization
from landmarks_manager import LandmarkManager
from structs.landmarks import Landmark
from structs.landmarktype import LandmarkType
from structs.preferences import Preferences
import logging
from fastapi import FastAPI, Query, Body
from structs.landmark import Landmark
from structs.preferences import Preferences
from structs.linked_landmarks import LinkedLandmarks
from utils.landmarks_manager import LandmarkManager
from utils.optimizer import Optimizer
from utils.refiner import Refiner
logger = logging.getLogger(__name__)
app = FastAPI()
manager = LandmarkManager()
# TODO: needs a global variable to store the landmarks across function calls
# linked_tour = []
optimizer = Optimizer()
refiner = Refiner(optimizer=optimizer)
@app.post("/route/new")
def main1(preferences: Preferences, start: tuple[float, float], end: tuple[float, float] = None) -> str:
def get_route(preferences: Preferences, start: tuple[float, float], end: tuple[float, float] | None = None) -> str:
'''
Main function to call the optimizer.
:param preferences: the preferences specified by the user as the post body
@ -22,19 +26,23 @@ def main1(preferences: Preferences, start: tuple[float, float], end: tuple[float
:param end: the coordinates of the finishing point as a tuple of floats (as url query parameters)
:return: the uuid of the first landmark in the optimized route
'''
if preferences is None :
if preferences is None:
raise ValueError("Please provide preferences in the form of a 'Preference' BaseModel class.")
if start is None:
raise ValueError("Please provide the starting coordinates as a tuple of floats.")
if end is None:
end = start
logger.info("No end coordinates provided. Using start=end.")
start_landmark = Landmark(name='start', type='start', location=(start[0], start[1]), osm_type='start', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
end_landmark = Landmark(name='end', type='finish', location=(end[0], end[1]), osm_type='end', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
start_landmark = Landmark(name='start', type=LandmarkType(landmark_type='start'), location=(start[0], start[1]), osm_type='start', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
end_landmark = Landmark(name='end', type=LandmarkType(landmark_type='end'), location=(end[0], end[1]), osm_type='end', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
# Generate the landmarks from the start location
landmarks, landmarks_short = LandmarkManager.get_landmark_lists(preferences=preferences, coordinates=start.location)
print([l.name for l in landmarks_short])
landmarks, landmarks_short = manager.generate_landmarks_list(
center_coordinates = start,
preferences = preferences
)
# insert start and finish to the landmarks list
landmarks_short.insert(0, start_landmark)
landmarks_short.append(end_landmark)
@ -44,14 +52,13 @@ def main1(preferences: Preferences, start: tuple[float, float], end: tuple[float
detour = 30 # minutes
# First stage optimization
base_tour = solve_optimization(landmarks_short, max_walking_time*60, True)
base_tour = optimizer.solve_optimization(max_walking_time*60, landmarks_short)
# Second stage optimization
# refined_tour = refine_optimization(landmarks, base_tour, max_walking_time*60+detour, True)
refined_tour = refiner.refine_optimization(landmarks, base_tour, max_walking_time*60, detour)
# linked_tour = ...
# return linked_tour[0].uuid
return base_tour[0].uuid
linked_tour = LinkedLandmarks(refined_tour)
return linked_tour[0].uuid

View File

@ -1,446 +0,0 @@
import numpy as np
import json, os
from typing import List, Tuple
from scipy.optimize import linprog
from collections import defaultdict, deque
from geopy.distance import geodesic
from structs.landmarks import Landmark
# Function to print the result
def print_res(L: List[Landmark]):
print('The following order is suggested : ')
dist = 0
for elem in L :
if elem.time_to_reach_next is not None :
print('- ' + elem.name + ', time to reach next = ' + str(elem.time_to_reach_next))
dist += elem.time_to_reach_next
else :
print('- ' + elem.name)
print("\nMinutes walked : " + str(dist))
print(f"Visited {len(L)-2} landmarks")
# Prevent the use of a particular solution
def prevent_config(resx):
for i, elem in enumerate(resx):
resx[i] = round(elem)
N = len(resx) # Number of edges
L = int(np.sqrt(N)) # Number of landmarks
nonzeroind = np.nonzero(resx)[0] # the return is a little funky so I use the [0]
nonzero_tup = np.unravel_index(nonzeroind, (L,L))
ind_a = nonzero_tup[0].tolist()
vertices_visited = ind_a
vertices_visited.remove(0)
ones = [1]*L
h = [0]*N
for i in range(L) :
if i in vertices_visited :
h[i*L:i*L+L] = ones
return h, [len(vertices_visited)-1]
# Prevents the creation of the same circle (both directions)
def prevent_circle(circle_vertices: list, L: int) :
l1 = [0]*L*L
l2 = [0]*L*L
for i, node in enumerate(circle_vertices[:-1]) :
next = circle_vertices[i+1]
l1[node*L + next] = 1
l2[next*L + node] = 1
s = circle_vertices[0]
g = circle_vertices[-1]
l1[g*L + s] = 1
l2[s*L + g] = 1
return np.vstack((l1, l2)), [0, 0]
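To make the constraint concrete (an illustration, not part of the original file): for a detected sub-tour 2 -> 3 -> 4 -> 2 with L = 5 landmarks, both travel directions of the cycle are forced to zero.
A, b = prevent_circle([2, 3, 4], 5)
print(A.shape)   # (2, 25): one row per direction of the cycle
# Row 0 marks the edges 2->3, 3->4 and 4->2; row 1 marks 3->2, 4->3 and 2->4.
# With b = [0, 0] these become equality constraints, so the next solve cannot
# reuse this sub-tour in either direction.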
# Returns the order of visit as well as any circles if there are some
def is_connected(resx) :
# first round the results to have only 0-1 values
for i, elem in enumerate(resx):
resx[i] = round(elem)
N = len(resx) # length of res
L = int(np.sqrt(N)) # number of landmarks. CAST INTO INT but should not be a problem because N = L**2 by def.
n_edges = resx.sum() # number of edges
nonzeroind = np.nonzero(resx)[0] # the return is a little funny so I use the [0]
nonzero_tup = np.unravel_index(nonzeroind, (L,L))
ind_a = nonzero_tup[0].tolist()
ind_b = nonzero_tup[1].tolist()
# Step 1: Create a graph representation
graph = defaultdict(list)
for a, b in zip(ind_a, ind_b):
graph[a].append(b)
# Step 2: Function to perform BFS/DFS to extract journeys
def get_journey(start):
journey_nodes = []
visited = set()
stack = deque([start])
while stack:
node = stack.pop()
if node not in visited:
visited.add(node)
journey_nodes.append(node)
for neighbor in graph[node]:
if neighbor not in visited:
stack.append(neighbor)
return journey_nodes
# Step 3: Extract all journeys
all_journeys_nodes = []
visited_nodes = set()
for node in ind_a:
if node not in visited_nodes:
journey_nodes = get_journey(node)
all_journeys_nodes.append(journey_nodes)
visited_nodes.update(journey_nodes)
for l in all_journeys_nodes :
if 0 in l :
order = l
all_journeys_nodes.remove(l)
break
if len(all_journeys_nodes) == 0 :
return order, None
return order, all_journeys_nodes
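A small, hand-checked example of what this returns (illustrative only): a 4-landmark solution consisting of the edge 0 -> 3 plus an isolated loop between 1 and 2.
import numpy as np

x = np.zeros(16)        # L = 4, so the decision vector has 16 entries
x[0 * 4 + 3] = 1        # 0 -> 3
x[1 * 4 + 2] = 1        # 1 -> 2
x[2 * 4 + 1] = 1        # 2 -> 1
order, circles = is_connected(x)
print(order)     # prints [0, 3], the journey containing the start
print(circles)   # prints [[1, 2]], the disconnected loop that must be broken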
# Function that returns the time in minutes from one location to another
def get_time(p1: Tuple[float, float], p2: Tuple[float, float], detour: float, speed: float) :
# Compute the straight-line distance in km
if p1 == p2 :
return 0
else:
dist = geodesic(p1, p2).kilometers
# Apply the detour factor to estimate the actual walking distance in an average city (in km)
walk_dist = dist*detour
# Time to walk this distance (in minutes)
walk_time = walk_dist/speed*60
return round(walk_time)
# Initialize A and c. Compute the distances from all landmarks to each other and store attractiveness
# We want to maximize the sightseeing : max c*x s.t. A_ub*x <= b_ub and A_eq*x = b_eq
def init_ub_dist(landmarks: List[Landmark], max_steps: int):
with open (os.path.dirname(os.path.abspath(__file__)) + '/parameters/optimizer.params', "r") as f :
parameters = json.loads(f.read())
detour = parameters['detour factor']
speed = parameters['average walking speed']
# Objective function coefficients. a*x1 + b*x2 + c*x3 + ...
c = []
# Coefficients of inequality constraints (left-hand side)
A_ub = []
for spot1 in landmarks :
dist_table = [0]*len(landmarks)
c.append(-spot1.attractiveness)
for j, spot2 in enumerate(landmarks) :
t = get_time(spot1.location, spot2.location, detour, speed)
dist_table[j] = t
closest = sorted(dist_table)[:22]
for i, dist in enumerate(dist_table) :
if dist not in closest :
dist_table[i] = 32700
A_ub += dist_table
c = c*len(landmarks)
return c, A_ub, [max_steps]
# Constraint to respect only one travel per landmark. Also caps the total number of visited landmarks
def respect_number(L: int, max_landmarks: int):
ones = [1]*L
zeros = [0]*L
A = ones + zeros*(L-1)
b = [1]
for i in range(L-1) :
h_new = zeros*i + ones + zeros*(L-1-i)
A = np.vstack((A, h_new))
b.append(1)
if max_landmarks is None :
# Read the parameters from the file
with open (os.path.dirname(os.path.abspath(__file__)) + '/parameters/optimizer.params', "r") as f :
parameters = json.loads(f.read())
max_landmarks = parameters['max landmarks']
A = np.vstack((A, ones*L))
b.append(max_landmarks+1)
return A, b
# Constraint to not have d14 and d41 simultaneously. Does not prevent cyclic paths with more elements
def break_sym(L):
upper_ind = np.triu_indices(L,0,L)
up_ind_x = upper_ind[0]
up_ind_y = upper_ind[1]
A = [0]*L*L # useless row to prevent overhead ? better solution welcomed
# A[up_ind_x[0]*L + up_ind_y[0]] = 1
# A[up_ind_y[0]*L + up_ind_x[0]] = 1
b = [1]
for i, _ in enumerate(up_ind_x[1:]) :
l = [0]*L*L
if up_ind_x[i] != up_ind_y[i] :
l[up_ind_x[i]*L + up_ind_y[i]] = 1
l[up_ind_y[i]*L + up_ind_x[i]] = 1
A = np.vstack((A,l))
b.append(1)
return A, b
# Constraint to not stay in position. Removes d11, d22, d33, etc.
def init_eq_not_stay(L: int):
l = [0]*L*L
for i in range(L) :
for j in range(L) :
if j == i :
l[j + i*L] = 1
l = np.array(np.array(l), dtype=np.int8)
return [l], [0]
# Go through the landmarks and force the optimizer to use landmarks marked as must_do
def respect_user_must_do(landmarks: List[Landmark]) :
L = len(landmarks)
A = [0]*L*L
b = [0]
for i, elem in enumerate(landmarks[1:]) :
if elem.must_do is True and elem.name not in ['finish', 'start']:
l = [0]*L*L
l[i*L:i*L+L] = [1]*L # set mandatory departures from landmarks tagged as 'must_do'
A = np.vstack((A,l))
b.append(1)
return A, b
# Constraint to ensure start at start and finish at goal
def respect_start_finish(L: int):
l_start = [1]*L + [0]*L*(L-1) # sets departures only for start (horizontal ones)
l_start[L-1] = 0 # prevents the jump from start to finish
l_goal = [0]*L*L # sets arrivals only for finish (vertical ones)
l_L = [0]*L*(L-1) + [1]*L # prevents arrivals at start and departures from goal
for k in range(L-1) : # sets only vertical ones for goal (go to)
l_L[k*L] = 1
if k != 0 :
l_goal[k*L+L-1] = 1
A = np.vstack((l_start, l_goal))
b = [1, 1]
A = np.vstack((A,l_L))
b.append(0)
return A, b
# Constraint to tie the problem together. Necessary but not sufficient to avoid circles
def respect_order(L: int):
A = [0]*L*L # useless row to reduce overhead ? better solution is welcome
b = [0]
for i in range(L-1) : # Prevent stacked ones
if i == 0 or i == L-1: # Don't touch start or finish
continue
else :
l = [0]*L
l[i] = -1
l = l*L
for j in range(L) :
l[i*L + j] = 1
A = np.vstack((A,l))
b.append(0)
return A, b
# Computes the time to reach from each landmark to the next
def link_list(order: List[int], landmarks: List[Landmark])->List[Landmark] :
# Read the parameters from the file
with open (os.path.dirname(os.path.abspath(__file__)) + '/parameters/optimizer.params', "r") as f :
parameters = json.loads(f.read())
detour_factor = parameters['detour factor']
speed = parameters['average walking speed']
L = []
j = 0
total_dist = 0
while j < len(order)-1 :
elem = landmarks[order[j]]
next = landmarks[order[j+1]]
d = get_time(elem.location, next.location, detour_factor, speed)
elem.time_to_reach_next = d
elem.must_do = True
elem.location = (round(elem.location[0], 5), round(elem.location[1], 5))
L.append(elem)
j += 1
total_dist += d
next.location = (round(next.location[0], 5), round(next.location[1], 5))
L.append(next)
return L, total_dist
# Same as link_list but does it on a already ordered list
def link_list_simple(ordered_visit: List[Landmark])-> List[Landmark] :
# Read the parameters from the file
with open (os.path.dirname(os.path.abspath(__file__)) + '/parameters/optimizer.params', "r") as f :
parameters = json.loads(f.read())
detour_factor = parameters['detour factor']
speed = parameters['average walking speed']
L = []
j = 0
total_dist = 0
while j < len(ordered_visit)-1 :
elem = ordered_visit[j]
next = ordered_visit[j+1]
elem.next_uuid = next.uuid
d = get_time(elem.location, next.location, detour_factor, speed)
elem.time_to_reach_next = d
if elem.name not in ['start', 'finish'] :
elem.must_do = True
L.append(elem)
j += 1
total_dist += d
L.append(next)
return L, total_dist
# Main optimization pipeline
def solve_optimization (landmarks :List[Landmark], max_steps: int, printing_details: bool, max_landmarks = None) :
L = len(landmarks)
# SET CONSTRAINTS FOR INEQUALITY
c, A_ub, b_ub = init_ub_dist(landmarks, max_steps) # Add the distances from each landmark to the other
A, b = respect_number(L, max_landmarks) # Respect max number of visits (no more possible stops than landmarks).
A_ub = np.vstack((A_ub, A), dtype=np.int16)
b_ub += b
A, b = break_sym(L) # break the 'zig-zag' symmetry
A_ub = np.vstack((A_ub, A), dtype=np.int16)
b_ub += b
# SET CONSTRAINTS FOR EQUALITY
A_eq, b_eq = init_eq_not_stay(L) # Force solution not to stay in same place
A, b = respect_user_must_do(landmarks) # Check if there are user_defined must_see. Also takes care of start/goal
A_eq = np.vstack((A_eq, A), dtype=np.int8)
b_eq += b
A, b = respect_start_finish(L) # Force start and finish positions
A_eq = np.vstack((A_eq, A), dtype=np.int8)
b_eq += b
A, b = respect_order(L) # Respect order of visit (only works when max_steps is limiting factor)
A_eq = np.vstack((A_eq, A), dtype=np.int8)
b_eq += b
# SET BOUNDS FOR DECISION VARIABLE (x can only be 0 or 1)
x_bounds = [(0, 1)]*L*L
# Solve linear programming problem
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3)
# Raise error if no solution is found
if not res.success :
raise ArithmeticError("No solution could be found, the problem is overconstrained. Please adapt your must_dos")
# If there is a solution, we're good to go, just check for connectedness
else :
order, circles = is_connected(res.x)
#nodes, edges = is_connected(res.x)
i = 0
timeout = 80
while circles is not None and i < timeout:
A, b = prevent_config(res.x)
A_ub = np.vstack((A_ub, A))
b_ub += b
#A_ub, b_ub = prevent_circle(order, len(landmarks), A_ub, b_ub)
for circle in circles :
A, b = prevent_circle(circle, len(landmarks))
A_eq = np.vstack((A_eq, A))
b_eq += b
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3)
if not res.success :
print("Solving failed because of overconstrained problem")
return None
order, circles = is_connected(res.x)
#nodes, edges = is_connected(res.x)
if circles is None :
break
print(i)
i += 1
if i == timeout :
raise TimeoutError(f"Optimization took too long. No solution found after {timeout} iterations.")
# Add the times to reach and stop optimizing
L, _ = link_list(order, landmarks)
if printing_details is True :
if i != 0 :
print(f"Neded to recompute paths {i} times because of unconnected loops...")
print_res(L)
print("\nTotal score : " + str(int(-res.fun)))
return L

View File

@ -1,6 +1,6 @@
city_bbox_side: 5000 #m
radius_close_to: 30
church_coeff: 0.6
park_coeff: 1.5
tag_coeff: 100
radius_close_to: 50
church_coeff: 0.8
park_coeff: 1.2
tag_coeff: 10
N_important: 40
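These coefficients feed the attractiveness formula used in the landmark manager (see get_landmarks earlier in this diff); a worked example with assumed counts, not measured data:
n_tags = 20                       # tags on the OSM element (assumed)
nearby = 30                       # elements within radius_close_to (assumed)
tag_coeff, church_coeff = 10, 0.8
score = int((nearby + (n_tags ** 1.2) * tag_coeff) * church_coeff)
print(score)   # 315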

View File

@ -1,8 +0,0 @@
{
"city bbox side" : 5000,
"radius close to" : 50,
"church coeff" : 0.8,
"park coeff" : 1.2,
"tag coeff" : 10,
"N important" : 40
}

View File

@ -1,5 +0,0 @@
{
"detour factor" : 1.4,
"average walking speed" : 4.8,
"max landmarks" : 7
}

View File

@ -1,3 +1,4 @@
detour_factor: 1.4
detour_corridor_width: 200
average_walking_speed: 4.8
max_landmarks: 40
max_landmarks: 7

View File

@ -1,244 +0,0 @@
import os, json
from shapely import buffer, LineString, Point, Polygon, MultiPoint, concave_hull
from typing import List, Tuple
from math import pi
from structs.landmarks import Landmark
from landmarks_manager import take_most_important
from optimizer_v4 import solve_optimization, link_list_simple, print_res, get_time
# Create corridor from tour
def create_corridor(landmarks: List[Landmark], width: float) :
corrected_width = (180*width)/(6371000*pi)
path = create_linestring(landmarks)
obj = buffer(path, corrected_width, join_style="mitre", cap_style="square", mitre_limit=2)
return obj
# Create linestring from tour
def create_linestring(landmarks: List[Landmark])->List[Point] :
points = []
for landmark in landmarks :
points.append(Point(landmark.location))
return LineString(points)
# Check if some coordinates are in area. Used for the corridor
def is_in_area(area: Polygon, coordinates) -> bool :
point = Point(coordinates)
return point.within(area)
# Function to determine if two landmarks are close to each other
def is_close_to(location1: Tuple[float], location2: Tuple[float]):
"""Determine if two locations are close by rounding their coordinates to 3 decimals."""
absx = abs(location1[0] - location2[0])
absy = abs(location1[1] - location2[1])
return absx < 0.001 and absy < 0.001
#return (round(location1[0], 3), round(location1[1], 3)) == (round(location2[0], 3), round(location2[1], 3))
# Rearrange some landmarks in the order of visit to group visit
def rearrange(landmarks: List[Landmark]) -> List[Landmark]:
i = 1
while i < len(landmarks):
j = i+1
while j < len(landmarks):
if is_close_to(landmarks[i].location, landmarks[j].location) and landmarks[i].name not in ['start', 'finish'] and landmarks[j].name not in ['start', 'finish']:
# If they are not adjacent, move the j-th element to be adjacent to the i-th element
if j != i + 1:
landmarks.insert(i + 1, landmarks.pop(j))
break # Move to the next i-th element after rearrangement
j += 1
i += 1
return landmarks
# Simple nearest neighbour planner to try to fix the path
def find_shortest_path_through_all_landmarks(landmarks: List[Landmark]) -> Tuple[List[Landmark], Polygon]:
# Read from data
with open (os.path.dirname(os.path.abspath(__file__)) + '/parameters/optimizer.params', "r") as f :
parameters = json.loads(f.read())
detour = parameters['detour factor']
speed = parameters['average walking speed']
# Step 1: Find 'start' and 'finish' landmarks
start_idx = next(i for i, lm in enumerate(landmarks) if lm.name == 'start')
finish_idx = next(i for i, lm in enumerate(landmarks) if lm.name == 'finish')
start_landmark = landmarks[start_idx]
finish_landmark = landmarks[finish_idx]
# Step 2: Create a list of unvisited landmarks excluding 'start' and 'finish'
unvisited_landmarks = [lm for i, lm in enumerate(landmarks) if i not in [start_idx, finish_idx]]
# Step 3: Initialize the path with the 'start' landmark
path = [start_landmark]
coordinates = [landmarks[start_idx].location]
current_landmark = start_landmark
# Step 4: Use nearest neighbor heuristic to visit all landmarks
while unvisited_landmarks:
nearest_landmark = min(unvisited_landmarks, key=lambda lm: get_time(current_landmark.location, lm.location, detour, speed))
path.append(nearest_landmark)
coordinates.append(nearest_landmark.location)
current_landmark = nearest_landmark
unvisited_landmarks.remove(nearest_landmark)
# Step 5: Finally add the 'finish' landmark to the path
path.append(finish_landmark)
coordinates.append(landmarks[finish_idx].location)
path_poly = Polygon(coordinates)
return path, path_poly
# Returns a list of minor landmarks around the planned path to enhance experience
def get_minor_landmarks(all_landmarks: List[Landmark], visited_landmarks: List[Landmark], width: float) -> List[Landmark] :
second_order_landmarks = []
visited_names = []
area = create_corridor(visited_landmarks, width)
for visited in visited_landmarks :
visited_names.append(visited.name)
for landmark in all_landmarks :
if is_in_area(area, landmark.location) and landmark.name not in visited_names:
second_order_landmarks.append(landmark)
return take_most_important(second_order_landmarks, len(visited_landmarks))
# Try fix the shortest path using shapely
def fix_using_polygon(tour: List[Landmark])-> List[Landmark] :
coords = []
coords_dict = {}
for landmark in tour :
coords.append(landmark.location)
if landmark.name != 'finish' :
coords_dict[landmark.location] = landmark
tour_poly = Polygon(coords)
better_tour_poly = tour_poly.buffer(0)
try :
xs, ys = better_tour_poly.exterior.xy
if len(xs) != len(tour) :
better_tour_poly = concave_hull(MultiPoint(coords)) # Create concave hull with "core" of tour leaving out start and finish
xs, ys = better_tour_poly.exterior.xy
except :
better_tour_poly = concave_hull(MultiPoint(coords)) # Create concave hull with "core" of tour leaving out start and finish
xs, ys = better_tour_poly.exterior.xy
# reverse the xs and ys
xs.reverse()
ys.reverse()
better_tour = [] # List of ordered visit
name_index = {} # Maps the name of a landmark to its index in the concave polygon
# Loop through the polygon and generate the better (ordered) tour
for i,x in enumerate(xs[:-1]) :
y = ys[i]
better_tour.append(coords_dict[tuple((x,y))])
name_index[coords_dict[tuple((x,y))].name] = i
# Scroll the list to have start in front again
start_index = name_index['start']
better_tour = better_tour[start_index:] + better_tour[:start_index]
# Append the finish back and correct the time to reach
better_tour.append(tour[-1])
# Rearrange only if polygon still not simple
if not better_tour_poly.is_simple :
better_tour = rearrange(better_tour)
return better_tour
# Second stage of the optimization. Use linear programming again to refine the path
def refine_optimization(landmarks: List[Landmark], base_tour: List[Landmark], max_time: int, detour: int, print_infos: bool) -> List[Landmark] :
# Read from the file
with open (os.path.dirname(os.path.abspath(__file__)) + '/parameters/optimizer.params', "r") as f :
parameters = json.loads(f.read())
max_landmarks = parameters['max landmarks'] + 4
# No need to refine if no detour is taken
# if detour == 0 :
if False :
new_tour = base_tour
else :
minor_landmarks = get_minor_landmarks(landmarks, base_tour, 200)
if print_infos : print("Using " + str(len(minor_landmarks)) + " minor landmarks around the predicted path")
# full set of visitable landmarks
full_set = base_tour[:-1] + minor_landmarks # create full set of possible landmarks (without finish)
full_set.append(base_tour[-1]) # add finish back
# get a new tour
new_tour = solve_optimization(full_set, max_time+detour, False, max_landmarks)
if new_tour is None :
new_tour = base_tour
# Link the new tour
new_tour, new_dist = link_list_simple(new_tour)
# If the tour contains at most one landmark besides start and finish, return it as is
if len(new_tour) < 4 :
return new_tour
# Find shortest path using the nearest neighbor heuristic
better_tour, better_poly = find_shortest_path_through_all_landmarks(new_tour)
# Fix the tour using Polygons if the path looks weird
if base_tour[0].location == base_tour[-1].location and not better_poly.is_valid :
better_tour = fix_using_polygon(better_tour)
# Link the tour again
better_tour, better_dist = link_list_simple(better_tour)
# Choose the better tour depending on walked distance
if new_dist < better_dist :
final_tour = new_tour
else :
final_tour = better_tour
if print_infos :
print("\n\n\nRefined tour (result of second stage optimization): ")
print_res(final_tour)
total_score = 0
for elem in final_tour :
total_score += elem.attractiveness
print("\nTotal score : " + str(total_score))
return final_tour

View File

@ -8,7 +8,7 @@ class Landmark(BaseModel) :
# Properties of the landmark
name : str
type: Literal['sightseeing', 'nature', 'shopping']
type: Literal['sightseeing', 'nature', 'shopping', 'start', 'finish']
location : tuple
osm_type : str
osm_id : int
@ -22,8 +22,8 @@ class Landmark(BaseModel) :
uuid: str = Field(default_factory=uuid4) # TODO implement this ASAP
# Additional properties depending on specific tour
must_do : bool
must_avoid : bool
must_do : Optional[bool] = False
must_avoid : Optional[bool] = False
is_secondary : Optional[bool] = False # TODO future
time_to_reach_next : Optional[int] = 0 # TODO fix this in existing code
@ -33,5 +33,6 @@ class Landmark(BaseModel) :
return self.uuid.int
def __str__(self) -> str:
return f'Landmark: [{self.name}, {self.location}, {self.attractiveness}]'
time_to_next_str = f", time_to_next={self.time_to_reach_next}" if self.time_to_reach_next else ""
return f'Landmark({self.type}): [{self.name} @{self.location}, score={self.attractiveness}{time_to_next_str}]'

View File

@ -0,0 +1,61 @@
import uuid
from .landmark import Landmark
from utils.get_time_separation import get_time
class LinkedLandmarks:
"""
A list of landmarks that are linked together, e.g. in a route.
Each landmark serves as a node in the linked list, but since we expect these to be consumed through the REST API, a pythonic reference to the next landmark is not well suited. Instead we use the uuid of the next landmark to reference the next landmark in the list. This is not very efficient, but appropriate for the expected use case ("short" trips with only a few landmarks).
"""
_landmarks = list[Landmark]
total_time = int
uuid = str
def __init__(self, data: list[Landmark] = None) -> None:
"""
Initialize a new LinkedLandmarks object. This expects an ORDERED list of landmarks, where the first landmark is the starting point and the last landmark is the end point.
Args:
data (list[Landmark], optional): The list of landmarks that are linked together. Defaults to None.
"""
self.uuid = uuid.uuid4()
self._landmarks = data if data else []
self._link_landmarks()
def _link_landmarks(self) -> None:
"""
Create the links between the landmarks in the list by setting their .next_uuid and the .time_to_next attributes.
"""
self.total_time = 0
for i, landmark in enumerate(self._landmarks[:-1]):
landmark.next_uuid = self._landmarks[i + 1].uuid
time_to_next = get_time(landmark.location, self._landmarks[i + 1].location)
landmark.time_to_reach_next = time_to_next
self.total_time += time_to_next
self._landmarks[-1].next_uuid = None
self._landmarks[-1].time_to_reach_next = 0
def __getitem__(self, index: int) -> Landmark:
return self._landmarks[index]
def __str__(self) -> str:
return f"LinkedLandmarks, total time: {self.total_time} minutes, {len(self._landmarks)} stops: [{','.join([str(landmark) for landmark in self._landmarks])}]"
def asdict(self) -> dict:
"""
Convert the linked landmarks to a json serializable dictionary.
Returns:
dict: A dictionary representation of the linked landmarks.
"""
return {
'uuid': self.uuid,
'total_time': self.total_time,
'landmarks': [landmark.dict() for landmark in self._landmarks]
}
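# Minimal usage sketch (hypothetical landmark values; Landmark fields as defined in structs/landmark.py, run from backend/src):
from structs.landmark import Landmark
from structs.linked_landmarks import LinkedLandmarks

museum = Landmark(name='Musee', type='sightseeing', location=(48.8606, 2.3376), osm_type='way', osm_id=1, attractiveness=120, n_tags=20)
park = Landmark(name='Jardin', type='nature', location=(48.8475, 2.3364), osm_type='way', osm_id=2, attractiveness=80, n_tags=12)
route = LinkedLandmarks([museum, park])  # sets next_uuid and time_to_reach_next on each stop
print(route.total_time)                  # estimated walking time between the two stops, in minutes
print(route)                             # human-readable summary via __str__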

View File

@ -3,7 +3,7 @@ from typing import Optional, Literal
class Preference(BaseModel) :
name: str
type: Literal['sightseeing', 'nature', 'shopping']
type: Literal['sightseeing', 'nature', 'shopping', 'start', 'finish']
score: int # score could be from 1 to 5
# Input for optimization

View File

@ -1,81 +1,85 @@
import pandas as pd
import logging
import yaml
from typing import List
from landmarks_manager import LandmarkManager
from fastapi.encoders import jsonable_encoder
from optimizer_v4 import solve_optimization
from refiner import refine_optimization
from structs.landmarks import Landmark
from structs.landmarktype import LandmarkType
from utils.landmarks_manager import LandmarkManager
from utils.optimizer import Optimizer
from utils.refiner import Refiner
from structs.landmark import Landmark
from structs.linked_landmarks import LinkedLandmarks
from structs.preferences import Preferences, Preference
# Helper function to create a .txt file with results
def write_data(L: List[Landmark], file_name: str):
data = pd.DataFrame()
i = 0
for landmark in L :
data[i] = jsonable_encoder(landmark)
i += 1
data.to_json(file_name, indent = 2, force_ascii=False)
logger = logging.getLogger(__name__)
def test4(coordinates: tuple[float, float]) -> List[Landmark]:
def test(start_coords: tuple[float, float], finish_coords: tuple[float, float] = None) -> list[Landmark]:
manager = LandmarkManager()
optimizer = Optimizer()
refiner = Refiner(optimizer=optimizer)
preferences = Preferences(
sightseeing=Preference(
name='sightseeing',
type=LandmarkType(landmark_type='sightseeing'),
type='sightseeing',
score = 5),
nature=Preference(
name='nature',
type=LandmarkType(landmark_type='nature'),
score = 0),
type='nature',
score = 5),
shopping=Preference(
name='shopping',
type=LandmarkType(landmark_type='shopping'),
score = 0))
type='shopping',
score = 5),
max_time_minute=180,
detour_tolerance_minute=30
)
# Create start and finish
start = Landmark(name='start', type=LandmarkType(landmark_type='start'), location=coordinates, osm_type='start', osm_id=0, attractiveness=0, must_do=False, n_tags = 0)
finish = Landmark(name='finish', type=LandmarkType(landmark_type='finish'), location=coordinates, osm_type='finish', osm_id=0, attractiveness=0, must_do=False, n_tags = 0)
if finish_coords is None :
finish_coords = start_coords
start = Landmark(name='start', type='start', location=start_coords, osm_type='', osm_id=0, attractiveness=0, n_tags = 0)
finish = Landmark(name='finish', type='finish', location=finish_coords, osm_type='', osm_id=0, attractiveness=0, n_tags = 0)
#finish = Landmark(name='finish', type=LandmarkType(landmark_type='finish'), location=(48.8777055, 2.3640967), osm_type='finish', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
#start = Landmark(name='start', type=LandmarkType(landmark_type='start'), location=(48.847132, 2.312359), osm_type='start', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
#finish = Landmark(name='finish', type=LandmarkType(landmark_type='finish'), location=(48.843185, 2.344533), osm_type='finish', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
#finish = Landmark(name='finish', type=LandmarkType(landmark_type='finish'), location=(48.847132, 2.312359), osm_type='finish', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
# Generate the landmarks from the start location
landmarks, landmarks_short = generate_landmarks(preferences=preferences, coordinates=start.location)
#write_data(landmarks, "landmarks_Wien.txt")
landmarks, landmarks_short = manager.generate_landmarks_list(
center_coordinates = start_coords,
preferences = preferences
)
# Store data to file for debug purposes
# write_data(landmarks, "landmarks_Strasbourg.txt")
# Insert start and finish to the landmarks list
landmarks_short.insert(0, start)
landmarks_short.append(finish)
max_walking_time = 180 # minutes
detour = 0 # minutes
# First stage optimization
base_tour = solve_optimization(landmarks_short, max_walking_time, True)
base_tour = optimizer.solve_optimization(max_time=preferences.max_time_minute, landmarks=landmarks_short)
# Second stage using linear optimization
refined_tour = refine_optimization(landmarks, base_tour, max_walking_time, detour, True)
refined_tour = refiner.refine_optimization(all_landmarks=landmarks, base_tour=base_tour, max_time = preferences.max_time_minute, detour = preferences.detour_tolerance_minute)
linked_tour = LinkedLandmarks(refined_tour)
logger.info(f"Optimized route: {linked_tour}")
# with open('linked_tour.yaml', 'w') as f:
# yaml.dump(linked_tour.asdict(), f)
return linked_tour
return refined_tour
#test4(tuple((48.8344400, 2.3220540))) # Café Chez César
#test4(tuple((48.8375946, 2.2949904))) # Point random
#test4(tuple((47.377859, 8.540585))) # Zurich HB
#test4(tuple((45.7576485, 4.8330241))) # Lyon Bellecour
test4(tuple((48.5848435, 7.7332974))) # Strasbourg Gare
#test4(tuple((48.2067858, 16.3692340))) # Vienne
#test(tuple((48.8344400, 2.3220540))) # Café Chez César
#test(tuple((48.8375946, 2.2949904))) # Point random
#test(tuple((47.377859, 8.540585))) # Zurich HB
#test(tuple((45.7576485, 4.8330241))) # Lyon Bellecour
test(tuple((48.5848435, 7.7332974))) # Strasbourg Gare
#test(tuple((48.2067858, 16.3692340))) # Vienne

View File

@ -0,0 +1,39 @@
import yaml
from geopy.distance import geodesic
import constants
with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
DETOUR_FACTOR = parameters['detour_factor']
AVERAGE_WALKING_SPEED = parameters['average_walking_speed']
def get_time(p1: tuple[float, float], p2: tuple[float, float]) -> int:
"""
Calculate the time in minutes to travel from one location to another.
Args:
p1 (Tuple[float, float]): Coordinates of the starting location.
p2 (Tuple[float, float]): Coordinates of the destination.
detour (float): Detour factor affecting the distance.
speed (float): Walking speed in kilometers per hour.
Returns:
int: Time to travel from p1 to p2 in minutes.
"""
# Compute the straight-line distance in km
if p1 == p2 :
return 0
else:
dist = geodesic(p1, p2).kilometers
# Apply the detour factor to approximate the real walking distance in a city (in km)
walk_dist = dist*DETOUR_FACTOR
# Time to walk this distance (in minutes)
walk_time = walk_dist/AVERAGE_WALKING_SPEED*60
return round(walk_time)
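# Quick sanity check (the detour factor and walking speed below are illustrative; the real values come from the optimizer parameters file):
from utils.get_time_separation import get_time

eiffel = (48.8584, 2.2945)
trocadero = (48.8616, 2.2893)
# ~0.5 km straight-line; with detour_factor = 1.4 and 4.5 km/h this is roughly 0.5 * 1.4 / 4.5 * 60 ≈ 9 minutes
print(get_time(eiffel, trocadero))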

View File

@ -0,0 +1,365 @@
import math as m
import yaml
import logging
from OSMPythonTools.overpass import Overpass, overpassQueryBuilder
from OSMPythonTools.cachingStrategy import CachingStrategy, JSON
from pywikibot import ItemPage, Site
from pywikibot import config
config.put_throttle = 0
config.maxlag = 0
from structs.preferences import Preferences, Preference
from structs.landmark import Landmark
from .take_most_important import take_most_important
import constants
SIGHTSEEING = 'sightseeing'
NATURE = 'nature'
SHOPPING = 'shopping'
class LandmarkManager:
logger = logging.getLogger(__name__)
city_bbox_side: int # bbox side in meters
radius_close_to: int # radius in meters
church_coeff: float # coeff to adjust score of churches
park_coeff: float # coeff to adjust score of parks
tag_coeff: float # coeff to adjust weight of tags
N_important: int # number of important landmarks to consider
def __init__(self) -> None:
with constants.AMENITY_SELECTORS_PATH.open('r') as f:
self.amenity_selectors = yaml.safe_load(f)
with constants.LANDMARK_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
self.city_bbox_side = parameters['city_bbox_side']
self.radius_close_to = parameters['radius_close_to']
self.church_coeff = parameters['church_coeff']
self.park_coeff = parameters['park_coeff']
self.tag_coeff = parameters['tag_coeff']
self.N_important = parameters['N_important']
self.overpass = Overpass()
CachingStrategy.use(JSON, cacheDir=constants.OSM_CACHE_DIR)
def generate_landmarks_list(self, center_coordinates: tuple[float, float], preferences: Preferences) -> tuple[list[Landmark], list[Landmark]]:
"""
Generate and prioritize a list of landmarks based on user preferences.
This method fetches landmarks from various categories (sightseeing, nature, shopping) based on the user's preferences
and current location. It scores and corrects these landmarks, removes duplicates, and then selects the most important
landmarks based on a predefined criterion.
Parameters:
center_coordinates (tuple[float, float]): The latitude and longitude of the center location around which to search.
preferences (Preferences): The user's preference settings that influence the landmark selection.
Returns:
tuple[list[Landmark], list[Landmark]]:
- A list of all existing landmarks.
- A list of the most important landmarks based on the user's preferences.
"""
L = []
bbox = self.create_bbox(center_coordinates)
# list for sightseeing
if preferences.sightseeing.score != 0:
score_function = lambda loc, n_tags: int((self.count_elements_close_to(loc) + ((n_tags**1.2)*self.tag_coeff) )*self.church_coeff)
L1 = self.fetch_landmarks(bbox, self.amenity_selectors['sightseeing'], SIGHTSEEING, score_function)
self.correct_score(L1, preferences.sightseeing)
L += L1
# list for nature
if preferences.nature.score != 0:
score_function = lambda loc, n_tags: int((self.count_elements_close_to(loc) + ((n_tags**1.2)*self.tag_coeff) )*self.park_coeff)
L2 = self.fetch_landmarks(bbox, self.amenity_selectors['nature'], NATURE, score_function)
self.correct_score(L2, preferences.nature)
L += L2
# list for shopping
if preferences.shopping.score != 0:
score_function = lambda loc, n_tags: int(self.count_elements_close_to(loc) + ((n_tags**1.2)*self.tag_coeff))
L3 = self.fetch_landmarks(bbox, self.amenity_selectors['shopping'], SHOPPING, score_function)
self.correct_score(L3, preferences.shopping)
L += L3
L = self.remove_duplicates(L)
L_constrained = take_most_important(L, self.N_important)
self.logger.info(f'Generated {len(L)} landmarks around {center_coordinates}, and constrained to {len(L_constrained)} most important ones.')
return L, L_constrained
def remove_duplicates(self, landmarks: list[Landmark]) -> list[Landmark]:
"""
Removes duplicate landmarks based on their names from the given list, keeping only the first occurrence of each name.
Parameters:
landmarks (list[Landmark]): A list of Landmark objects.
Returns:
list[Landmark]: A list of unique Landmark objects based on their names.
"""
L_clean = []
names = []
for landmark in landmarks:
if landmark.name in names:
continue
else:
names.append(landmark.name)
L_clean.append(landmark)
return L_clean
def correct_score(self, landmarks: list[Landmark], preference: Preference):
"""
Adjust the attractiveness score of each landmark in the list based on user preferences.
This method updates the attractiveness of each landmark by scaling it according to the user's preference score.
The score adjustment is computed using a simple linear transformation based on the preference score.
Args:
landmarks (list[Landmark]): A list of landmarks whose scores need to be corrected.
preference (Preference): The user's preference settings that influence the attractiveness score adjustment.
Raises:
TypeError: If the type of any landmark in the list does not match the expected type in the preference.
"""
if len(landmarks) == 0:
return
if landmarks[0].type != preference.type:
raise TypeError(f"LandmarkType {preference.type} does not match the type of Landmark {landmarks[0].name}")
for elem in landmarks:
elem.attractiveness = int(elem.attractiveness*preference.score/5) # arbitrary computation
def count_elements_close_to(self, coordinates: tuple[float, float]) -> int:
"""
Count the number of OpenStreetMap elements (nodes, ways, relations) within a specified radius of the given location.
This function constructs a bounding box around the specified coordinates based on the radius. It then queries
OpenStreetMap data to count the number of elements within that bounding box.
Args:
coordinates (tuple[float, float]): The latitude and longitude of the location to search around.
Returns:
int: The number of elements (nodes, ways, relations) within the specified radius. Returns 0 if no elements
are found or if an error occurs during the query.
"""
lat = coordinates[0]
lon = coordinates[1]
radius = self.radius_close_to
alpha = (180*radius) / (6371000*m.pi)
bbox = {'latLower':lat-alpha,'lonLower':lon-alpha,'latHigher':lat+alpha,'lonHigher': lon+alpha}
# Build the query to find elements within the radius
radius_query = overpassQueryBuilder(
bbox=[bbox['latLower'],
bbox['lonLower'],
bbox['latHigher'],
bbox['lonHigher']],
elementType=['node', 'way', 'relation']
)
try:
radius_result = self.overpass.query(radius_query)
N_elem = radius_result.countWays() + radius_result.countRelations()
self.logger.debug(f"There are {N_elem} ways/relations within 50m")
if N_elem is None:
return 0
return N_elem
except Exception:
return 0
def create_bbox(self, coordinates: tuple[float, float]) -> tuple[float, float, float, float]:
"""
Create a bounding box around the given coordinates.
Args:
coordinates (tuple[float, float]): The latitude and longitude of the center of the bounding box.
Returns:
tuple[float, float, float, float]: The minimum latitude, minimum longitude, maximum latitude, and maximum longitude
defining the bounding box.
"""
lat = coordinates[0]
lon = coordinates[1]
# Half the side length in km (since it's a square bbox)
half_side_length_km = self.city_bbox_side / 2 / 1000
# Convert distance to degrees
lat_diff = half_side_length_km / 111 # 1 degree latitude is approximately 111 km
lon_diff = half_side_length_km / (111 * m.cos(m.radians(lat))) # Adjust for longitude based on latitude
# Calculate bbox
min_lat = lat - lat_diff
max_lat = lat + lat_diff
min_lon = lon - lon_diff
max_lon = lon + lon_diff
return min_lat, min_lon, max_lat, max_lon
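# Rough numeric check of the degree conversion (city_bbox_side = 5000 m is an assumed value):
# half_side_km = 5000 / 2 / 1000 = 2.5 km
# lat_diff = 2.5 / 111 ≈ 0.0225 degrees
# lon_diff = 2.5 / (111 * cos(48.86°)) ≈ 0.0342 degrees at the latitude of Paris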
def fetch_landmarks(self, bbox: tuple, amenity_selector: dict, landmarktype: str, score_function: callable) -> list[Landmark]:
"""
Fetches landmarks of a specified type from OpenStreetMap (OSM) within a bounding box centered on given coordinates.
Args:
bbox (tuple[float, float, float, float]): The bounding box coordinates (min_lat, min_lon, max_lat, max_lon).
amenity_selector (dict): The Overpass API query selector for the desired landmark type.
landmarktype (str): The type of the landmark (e.g., 'sightseeing', 'nature', 'shopping').
score_function (callable): The function to compute the score of the landmark based on its attributes.
Returns:
list[Landmark]: A list of Landmark objects that were fetched and filtered based on the provided criteria.
Notes:
- Landmarks are fetched using Overpass API queries.
- Selectors are translated from the dictionary to the Overpass query format. (e.g., 'amenity'='place_of_worship')
- Landmarks are filtered based on various conditions including tags and type.
- Scores are assigned to landmarks based on their attributes and surrounding elements.
"""
return_list = []
# caution, when applying a list of selectors, overpass will search for elements that match ALL selectors simultaneously
# we need to split the selectors into separate queries and merge the results
for sel in dict_to_selector_list(amenity_selector):
self.logger.debug(f"Current selector: {sel}")
query = overpassQueryBuilder(
bbox = bbox,
elementType = ['way', 'relation'],
selector = sel,
# conditions = [],
includeCenter = True,
out = 'body'
)
try:
result = self.overpass.query(query)
except Exception as e:
self.logger.error(f"Error fetching landmarks: {e}")
return []
for elem in result.elements():
name = elem.tag('name') # Add name
location = (elem.centerLat(), elem.centerLon()) # Add coordinates (lat, lon)
# TODO: exclude these from the get go
# skip if the location is imprecise or the name is missing
if name is None or location[0] is None:
continue
# skip if unused
if 'disused:leisure' in elem.tags().keys():
continue
# skip if part of another building
if 'building:part' in elem.tags().keys() and elem.tag('building:part') == 'yes':
continue
osm_type = elem.type() # Add type: 'way' or 'relation'
osm_id = elem.id() # Add OSM id
elem_type = landmarktype # Add the landmark type, e.g. 'sightseeing'
n_tags = len(elem.tags().keys()) # Add number of tags
# remove specific tags
skip = False
for tag in elem.tags().keys():
if "pay" in tag:
n_tags -= 1 # discard payment options for tags
if "disused" in tag:
skip = True # skip disused amenities
break
if "wikipedia" in tag:
n_tags += 3 # wikipedia entries count more
if tag == "wikidata":
Q = elem.tag('wikidata')
site = Site("wikidata", "wikidata")
item = ItemPage(site, Q)
item.get()
n_languages = len(item.labels)
n_tags += n_languages/10
if elem_type != "nature":
if "leisure" in tag and elem.tag('leisure') == "park":
elem_type = "nature"
if landmarktype != SHOPPING:
if "shop" in tag:
skip = True
break
if tag == "building" and elem.tag('building') in ['retail', 'supermarket', 'parking']:
skip = True
break
if skip:
continue
score = score_function(location, n_tags)
if score != 0:
# Generate the landmark and append it to the list
landmark = Landmark(
name=name,
type=elem_type,
location=location,
osm_type=osm_type,
osm_id=osm_id,
attractiveness=score,
must_do=False,
n_tags=int(n_tags)
)
return_list.append(landmark)
self.logger.debug(f"Fetched {len(return_list)} landmarks of type {landmarktype} in {bbox}")
return return_list
def dict_to_selector_list(d: dict) -> list:
"""
Convert a dictionary of key-value pairs to a list of Overpass query strings.
Args:
d (dict): A dictionary of key-value pairs representing the selector.
Returns:
list: A list of strings representing the Overpass query selectors.
"""
return_list = []
for key, value in d.items():
if isinstance(value, list):
val = '|'.join(value)
return_list.append(f'{key}~"{val}"')
elif isinstance(value, str) and len(value) == 0:
return_list.append(f'{key}')
else:
return_list.append(f'{key}={value}')
return return_list
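# Example translation (keys and values are illustrative, shaped like entries in amenity_selectors.yaml):
# dict_to_selector_list({'amenity': ['place_of_worship', 'museum'], 'tourism': 'attraction', 'historic': ''})
# -> ['amenity~"place_of_worship|museum"', 'tourism=attraction', 'historic']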

View File

@ -0,0 +1,519 @@
import yaml, logging
import numpy as np
from scipy.optimize import linprog
from collections import defaultdict, deque
from geopy.distance import geodesic
from structs.landmark import Landmark
from .get_time_separation import get_time
import constants
class Optimizer:
logger = logging.getLogger(__name__)
detour: int = None # accepted max detour time (in minutes)
detour_factor: float # detour factor of straight line vs real distance in cities
average_walking_speed: float # average walking speed of adult
max_landmarks: int # max number of landmarks to visit
def __init__(self) :
# load parameters from file
with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
self.detour_factor = parameters['detour_factor']
self.average_walking_speed = parameters['average_walking_speed']
self.max_landmarks = parameters['max_landmarks']
# Prevent the use of a particular solution
def prevent_config(self, resx):
"""
Prevent the use of a particular solution by adding constraints to the optimization.
Args:
resx (list[float]): List of edge weights.
Returns:
Tuple[list[int], list[int]]: A tuple containing a new row for constraint matrix and new value for upper bound vector.
"""
for i, elem in enumerate(resx):
resx[i] = round(elem)
N = len(resx) # Number of edges
L = int(np.sqrt(N)) # Number of landmarks
nonzeroind = np.nonzero(resx)[0] # np.nonzero returns a tuple, so take the first element
nonzero_tup = np.unravel_index(nonzeroind, (L,L))
ind_a = nonzero_tup[0].tolist()
vertices_visited = ind_a
vertices_visited.remove(0)
ones = [1]*L
h = [0]*N
for i in range(L) :
if i in vertices_visited :
h[i*L:i*L+L] = ones
return h, [len(vertices_visited)-1]
# Prevents the creation of the same circle (both directions)
def prevent_circle(self, circle_vertices: list, L: int) :
"""
Prevent circular paths by adding constraints to the optimization.
Args:
circle_vertices (list): List of vertices forming a circle.
L (int): Number of landmarks.
Returns:
Tuple[np.ndarray, list[int]]: A tuple containing a new row for constraint matrix and new value for upper bound vector.
"""
l1 = [0]*L*L
l2 = [0]*L*L
for i, node in enumerate(circle_vertices[:-1]) :
next = circle_vertices[i+1]
l1[node*L + next] = 1
l2[next*L + node] = 1
s = circle_vertices[0]
g = circle_vertices[-1]
l1[g*L + s] = 1
l2[s*L + g] = 1
return np.vstack((l1, l2)), [0, 0]
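# Worked example (values chosen for illustration): for a detected cycle 2 -> 5 -> 7 -> 2 with L = 8 landmarks,
# prevent_circle([2, 5, 7], L=8) returns two equality rows of length 64 with right-hand side [0, 0]:
# the first forces x[2,5] + x[5,7] + x[7,2] = 0, the second forces the reversed edges x[5,2] + x[7,5] + x[2,7] = 0,
# so the same cycle cannot reappear in either direction in the next solve.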
def is_connected(self, resx) :
"""
Determine the order of visits and detect any circular paths in the given configuration.
Args:
resx (list): List of edge weights.
Returns:
Tuple[list[int], Optional[list[list[int]]]]: A tuple containing the visit order and a list of any detected circles.
"""
# first round the results to have only 0-1 values
for i, elem in enumerate(resx):
resx[i] = round(elem)
N = len(resx) # length of res
L = int(np.sqrt(N)) # number of landmarks (cast to int; safe because N = L**2 by definition)
nonzeroind = np.nonzero(resx)[0] # np.nonzero returns a tuple, so take the first element
nonzero_tup = np.unravel_index(nonzeroind, (L,L))
ind_a = nonzero_tup[0].tolist()
ind_b = nonzero_tup[1].tolist()
# Step 1: Create a graph representation
graph = defaultdict(list)
for a, b in zip(ind_a, ind_b):
graph[a].append(b)
# Step 2: Function to perform BFS/DFS to extract journeys
def get_journey(start):
journey_nodes = []
visited = set()
stack = deque([start])
while stack:
node = stack.pop()
if node not in visited:
visited.add(node)
journey_nodes.append(node)
for neighbor in graph[node]:
if neighbor not in visited:
stack.append(neighbor)
return journey_nodes
# Step 3: Extract all journeys
all_journeys_nodes = []
visited_nodes = set()
for node in ind_a:
if node not in visited_nodes:
journey_nodes = get_journey(node)
all_journeys_nodes.append(journey_nodes)
visited_nodes.update(journey_nodes)
for l in all_journeys_nodes :
if 0 in l :
order = l
all_journeys_nodes.remove(l)
break
if len(all_journeys_nodes) == 0 :
return order, None
return order, all_journeys_nodes
def init_ub_dist(self, landmarks: list[Landmark], max_steps: int):
"""
Initialize the objective function coefficients and inequality constraints for the optimization problem.
This function computes the distances between all landmarks and stores their attractiveness to maximize sightseeing.
The goal is to maximize the objective function subject to the constraints A*x < b and A_eq*x = b_eq.
Args:
landmarks (list[Landmark]): List of landmarks.
max_steps (int): Maximum number of steps allowed.
Returns:
Tuple[list[float], list[float], list[int]]: Objective function coefficients, inequality constraint coefficients, and the right-hand side of the inequality constraint.
"""
# Objective function coefficients. a*x1 + b*x2 + c*x3 + ...
c = []
# Coefficients of inequality constraints (left-hand side)
A_ub = []
for spot1 in landmarks :
dist_table = [0]*len(landmarks)
c.append(-spot1.attractiveness)
for j, spot2 in enumerate(landmarks) :
t = get_time(spot1.location, spot2.location)
dist_table[j] = t
closest = sorted(dist_table)[:22]
for i, dist in enumerate(dist_table) :
if dist not in closest :
dist_table[i] = 32700
A_ub += dist_table
c = c*len(landmarks)
return c, A_ub, [max_steps]
def respect_number(self, L: int):
"""
Generate constraints to ensure each landmark is visited only once and cap the total number of visited landmarks.
Args:
L (int): Number of landmarks.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
ones = [1]*L
zeros = [0]*L
A = ones + zeros*(L-1)
b = [1]
for i in range(L-1) :
h_new = zeros*i + ones + zeros*(L-1-i)
A = np.vstack((A, h_new))
b.append(1)
A = np.vstack((A, ones*L))
b.append(self.max_landmarks+1)
return A, b
# Constraint to not have d14 and d41 simultaneously. Does not prevent cyclic paths with more elements
def break_sym(self, L):
"""
Generate constraints to prevent simultaneous travel between two landmarks in both directions.
Args:
L (int): Number of landmarks.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
upper_ind = np.triu_indices(L,0,L)
up_ind_x = upper_ind[0]
up_ind_y = upper_ind[1]
A = [0]*L*L
b = [1]
for i, _ in enumerate(up_ind_x[1:]) :
l = [0]*L*L
if up_ind_x[i] != up_ind_y[i] :
l[up_ind_x[i]*L + up_ind_y[i]] = 1
l[up_ind_y[i]*L + up_ind_x[i]] = 1
A = np.vstack((A,l))
b.append(1)
return A, b
def init_eq_not_stay(self, L: int):
"""
Generate constraints to prevent staying in the same position (e.g., removing d11, d22, d33, etc.).
Args:
L (int): Number of landmarks.
Returns:
Tuple[list[np.ndarray], list[int]]: Equality constraint coefficients and the right-hand side of the equality constraints.
"""
l = [0]*L*L
for i in range(L) :
for j in range(L) :
if j == i :
l[j + i*L] = 1
l = np.array(l, dtype=np.int8)
return [l], [0]
def respect_user_must_do(self, landmarks: list[Landmark]) :
"""
Generate constraints to ensure that landmarks marked as 'must_do' are included in the optimization.
Args:
landmarks (list[Landmark]): List of landmarks, where some are marked as 'must_do'.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
L = len(landmarks)
A = [0]*L*L
b = [0]
for i, elem in enumerate(landmarks[1:], start=1) : # i is the index of elem in the full landmarks list
if elem.must_do is True and elem.name not in ['finish', 'start']:
l = [0]*L*L
l[i*L:i*L+L] = [1]*L # set mandatory departures from landmarks tagged as 'must_do'
A = np.vstack((A,l))
b.append(1)
return A, b
def respect_user_must_avoid(self, landmarks: list[Landmark]) :
"""
Generate constraints to ensure that landmarks marked as 'must_avoid' are skipped in the optimization.
Args:
landmarks (list[Landmark]): List of landmarks, where some are marked as 'must_avoid'.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
L = len(landmarks)
A = [0]*L*L
b = [0]
for i, elem in enumerate(landmarks[1:], start=1) : # i is the index of elem in the full landmarks list
if elem.must_avoid is True and elem.name not in ['finish', 'start']:
l = [0]*L*L
l[i*L:i*L+L] = [1]*L
A = np.vstack((A,l))
b.append(0) # prevent departures from landmarks tagged as 'must_avoid'
return A, b
# Constraint to ensure start at start and finish at goal
def respect_start_finish(self, L: int):
"""
Generate constraints to ensure that the optimization starts at the designated start landmark and finishes at the goal landmark.
Args:
L (int): Number of landmarks.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
l_start = [1]*L + [0]*L*(L-1) # sets departures only for start (horizontal ones)
l_start[L-1] = 0 # prevents the jump from start to finish
l_goal = [0]*L*L # sets arrivals only for finish (vertical ones)
l_L = [0]*L*(L-1) + [1]*L # prevents arrivals at start and departures from goal
for k in range(L-1) : # sets only vertical ones for goal (go to)
l_L[k*L] = 1
if k != 0 :
l_goal[k*L+L-1] = 1
A = np.vstack((l_start, l_goal))
b = [1, 1]
A = np.vstack((A,l_L))
b.append(0)
return A, b
def respect_order(self, L: int):
"""
Generate constraints that force the number of departures from each intermediate landmark to equal the number of arrivals at it, tying the tour together. This prevents isolated segments but does not fully prevent circles.
Args:
L (int): Number of landmarks.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
A = [0]*L*L
b = [0]
for i in range(L-1) : # Prevent stacked ones
if i == 0 or i == L-1: # Don't touch start or finish
continue
else :
l = [0]*L
l[i] = -1
l = l*L
for j in range(L) :
l[i*L + j] = 1
A = np.vstack((A,l))
b.append(0)
return A, b
def link_list(self, order: list[int], landmarks: list[Landmark])->list[Landmark] :
"""
Compute the time to reach from each landmark to the next and create a list of landmarks with updated travel times.
Args:
order (list[int]): List of indices representing the order of landmarks to visit.
landmarks (list[Landmark]): List of all landmarks.
Returns:
list[Landmark]: The updated linked list of landmarks with travel times
"""
L = []
j = 0
while j < len(order)-1 :
# get landmarks involved
elem = landmarks[order[j]]
next = landmarks[order[j+1]]
# get attributes
elem.time_to_reach_next = get_time(elem.location, next.location)
elem.must_do = True
elem.location = (round(elem.location[0], 5), round(elem.location[1], 5))
elem.next_uuid = next.uuid
L.append(elem)
j += 1
next.location = (round(next.location[0], 5), round(next.location[1], 5))
next.must_do = True
L.append(next)
return L
# Main optimization pipeline
def solve_optimization(
self,
max_time: int,
landmarks: list[Landmark],
) -> list[Landmark]:
"""
Main optimization pipeline to solve the landmark visiting problem.
This method sets up and solves a linear programming problem with constraints to find an optimal tour of landmarks,
considering user-defined must-visit landmarks, start and finish points, and ensuring no cycles are present.
Args:
max_time (int): Maximum time allowed for the tour in minutes.
landmarks (list[Landmark]): List of landmarks to visit.
Returns:
list[Landmark]: The optimized tour of landmarks with updated travel times, or None if no valid solution is found.
"""
L = len(landmarks)
# SET CONSTRAINTS FOR INEQUALITY
c, A_ub, b_ub = self.init_ub_dist(landmarks, max_time) # Add the distances from each landmark to the other
A, b = self.respect_number(L) # Respect max number of visits (no more possible stops than landmarks).
A_ub = np.vstack((A_ub, A), dtype=np.int16)
b_ub += b
A, b = self.break_sym(L) # break the 'zig-zag' symmetry
A_ub = np.vstack((A_ub, A), dtype=np.int16)
b_ub += b
# SET CONSTRAINTS FOR EQUALITY
A_eq, b_eq = self.init_eq_not_stay(L) # Force solution not to stay in same place
A, b = self.respect_user_must_do(landmarks) # Check if there are user_defined must_see. Also takes care of start/goal
A_eq = np.vstack((A_eq, A), dtype=np.int8)
b_eq += b
A, b = self.respect_user_must_avoid(landmarks) # Check if there are user_defined must_avoid. Also takes care of start/goal
A_eq = np.vstack((A_eq, A), dtype=np.int8)
b_eq += b
A, b = self.respect_start_finish(L) # Force start and finish positions
A_eq = np.vstack((A_eq, A), dtype=np.int8)
b_eq += b
A, b = self.respect_order(L) # Respect order of visit (only works when max_steps is limiting factor)
A_eq = np.vstack((A_eq, A), dtype=np.int8)
b_eq += b
# SET BOUNDS FOR DECISION VARIABLE (x can only be 0 or 1)
x_bounds = [(0, 1)]*L*L
# Solve linear programming problem
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3)
# Raise error if no solution is found
if not res.success :
raise ArithmeticError("No solution could be found, the problem is overconstrained. Please adapt your must_dos")
# If there is a solution, we're good to go, just check for connectiveness
order, circles = self.is_connected(res.x)
#nodes, edges = is_connected(res.x)
i = 0
timeout = 80
while circles is not None and i < timeout:
A, b = self.prevent_config(res.x)
A_ub = np.vstack((A_ub, A))
b_ub += b
#A_ub, b_ub = prevent_circle(order, len(landmarks), A_ub, b_ub)
for circle in circles :
A, b = self.prevent_circle(circle, L)
A_eq = np.vstack((A_eq, A))
b_eq += b
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3)
if not res.success :
raise ArithmeticError("Solving failed because the problem is overconstrained")
order, circles = self.is_connected(res.x)
#nodes, edges = is_connected(res.x)
if circles is None :
break
# print(i)
i += 1
if i == timeout :
raise TimeoutError(f"Optimization took too long. No solution found after {timeout} iterations.")
#sort the landmarks in the order of the solution
tour = [landmarks[i] for i in order]
self.logger.debug(f"Re-optimized {i} times, score: {int(-res.fun)}")
return tour
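# Minimal call sketch (landmarks_short is assumed to be an ordered list with the 'start' landmark first
# and the 'finish' landmark last, as built in tester.py). Internally the decision variable is a flattened
# L x L matrix where x[i*L + j] == 1 means the tour walks directly from landmarks[i] to landmarks[j];
# is_connected() recovers the visit order from that matrix.
optimizer = Optimizer()
tour = optimizer.solve_optimization(max_time=180, landmarks=landmarks_short)
for landmark in tour:
    print(landmark)  # ordered stops, start first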

View File

@ -0,0 +1,340 @@
import yaml, logging
from shapely import buffer, LineString, Point, Polygon, MultiPoint, concave_hull
from math import pi
from structs.landmark import Landmark
from . import take_most_important, get_time_separation
from .optimizer import Optimizer
import constants
class Refiner :
logger = logging.getLogger(__name__)
detour_factor: float # detour factor of straight line vs real distance in cities
detour_corridor_width: float # width of the corridor around the path
average_walking_speed: float # average walking speed of adult
max_landmarks: int # max number of landmarks to visit
optimizer: Optimizer # optimizer object
def __init__(self, optimizer: Optimizer) :
self.optimizer = optimizer
# load parameters from file
with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
self.detour_factor = parameters['detour_factor']
self.detour_corridor_width = parameters['detour_corridor_width']
self.average_walking_speed = parameters['average_walking_speed']
self.max_landmarks = parameters['max_landmarks'] + 4
def create_corridor(self, landmarks: list[Landmark], width: float) :
"""
Create a corridor around the path connecting the landmarks.
Args:
landmarks (list[Landmark]): the landmark path around which to create the corridor
width (float): Width of the corridor in meters.
Returns:
Geometry: A buffered geometry object representing the corridor around the path.
"""
corrected_width = (180*width)/(6371000*pi)
path = self.create_linestring(landmarks)
obj = buffer(path, corrected_width, join_style="mitre", cap_style="square", mitre_limit=2)
return obj
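# The width conversion mirrors the radius conversion in LandmarkManager: (180 * width) / (6371000 * pi)
# turns a width in meters into degrees of latitude, e.g. a 1000 m corridor ≈ 180000 / 20015086 ≈ 0.009 degrees.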
def create_linestring(self, tour: list[Landmark]) -> LineString :
"""
Create a `LineString` object from a tour.
Args:
tour (list[Landmark]): An ordered sequence of landmarks that represents the visiting order.
Returns:
LineString: A `LineString` object representing the path through the landmarks.
"""
points = []
for landmark in tour :
points.append(Point(landmark.location))
return LineString(points)
# Check if some coordinates are in area. Used for the corridor
def is_in_area(self, area: Polygon, coordinates) -> bool :
"""
Check if a given point is within a specified area.
Args:
area (Polygon): The polygon defining the area.
coordinates (tuple[float, float]): The coordinates of the point to check.
Returns:
bool: True if the point is within the area, otherwise False.
"""
point = Point(coordinates)
return point.within(area)
# Function to determine if two landmarks are close to each other
def is_close_to(self, location1: tuple[float, float], location2: tuple[float, float]) -> bool:
"""
Determine whether two locations are close to each other (within 0.001 degrees in both latitude and longitude).
Args:
location1 (tuple[float, float]): The coordinates of the first location.
location2 (tuple[float, float]): The coordinates of the second location.
Returns:
bool: True if the locations are within 0.001 degrees of each other, otherwise False.
"""
absx = abs(location1[0] - location2[0])
absy = abs(location1[1] - location2[1])
return absx < 0.001 and absy < 0.001
#return (round(location1[0], 3), round(location1[1], 3)) == (round(location2[0], 3), round(location2[1], 3))
def rearrange(self, tour: list[Landmark]) -> list[Landmark]:
"""
Rearrange landmarks to group nearby visits together.
This function reorders landmarks so that nearby landmarks are adjacent to each other in the list,
while keeping 'start' and 'finish' landmarks in their original positions.
Args:
tour (list[Landmark]): Ordered list of landmarks to be rearranged.
Returns:
list[Landmark]: The rearranged list of landmarks with grouped nearby visits.
"""
i = 1
while i < len(tour):
j = i+1
while j < len(tour):
if self.is_close_to(tour[i].location, tour[j].location) and tour[i].name not in ['start', 'finish'] and tour[j].name not in ['start', 'finish']:
# If they are not adjacent, move the j-th element to be adjacent to the i-th element
if j != i + 1:
tour.insert(i + 1, tour.pop(j))
break # Move to the next i-th element after rearrangement
j += 1
i += 1
return tour
def find_shortest_path_through_all_landmarks(self, landmarks: list[Landmark]) -> tuple[list[Landmark], Polygon]:
"""
Find the shortest path through all landmarks using a nearest neighbor heuristic.
This function constructs a path that starts from the 'start' landmark, visits all other landmarks in the order
of their proximity, and ends at the 'finish' landmark. It returns both the ordered list of landmarks and a
polygon representing the path.
Args:
landmarks (list[Landmark]): list of all landmarks including 'start' and 'finish'.
Returns:
tuple[list[Landmark], Polygon]: A tuple where the first element is the list of landmarks in the order they
should be visited, and the second element is a `Polygon` representing
the path connecting all landmarks.
"""
# Step 1: Find 'start' and 'finish' landmarks
start_idx = next(i for i, lm in enumerate(landmarks) if lm.type == 'start')
finish_idx = next(i for i, lm in enumerate(landmarks) if lm.type == 'finish')
start_landmark = landmarks[start_idx]
finish_landmark = landmarks[finish_idx]
# Step 2: Create a list of unvisited landmarks excluding 'start' and 'finish'
unvisited_landmarks = [lm for i, lm in enumerate(landmarks) if i not in [start_idx, finish_idx]]
# Step 3: Initialize the path with the 'start' landmark
path = [start_landmark]
coordinates = [landmarks[start_idx].location]
current_landmark = start_landmark
# Step 4: Use nearest neighbor heuristic to visit all landmarks
while unvisited_landmarks:
nearest_landmark = min(unvisited_landmarks, key=lambda lm: get_time_separation.get_time(current_landmark.location, lm.location))
path.append(nearest_landmark)
coordinates.append(nearest_landmark.location)
current_landmark = nearest_landmark
unvisited_landmarks.remove(nearest_landmark)
# Step 5: Finally add the 'finish' landmark to the path
path.append(finish_landmark)
coordinates.append(landmarks[finish_idx].location)
path_poly = Polygon(coordinates)
return path, path_poly
# Returns a list of minor landmarks around the planned path to enhance experience
def get_minor_landmarks(self, all_landmarks: list[Landmark], visited_landmarks: list[Landmark], width: float) -> list[Landmark] :
"""
Identify landmarks within a specified corridor that have not been visited yet.
This function creates a corridor around the path defined by visited landmarks and then finds landmarks that fall
within this corridor. It returns a list of these landmarks, excluding those already visited, sorted by their importance.
Args:
all_landmarks (list[Landmark]): list of all available landmarks.
visited_landmarks (list[Landmark]): list of landmarks that have already been visited.
width (float): Width of the corridor around the visited landmarks.
Returns:
list[Landmark]: list of important landmarks within the corridor that have not been visited yet.
"""
second_order_landmarks = []
visited_names = []
area = self.create_corridor(visited_landmarks, width)
for visited in visited_landmarks :
visited_names.append(visited.name)
for landmark in all_landmarks :
if self.is_in_area(area, landmark.location) and landmark.name not in visited_names:
second_order_landmarks.append(landmark)
return take_most_important.take_most_important(second_order_landmarks, len(visited_landmarks))
# Try fix the shortest path using shapely
def fix_using_polygon(self, tour: list[Landmark])-> list[Landmark] :
"""
Improve the tour path using geometric methods to ensure it follows a more optimal shape.
This function creates a polygon from the given tour and attempts to refine it using a concave hull. It reorders
the landmarks to fit within this refined polygon and adjusts the tour to ensure the 'start' landmark is at the
beginning. It also checks if the final polygon is simple and rearranges the tour if necessary.
Args:
tour (list[Landmark]): list of landmarks representing the current tour path.
Returns:
list[Landmark]: Refined list of landmarks in the order of visit to produce a better tour path.
"""
coords = []
coords_dict = {}
for landmark in tour :
coords.append(landmark.location)
if landmark.name != 'finish' :
coords_dict[landmark.location] = landmark
tour_poly = Polygon(coords)
better_tour_poly = tour_poly.buffer(0)
try :
xs, ys = better_tour_poly.exterior.xy
if len(xs) != len(tour) :
better_tour_poly = concave_hull(MultiPoint(coords)) # Create concave hull with "core" of tour leaving out start and finish
xs, ys = better_tour_poly.exterior.xy
except Exception :
better_tour_poly = concave_hull(MultiPoint(coords)) # Create concave hull with "core" of tour leaving out start and finish
xs, ys = better_tour_poly.exterior.xy
# reverse the xs and ys
xs.reverse()
ys.reverse()
better_tour = [] # list of ordered visit
name_index = {} # Maps the name of a landmark to its index in the concave polygon
# Loop through the polygon and generate the better (ordered) tour
for i,x in enumerate(xs[:-1]) :
y = ys[i]
better_tour.append(coords_dict[tuple((x,y))])
name_index[coords_dict[tuple((x,y))].name] = i
# Scroll the list to have start in front again
start_index = name_index['start']
better_tour = better_tour[start_index:] + better_tour[:start_index]
# Append the finish back and correct the time to reach
better_tour.append(tour[-1])
# Rearrange only if polygon still not simple
if not better_tour_poly.is_simple :
better_tour = self.rearrange(better_tour)
return better_tour
def refine_optimization(
self,
all_landmarks: list[Landmark],
base_tour: list[Landmark],
max_time: int,
detour: int
) -> list[Landmark]:
"""
This is the second stage of the optimization. It refines the initial tour by considering additional minor landmarks along the predicted path and re-solving the optimization.
If no detour tolerance is allowed, the initial tour is returned unchanged. Otherwise, minor landmarks around the initial path are added to the candidate set and a new
optimization problem is solved to find a potentially better tour. The new tour is then adjusted using a nearest neighbor heuristic and polygon-based methods to
ensure a sensible path, and the refined tour is returned.
Args:
all_landmarks (list[Landmark]): The full list of landmarks available for the optimization.
base_tour (list[Landmark]): The initial tour path to be refined.
max_time (int): The maximum time available for the tour in minutes.
detour (int): The maximum detour time allowed for the tour in minutes.
Returns:
list[Landmark]: The refined list of landmarks representing the optimized tour path.
"""
# No need to refine if no detour is taken
if detour == 0:
return base_tour
minor_landmarks = self.get_minor_landmarks(all_landmarks, base_tour, self.detour_corridor_width)
self.logger.info(f"Using {len(minor_landmarks)} minor landmarks around the predicted path")
# full set of visitable landmarks
full_set = base_tour[:-1] + minor_landmarks # create full set of possible landmarks (without finish)
full_set.append(base_tour[-1]) # add finish back
# get a new tour
new_tour = self.optimizer.solve_optimization(
max_time = max_time + detour,
landmarks = full_set
)
if new_tour is None:
self.logger.warning("No solution found for the refined tour. Returning the initial tour.")
new_tour = base_tour
# Find shortest path using the nearest neighbor heuristic
better_tour, better_poly = self.find_shortest_path_through_all_landmarks(new_tour)
# Fix the tour using Polygons if the path looks weird
if base_tour[0].location == base_tour[-1].location and not better_poly.is_valid :
better_tour = self.fix_using_polygon(better_tour)
return better_tour
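# Call sketch mirroring tester.py (variable names assumed from there):
refiner = Refiner(optimizer=optimizer)
refined_tour = refiner.refine_optimization(
    all_landmarks=landmarks,   # full candidate set from LandmarkManager
    base_tour=base_tour,       # result of the first-stage optimization
    max_time=180,
    detour=30,
)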

View File

@ -0,0 +1,38 @@
from structs.landmark import Landmark
def take_most_important(landmarks: list[Landmark], N_important) -> list[Landmark] :
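"""
Deduplicate landmarks by name, keeping the highest attractiveness score for each name, and return the N_important most attractive ones.
"""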
L = len(landmarks)
L_copy = []
L_clean = []
scores = [0]*len(landmarks)
names = []
name_id = {}
for i, elem in enumerate(landmarks) :
if elem.name not in names :
names.append(elem.name)
name_id[elem.name] = [i]
L_copy.append(elem)
else :
name_id[elem.name] += [i]
scores = []
for j in name_id[elem.name] :
scores.append(landmarks[j].attractiveness)
best_id = max(range(len(scores)), key=scores.__getitem__)
t = name_id[elem.name][best_id]
if t == i :
for old in L_copy :
if old.name == elem.name :
old.attractiveness = landmarks[t].attractiveness
scores = [0]*len(L_copy)
for i, elem in enumerate(L_copy) :
scores[i] = elem.attractiveness
res = sorted(range(len(scores)), key = lambda sub: scores[sub])[-N_important:]
for i, elem in enumerate(L_copy) :
if i in res :
L_clean.append(elem)
return L_clean

View File

View File

@ -1,40 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nav-backend
spec:
replicas: 1
selector:
matchLabels:
app: nav-backend
template:
metadata:
labels:
app: nav-backend
spec:
containers:
- name: worker
image: backend-image
ports:
- containerPort: 8000
env:
- name: NUM_WORKERS
value: "3"
- name: OSM_CACHE_DIR
value: "/osm-cache"
volumeMounts:
- name: osm-cache
mountPath: /osm-cache
resources:
requests:
cpu: 100m
memory: 100Mi
limits:
cpu: 4
memory: 10Gi
volumes:
- name: osm-cache
persistentVolumeClaim:
claimName: osm-cache

View File

@ -1,15 +0,0 @@
kind: IngressRoute
apiVersion: traefik.io/v1alpha1
metadata:
name: nav-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`nav.kluster.moll.re`)
kind: Rule
services:
- name: nav-service
port: 8000
tls:
certResolver: default-tls

View File

@ -1,16 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: nav
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
images:
- name: backend-image
newName: git.kluster.moll.re/remoll/fast_network_navigation/backend
newTag: latest

View File

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: osm-cache
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nav-service
spec:
selector:
app: nav-backend
ports:
- protocol: TCP
port: 8000
targetPort: 8000