Compare commits

1 commit

ac99ef3930  Add renovate.json  2024-07-27 12:30:05 +00:00

256 changed files with 6809 additions and 13014 deletions


@@ -1,24 +0,0 @@
on:
  push:
    tags:
      - v*
name: Build and deploy the backend to production
jobs:
  build-and-push:
    name: Build and push image
    uses: ./.gitea/workflows/workflow_build-image.yaml
    with:
      tag: stable
    secrets:
      PACKAGE_REGISTRY_ACCESS: ${{ secrets.PACKAGE_REGISTRY_ACCESS }}
  deploy-prod:
    name: Deploy to production
    uses: ./.gitea/workflows/workflow_deploy-container.yaml
    with:
      overlay: prod
    secrets:
      KUBE_CONFIG: ${{ secrets.KUBE_CONFIG }}
    needs: build-and-push


@@ -1,26 +0,0 @@
on:
  pull_request:
    branches:
      - main
    paths:
      - backend/**
name: Build and deploy the backend to staging
jobs:
  build-and-push:
    name: Build and push image
    uses: ./.gitea/workflows/workflow_build-image.yaml
    with:
      tag: unstable
    secrets:
      PACKAGE_REGISTRY_ACCESS: ${{ secrets.PACKAGE_REGISTRY_ACCESS }}
  deploy-prod:
    name: Deploy to staging
    uses: ./.gitea/workflows/workflow_deploy-container.yaml
    with:
      overlay: stg
    secrets:
      KUBE_CONFIG: ${{ secrets.KUBE_CONFIG }}
    needs: build-and-push


@@ -1,17 +1,12 @@
 on:
-  workflow_call:
-    inputs:
-      tag:
-        required: true
-        type: string
-    secrets:
-      PACKAGE_REGISTRY_ACCESS:
-        required: true
+  pull_request:
+    branches:
+      - main
+    paths:
+      - backend/**
 name: Build and push docker image
 jobs:
   build:
     name: Build
@@ -34,5 +29,5 @@ jobs:
       uses: docker/build-push-action@v5
       with:
         context: backend
-        tags: git.kluster.moll.re/anydev/anyway-backend:${{ inputs.tag }}
+        tags: git.kluster.moll.re/anydev/anyway-backend:latest
         push: true


@@ -1,32 +0,0 @@
on:
  pull_request:
    branches:
      - main
    paths:
      - backend/**
name: Run linting on the backend code
jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - uses: https://gitea.com/actions/checkout@v4
      - name: Install dependencies
        run: |
          apt-get update && apt-get install -y python3 python3-pip
          pip install pipenv
      - name: Install packages
        run: |
          ls -la
          # only install dev-packages
          pipenv install --categories=dev-packages
        working-directory: backend
      - name: Run linter
        run: pipenv run pylint src --fail-under=9
        working-directory: backend


@@ -1,39 +0,0 @@
on:
  pull_request:
    branches:
      - main
    paths:
      - backend/**
name: Run testing on the backend code
jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - uses: https://gitea.com/actions/checkout@v4
      - name: Install dependencies
        run: |
          apt-get update && apt-get install -y python3 python3-pip
          pip install pipenv
      - name: Install packages
        run: |
          ls -la
          # install all packages, including dev-packages
          pipenv install --dev
        working-directory: backend
      - name: Run Tests
        run: pipenv run pytest src --html=report.html --self-contained-html --log-cli-level=DEBUG
        working-directory: backend
      - name: Upload HTML report
        if: always()
        uses: https://gitea.com/actions/upload-artifact@v3
        with:
          name: pytest-html-report
          path: backend/report.html


@@ -0,0 +1,65 @@
on:
  pull_request:
    branches:
      - main
    paths:
      - frontend/**
name: Build and release APK
jobs:
  build:
    name: Build APK
    runs-on: ubuntu-latest
    steps:
      - name: Install prerequisites
        run: |
          apt-get update
          apt-get install -y jq
      - uses: https://gitea.com/actions/checkout@v4
      - uses: https://github.com/actions/setup-java@v4
        with:
          java-version: '17'
          distribution: 'zulu'
      - name: Fix flutter SDK folder permission
        run: git config --global --add safe.directory "*"
      - uses: https://github.com/subosito/flutter-action@v2
        with:
          channel: stable
          flutter-version: 3.22.0
          cache: true
      - name: Setup Android SDK
        uses: https://github.com/android-actions/setup-android@v3
      - run: flutter pub get
        working-directory: ./frontend
      - name: Add required secrets
        run: |
          echo ${{ secrets.ANDROID_SECRETS_PROPERTIES }} > ./android/secrets.properties
        working-directory: ./frontend
      - name: Sanity check
        run: |
          ls
          ls -lah android
        working-directory: ./frontend
      - run: flutter build apk --release --split-per-abi --build-number=${{ gitea.run_number }}
        working-directory: ./frontend
      - name: Upload APKs to artifacts
        uses: https://gitea.com/actions/upload-artifact@v3
        with:
          name: app-release
          path: frontend/build/app/outputs/flutter-apk/
          if-no-files-found: error
          retention-days: 15


@@ -1,74 +0,0 @@
on:
  pull_request:
    branches:
      - main
    paths:
      - frontend/**
name: Build and release debug APK
defaults:
  run:
    working-directory: frontend/android
jobs:
  build:
    runs-on: macos
    env:
      # $BUNDLE_GEMFILE must be set at the job level, so it is set for all steps
      BUNDLE_GEMFILE: ${{ gitea.workspace }}/frontend/android/Gemfile
    steps:
      - uses: https://gitea.com/actions/checkout@v4
      - uses: https://github.com/actions/setup-java@v4
        with:
          java-version: '17'
          distribution: 'zulu'
      - name: Setup Android SDK
        uses: https://github.com/android-actions/setup-android@v3
      - name: Fix flutter SDK folder permission
        run: git config --global --add safe.directory "*"
      - uses: https://github.com/subosito/flutter-action@v2
        with:
          channel: stable
          flutter-version-file: ${{ gitea.workspace }}/frontend/pubspec.yaml
          architecture: x64
          cache: true
      - name: Install dependencies and clean up
        run: |
          flutter pub get
          flutter clean
      - name: Set up ruby env and install fastlane
        uses: https://github.com/ruby/setup-ruby@v1
        with:
          ruby-version: 3.3
          bundler-cache: true # runs 'bundle install' and caches installed gems automatically
      - name: Infer version number from git tag
        id: version
        env:
          REF_NAME: ${{ gitea.ref_name }}
        run:
          # remove the 'v' prefix from the tag name
          echo "BUILD_NAME=${REF_NAME//v}" >> $GITHUB_ENV
      - name: Add required secret files
        run: |
          echo "${{ secrets.ANDROID_SECRET_PROPERTIES_BASE64 }}" | base64 -d > secrets.properties
          echo "${{ secrets.ANDROID_GOOGLE_PLAY_JSON_BASE64 }}" | base64 -d > google-key.json
          echo "${{ secrets.ANDROID_KEYSTORE_BASE64 }}" | base64 -d > release.keystore
      - name: Run fastlane lane
        run: bundle exec fastlane deploy_beta
        env:
          BUILD_NUMBER: ${{ gitea.run_number }}
          # BUILD_NAME is implicitly available
          ANDROID_GOOGLE_MAPS_API_KEY: ${{ secrets.ANDROID_GOOGLE_MAPS_API_KEY }}


@@ -1,72 +0,0 @@
on:
  pull_request:
    branches:
      - main
    paths:
      - frontend/**
name: Build and release debugging app to ios testflight
defaults:
  run:
    working-directory: frontend/ios
jobs:
  build:
    runs-on: macos
    env:
      # $BUNDLE_GEMFILE must be set at the job level, so it is set for all steps
      BUNDLE_GEMFILE: ${{ gitea.workspace }}/frontend/ios/Gemfile
    steps:
      - uses: https://gitea.com/actions/checkout@v4
      - name: Install Flutter
        uses: https://github.com/subosito/flutter-action@v2
        with:
          channel: stable
          flutter-version-file: ${{ gitea.workspace }}/frontend/pubspec.yaml
          architecture: x64
          cache: true
      - name: Install dependencies and clean up
        run: |
          flutter pub get
          bundle exec pod install
          flutter clean
          bundle exec pod cache clean --all
      - name: Set up ruby env
        uses: https://github.com/ruby/setup-ruby@v1
        with:
          ruby-version: 3.3
          bundler-cache: true # runs 'bundle install' and caches installed gems automatically
      - name: Infer version number from git tag
        id: version
        env:
          REF_NAME: ${{ gitea.ref_name }}
        run:
          # remove the 'v' prefix from the tag name
          echo "BUILD_NAME=${REF_NAME//v}" >> $GITHUB_ENV
      - name: Setup SSH key for match git repo
        # and mark the host as known
        run: |
          echo $MATCH_REPO_SSH_KEY | base64 --decode > ~/.ssh/id_rsa
          chmod 600 ~/.ssh/id_rsa
          ssh-keyscan -p 2222 git.kluster.moll.re > ~/.ssh/known_hosts
        env:
          MATCH_REPO_SSH_KEY: ${{ secrets.IOS_MATCH_REPO_SSH_KEY_BASE64 }}
      - name: Run fastlane lane
        run: bundle exec fastlane deploy_beta
        env:
          BUILD_NUMBER: ${{ gitea.run_number }}
          # BUILD_NAME is implicitly available
          GOOGLE_MAPS_API_KEY: ${{ secrets.GOOGLE_MAPS_API_KEY }}
          IOS_ASC_KEY_ID: ${{ secrets.IOS_ASC_KEY_ID }}
          IOS_ASC_ISSUER_ID: ${{ secrets.IOS_ASC_ISSUER_ID }}
          IOS_ASC_KEY: ${{ secrets.IOS_ASC_KEY }}
          MATCH_PASSWORD: ${{ secrets.IOS_MATCH_PASSWORD }}
          IOS_GOOGLE_MAPS_API_KEY: ${{ secrets.IOS_GOOGLE_MAPS_API_KEY }}


@@ -0,0 +1,34 @@
# on:
#   pull_request:
#     branches:
#       - main
#     paths:
#       - frontend/**
# name: Build web
# jobs:
#   build:
#     name: Build Web
#     runs-on: ubuntu-latest
#     steps:
#       - name: Install prerequisites
#         run: |
#           sudo apt-get update
#           sudo apt-get install -y xz-utils
#       - uses: actions/checkout@v4
#       - uses: https://github.com/subosito/flutter-action@v2
#         with:
#           channel: stable
#           flutter-version: 3.19.6
#           cache: true
#       - run: flutter pub get
#         working-directory: ./frontend
#       - run: flutter build web
#         working-directory: ./frontend


@@ -1,35 +0,0 @@
on:
  workflow_call:
    inputs:
      overlay:
        required: true
        type: string
    secrets:
      KUBE_CONFIG:
        required: true
name: Deploy the newly built container
jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest
    steps:
      - uses: https://gitea.com/actions/checkout@v4
        with:
          submodules: true
      - name: setup kubectl
        uses: https://github.com/azure/setup-kubectl@v4
      - name: Set kubeconfig
        run: |
          echo "${{ secrets.KUBE_CONFIG }}" > kubeconfig
      - name: Deploy to k8s
        run: |
          kubectl apply -k backend/deployment/overlays/${{ inputs.overlay }} --kubeconfig=kubeconfig
          kubectl -n anyway-backend rollout restart deployment/anyway-backend-${{ inputs.overlay }} --kubeconfig=kubeconfig

.gitmodules vendored (3 lines changed)

@@ -1,3 +0,0 @@
[submodule "backend/deployment"]
	path = backend/deployment
	url = https://git.kluster.moll.re/anydev/anyway-backend-deployment

.vscode/launch.json vendored (19 lines changed)

@@ -9,16 +9,18 @@
             "name": "Backend - debug",
             "type": "debugpy",
             "request": "launch",
+            "module": "uvicorn",
             "env": {
                 "DEBUG": "true"
             },
-            "jinja": true,
-            "cwd": "${workspaceFolder}/backend",
-            "module": "fastapi",
             "args": [
-                "dev",
-                "src/main.py"
-            ]
+                "--app-dir",
+                "src",
+                "main:app",
+                "--reload",
+            ],
+            "jinja": true,
+            "cwd": "${workspaceFolder}/backend"
         },
         {
             "name": "Backend - tester",
@@ -36,10 +38,7 @@
             "type": "dart",
             "request": "launch",
             "program": "lib/main.dart",
-            "cwd": "${workspaceFolder}/frontend",
-            "env": {
-                "GOOGLE_MAPS_API_KEY": "testing"
-            }
+            "cwd": "${workspaceFolder}/frontend"
         },
         {
             "name": "Frontend - profile",


@@ -1,30 +0,0 @@
# License
## Proprietary License
All code and resources in this repository are the property of AnyDev. The software and related documentation are provided solely for use with services provided by AnyDev. Redistribution, modification, or use of this software outside of its intended service is strictly prohibited without explicit permission.
### Copyright © 2024 AnyDev
All rights reserved.
### Restrictions
- You may not modify, distribute, copy, or reverse engineer any part of this codebase.
- This software is licensed for use solely in conjunction with services provided by AnyDev.
- Any commercial use of this software is strictly prohibited without explicit written consent from AnyDev.
## Third-Party Dependencies
This project uses third-party dependencies, which are subject to their respective licenses.
- Python backend dependencies: fastapi, pydantic, numpy, shapely, etc. Licensed under their respective licenses.
- Flutter frontend dependencies: Cupertino Icons, sliding_up_panel, http, etc. Licensed under their respective licenses.
Please refer to each project's documentation for the specific terms and conditions.
## OpenStreetMap Data Usage
This project uses data derived from **OpenStreetMap**. OpenStreetMap data is available under the [Open Database License (ODbL)](https://www.openstreetmap.org/copyright). We comply with the ODbL license, and some of the data displayed in the service may be derived from OpenStreetMap sources. We do not redistribute raw OpenStreetMap data; instead, it is processed and transformed before being used in our services.
More information about OpenStreetMap data usage can be found [here](https://www.openstreetmap.org/copyright).


@@ -15,7 +15,7 @@ This project is divided into two main components: a frontend and a backend. The
 See the [frontend README](frontend/README.md) for more information. The application is centered around its map view, which displays the user's itinerary. This is based on the Google Maps API.
 ### Backend
-See the [backend README](backend/README.md) for more information. The backend is responsible for generating the itinerary based on the user's preferences and constraints. Rather than using google maps, we use the OpenStreetMap database through the Overpass API, which is much more flexible.
+See the [backend README](backend/README.md) for more information. The backend is responsible for generating the itinerary based on the user's preferences and constraints. Rather than using google maps, we use the OpenStreetMap API, which is much more flexible.
 ## Getting Started
@@ -24,8 +24,8 @@ Refer to the READMEs in the `frontend` and `backend` directories for instruction
 - `google_maps_flutter` plugin
 - Python 3
 - `fastapi`
-- `numpy`
-- `pydantic`
+- `OSMPythonTools`
+- `numpy, scipy`
 - Docker

backend/.gitignore vendored (7 lines changed)

@@ -1,9 +1,6 @@
 # osm-cache
-cache_XML/
-apicache/
-# secrets
-*secrets.yaml
+cache/
 
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]


@@ -1,656 +0,0 @@
[MAIN]
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Clear in-memory caches upon conclusion of linting. Useful if running pylint
# in a server-like mode.
clear-cache-post-run=no
# Load and enable all available extensions. Use --list-extensions to see a list
# all available extensions.
#enable-all-extensions=
# In error mode, messages with a category besides ERROR or FATAL are
# suppressed, and no reports are done by default. Error mode is compatible with
# disabling specific errors.
#errors-only=
# Always return a 0 (non-error) status code, even if lint errors are found.
# This is primarily useful in continuous integration scripts.
#exit-zero=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code.
extension-pkg-allow-list=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
# for backward compatibility.)
extension-pkg-whitelist=
# Return non-zero exit code if any of these messages/categories are detected,
# even if score is above --fail-under value. Syntax same as enable. Messages
# specified are enabled, while categories only check already-enabled messages.
fail-on=
# Specify a score threshold under which the program will exit with error.
fail-under=10
# Interpret the stdin as a python script, whose filename needs to be passed as
# the module_or_package argument.
#from-stdin=
# Files or directories to be skipped. They should be base names, not paths.
ignore=CVS
# Add files or directories matching the regular expressions patterns to the
# ignore-list. The regex matches against paths and can be in Posix or Windows
# format. Because '\\' represents the directory delimiter on Windows systems,
# it can't be used as an escape character.
ignore-paths=
# Files or directories matching the regular expression patterns are skipped.
# The regex matches against base names, not paths. The default value ignores
# Emacs file locks
ignore-patterns=^\.#
# List of module names for which member attributes should not be checked and
# will not be imported (useful for modules/projects where namespaces are
# manipulated during runtime and thus existing member attributes cannot be
# deduced by static analysis). It supports qualified module names, as well as
# Unix pattern matching.
ignored-modules=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use, and will cap the count on Windows to
# avoid hangs.
jobs=1
# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100
# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Resolve imports to .pyi stubs if available. May reduce no-member messages and
# increase not-an-iterable messages.
prefer-stubs=no
# Minimum Python version to use for version dependent checks. Will default to
# the version used to run pylint.
py-version=3.12
# Discover python modules and packages in the file system subtree.
recursive=no
# Add paths to the list of the source roots. Supports globbing patterns. The
# source root is an absolute path or a path relative to the current working
# directory used to determine a package namespace for modules located under the
# source root.
source-roots=
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# In verbose mode, extra non-checker-related info will be displayed.
#verbose=
[BASIC]
# Naming style matching correct argument names.
argument-naming-style=snake_case
# Regular expression matching correct argument names. Overrides argument-
# naming-style. If left empty, argument names will be checked with the set
# naming style.
#argument-rgx=
# Naming style matching correct attribute names.
attr-naming-style=snake_case
# Regular expression matching correct attribute names. Overrides attr-naming-
# style. If left empty, attribute names will be checked with the set naming
# style.
#attr-rgx=
# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused
bad-names-rgxs=
# Naming style matching correct class attribute names.
class-attribute-naming-style=any
# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style. If left empty, class attribute names will be checked
# with the set naming style.
#class-attribute-rgx=
# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE
# Regular expression matching correct class constant names. Overrides class-
# const-naming-style. If left empty, class constant names will be checked with
# the set naming style.
#class-const-rgx=
# Naming style matching correct class names.
class-naming-style=PascalCase
# Regular expression matching correct class names. Overrides class-naming-
# style. If left empty, class names will be checked with the set naming style.
#class-rgx=
# Naming style matching correct constant names.
const-naming-style=UPPER_CASE
# Regular expression matching correct constant names. Overrides const-naming-
# style. If left empty, constant names will be checked with the set naming
# style.
#const-rgx=
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming style matching correct function names.
function-naming-style=snake_case
# Regular expression matching correct function names. Overrides function-
# naming-style. If left empty, function names will be checked with the set
# naming style.
#function-rgx=
# Good variable names which should always be accepted, separated by a comma.
good-names=i,
j,
k,
ex,
Run,
_
# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted
good-names-rgxs=
# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no
# Naming style matching correct inline iteration names.
inlinevar-naming-style=any
# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style. If left empty, inline iteration names will be checked
# with the set naming style.
#inlinevar-rgx=
# Naming style matching correct method names.
method-naming-style=snake_case
# Regular expression matching correct method names. Overrides method-naming-
# style. If left empty, method names will be checked with the set naming style.
#method-rgx=
# Naming style matching correct module names.
module-naming-style=snake_case
# Regular expression matching correct module names. Overrides module-naming-
# style. If left empty, module names will be checked with the set naming style.
#module-rgx=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty
# Regular expression matching correct type alias names. If left empty, type
# alias names will be checked with the set naming style.
#typealias-rgx=
# Regular expression matching correct type variable names. If left empty, type
# variable names will be checked with the set naming style.
#typevar-rgx=
# Naming style matching correct variable names.
variable-naming-style=snake_case
# Regular expression matching correct variable names. Overrides variable-
# naming-style. If left empty, variable names will be checked with the set
# naming style.
#variable-rgx=
[CLASSES]
# Warn about protected attribute access inside special methods
check-protected-access-in-special-methods=no
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp,
asyncSetUp,
__post_init__
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
[DESIGN]
# List of regular expressions of class ancestor names to ignore when counting
# public methods (see R0903)
exclude-too-few-public-methods=
# List of qualified class names to ignore when counting class parents (see
# R0901)
ignored-parents=
# Maximum number of arguments for function / method.
max-args=5
# Maximum number of attributes for a class (see R0902).
max-attributes=20
# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5
# Maximum number of branch for function / method body.
max-branches=12
# Maximum number of locals for function / method body.
max-locals=30
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of positional arguments for function / method.
max-positional-arguments=5
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body.
max-returns=6
# Maximum number of statements in function / method body.
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
[EXCEPTIONS]
# Exceptions that will emit a warning when caught.
overgeneral-exceptions=builtins.BaseException,builtins.Exception
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=105
# Maximum number of lines in a module.
max-module-lines=1000
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[IMPORTS]
# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=
# Allow explicit reexports by alias from a package __init__.
allow-reexport-from-package=no
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=
# Output a graph (.gv or any supported image format) of external dependencies
# to the given file (report RP0402 must not be disabled).
ext-import-graph=
# Output a graph (.gv or any supported image format) of all (i.e. internal and
# external) dependencies to the given file (report RP0402 must not be
# disabled).
import-graph=
# Output a graph (.gv or any supported image format) of internal dependencies
# to the given file (report RP0402 must not be disabled).
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
# Couples of modules and preferred modules, separated by a comma.
preferred-modules=
[LOGGING]
# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=new
# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
# UNDEFINED.
confidence=HIGH,
CONTROL_FLOW,
INFERENCE,
INFERENCE_FAILURE,
UNDEFINED
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=raw-checker-failed,
bad-inline-option,
locally-disabled,
file-ignored,
suppressed-message,
useless-suppression,
deprecated-pragma,
use-symbolic-message-instead,
use-implicit-booleaness-not-comparison-to-string,
use-implicit-booleaness-not-comparison-to-zero,
import-error,
multiple-statements,
line-too-long,
logging-fstring-interpolation,
duplicate-code,
relative-beyond-top-level,
invalid-name,
too-many-arguments,
too-many-positional-arguments
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=
[METHOD_ARGS]
# List of qualified names (i.e., library.method) which require a timeout
# parameter e.g. 'requests.api.get,requests.api.post'
timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO
# Regular expression of note tags to take in consideration.
notes-rgx=
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error
# Let 'consider-using-join' be raised when the separator to join on would be
# non-empty (resulting in expected fixes of the type: ``"- " + " -
# ".join(items)``)
suggest-join-with-non-empty-separator=yes
[REPORTS]
# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'fatal', 'error', 'warning', 'refactor',
# 'convention', and 'info' which contain the number of messages in each
# category, as well as 'statement' which is the total number of statements
# analyzed. This score is used by the global evaluation report (RP0004).
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
msg-template=
# Set the output format. Available formats are: text, parseable, colorized,
# json2 (improved json format), json (old json format) and msvs (visual
# studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
#output-format=
# Tells whether to display a full report or only the messages.
reports=no
# Activate the evaluation score.
score=yes
[SIMILARITIES]
# Comments are removed from the similarity computation
ignore-comments=yes
# Docstrings are removed from the similarity computation
ignore-docstrings=yes
# Imports are removed from the similarity computation
ignore-imports=yes
# Signatures are removed from the similarity computation
ignore-signatures=yes
# Minimum lines number of a similarity.
min-similarity-lines=4
[SPELLING]
# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4
# Spelling dictionary name. No available dictionaries : You need to install
# both the python package and the system dependency for enchant to work.
spelling-dict=
# List of comma separated words that should be considered directives if they
# appear at the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no
[STRING]
# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no
# This flag controls whether the implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of symbolic message names to ignore for Mixin members.
ignored-checks-for-mixins=no-member,
not-async-context-manager,
not-context-manager,
attribute-defined-outside-init
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
# Regex pattern to define which classes are considered mixins.
mixin-class-rgx=.*[Mm]ixin
# List of decorators that change the signature of a decorated function.
signature-mutators=
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of names allowed to shadow builtins
allowed-redefined-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb
# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored.
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io


@@ -13,8 +13,5 @@ EXPOSE 8000
 # Set environment variables used by the deployment. These can be overridden by the user using this image.
 ENV NUM_WORKERS=1
 ENV OSM_CACHE_DIR=/cache
-ENV MEMCACHED_HOST_PATH=none
-ENV LOKI_URL=none
-# explicitly use a string instead of an argument list to force a shell and variable expansion
 
 CMD fastapi run src/main.py --port 8000 --workers $NUM_WORKERS


@@ -4,24 +4,13 @@ verify_ssl = true
 name = "pypi"
 
 [dev-packages]
-pylint = "*"
-pytest = "*"
-tomli = "*"
-httpx = "*"
-exceptiongroup = "*"
-pytest-html = "*"
-typing-extensions = "*"
-dill = "*"
 
 [packages]
 numpy = "*"
 fastapi = "*"
 pydantic = "*"
-geopy = "*"
 shapely = "*"
-pymemcache = "*"
-fastapi-cli = "*"
-scikit-learn = "*"
-loki-logger-handler = "*"
-pulp = "*"
 scipy = "*"
-requests = "*"
-pywikibot = "*"
+osmpythontools = "*"

backend/Pipfile.lock generated (2621 lines changed; diff suppressed because it is too large)


@@ -1,56 +1,16 @@
 # Backend
-This repository contains the backend code for the application. It utilizes **FastAPI** to quickly create a RESTful API that exposes the endpoints of the route optimizer.
+This repository contains the backend code for the application. It utilizes FastAPI that allows to quickly create a RESTful API that exposes the endpoints of the route optimizer.
 ## Getting Started
-### Directory Structure
-- The code for the Python application is located in the `src` directory.
-- Package management is handled with **pipenv**, and the dependencies are listed in the `Pipfile`.
-- Since the application is designed to be deployed in a container, the `Dockerfile` is provided to build the image.
-### Setting Up the Development Environment
-To set up your development environment using **pipenv**, follow these steps:
-1. Install `pipenv` by running:
-   ```bash
-   sudo apt install pipenv
-   ```
-2. Create and activate a virtual environment:
-   ```bash
-   pipenv shell
-   ```
-3. Install the dependencies listed in the `Pipfile`:
-   ```bash
-   pipenv install
-   ```
-4. The virtual environment will be created under:
-   ```bash
-   ~/.local/share/virtualenvs/...
-   ```
+- The code of the python application is located in the `src` directory.
+- Package management is handled with `pipenv` and the dependencies are listed in the `Pipfile`.
+- Since the application is aimed to be deployed in a container, the `Dockerfile` is provided to build the image.
 ### Deployment
-To deploy the backend docker container, we use kubernetes. Modifications to the backend are automatically pushed to a two-stage environment through the CI pipeline. See [deployment/README](deployment/README.md) for further information.
-The deployment configuration is included as a submodule in the `deployment` directory. The standalone repository is under [https://git.kluster.moll.re/anydev/anyway-backend-deployment/](https://git.kluster.moll.re/anydev/anyway-backend-deployment/).
+To deploy the backend docker container, we use kubernetes. The deployment configuration is located under [https://git.kluster.moll.re/anydev/deployment-backend/](https://git.kluster.moll.re/anydev/deployment-backend/).
 ## Development
-The backend application is structured around the `src` directory, which contains the core components for handling route optimization and API logic. Development generally involves working with key modules such as the optimization engine, Overpass API integration, and utilities for managing landmarks and trip data.
-### Key Areas:
-- **API Endpoints**: The main interaction with the backend is through the endpoints defined in `src/main.py`. FastAPI simplifies the creation of RESTful services that manage trip and landmark data.
-- **Optimization Logic**: The trip optimization and refinement are handled in the `src/optimization` module. This is where the core algorithms are implemented.
-- **Landmark Management**: Fetching and prioritizing points of interest (POIs) based on user preferences happens in `src/utils/LandmarkManager`.
-- **Testing**: The `src/tests` directory includes tests for various scenarios, ensuring that the logic works as expected.
-For detailed information, refer to the [src README](backend/src/README.md).
-### Running the Application:
-To run the backend locally, ensure that the virtual environment is activated and all dependencies are installed as outlined in the "Getting Started" section. You can start the FastAPI server with:
-```bash
-uvicorn src.main:app --reload
-```
+TBD


@@ -1,47 +0,0 @@
import pytest

pytest_plugins = ["pytest_html"]


def pytest_html_report_title(report):
    """modifying the title of html report"""
    report.title = "Backend Testing Report"


def pytest_html_results_table_header(cells):
    cells.insert(2, "<th>Detailed trip</th>")
    cells.insert(3, "<th>Trip Duration</th>")
    cells.insert(4, "<th>Target Duration</th>")
    cells[5] = "<th>Execution time</th>"  # rename the column containing execution times to avoid confusion


def pytest_html_results_table_row(report, cells):
    trip_details = getattr(report, "trip_details", "N/A")  # Default to "N/A" if no trip data
    trip_duration = getattr(report, "trip_duration", "N/A")  # Default to "N/A" if no trip data
    target_duration = getattr(report, "target_duration", "N/A")  # Default to "N/A" if no trip data
    cells.insert(2, f"<td>{trip_details}</td>")
    cells.insert(3, f"<td>{trip_duration}</td>")
    cells.insert(4, f"<td>{target_duration}</td>")


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    report = outcome.get_result()
    report.description = str(item.function.__doc__)

    # Attach trip_details if it exists
    if hasattr(item, "trip_details"):
        report.trip_details = " - ".join(item.trip_details)  # Convert list to string
    else:
        report.trip_details = "N/A"  # Default if trip_string is not set

    # Attach trip_duration if it exists
    if hasattr(item, "trip_duration"):
        report.trip_duration = item.trip_duration + " min"
    else:
        report.trip_duration = "N/A"  # Default if duration is not set

    # Attach target_duration if it exists
    if hasattr(item, "target_duration"):
        report.target_duration = item.target_duration + " min"
    else:
        report.target_duration = "N/A"  # Default if duration is not set
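These hooks read `trip_details`, `trip_duration`, and `target_duration` off the test item, so a test has to attach them first. A minimal sketch of how a test might do that through `request.node` (the trip values are invented placeholders, not taken from the repo):

```python
# Hypothetical test: attach trip metadata to the pytest item so the
# report hooks above can surface it in the HTML table.
def test_trip_is_generated(request):
    trip_stops = ["Louvre", "Pont Neuf", "Notre-Dame"]  # placeholder data
    request.node.trip_details = trip_stops     # joined with " - " by the hook
    request.node.trip_duration = "120"         # the hook appends " min"
    request.node.target_duration = "120"
    assert len(trip_stops) > 0
```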

@@ -1 +0,0 @@
Subproject commit 904f16bfc0624b6ab8569e0a70050aaa3bd64b3f


@@ -1,363 +0,0 @@
[
{
"name": "Chinatown",
"type": "shopping",
"location": [
45.7554934,
4.8444852
],
"osm_type": "way",
"osm_id": 996515596,
"attractiveness": 129,
"n_tags": 0,
"image_url": null,
"website_url": null,
"wiki_url": null,
"keywords": {},
"description": null,
"duration": 30,
"name_en": null,
"uuid": "285d159c-68ee-4b37-8d71-f27ee3d38b02",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "Galeries Lafayette",
"type": "shopping",
"location": [
45.7627107,
4.8556833
],
"osm_type": "way",
"osm_id": 1069872743,
"attractiveness": 197,
"n_tags": 11,
"image_url": null,
"website_url": "http://www.galerieslafayette.com/",
"wiki_url": null,
"keywords": null,
"description": null,
"duration": 30,
"name_en": null,
"uuid": "28f1bc30-10d3-4944-8861-0ed9abca012d",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "Muji",
"type": "shopping",
"location": [
45.7615971,
4.8543781
],
"osm_type": "way",
"osm_id": 1044165817,
"attractiveness": 259,
"n_tags": 14,
"image_url": null,
"website_url": "https://www.muji.com/fr/",
"wiki_url": null,
"keywords": null,
"description": null,
"duration": 30,
"name_en": "Muji",
"uuid": "957f86a5-6c00-41a2-815d-d6f739052be4",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "HEMA",
"type": "shopping",
"location": [
45.7619133,
4.8565239
],
"osm_type": "way",
"osm_id": 1069872750,
"attractiveness": 156,
"n_tags": 9,
"image_url": null,
"website_url": "https://fr.westfield.com/lapartdieu/store/HEMA/www.hema.fr",
"wiki_url": null,
"keywords": null,
"description": null,
"duration": 30,
"name_en": null,
"uuid": "8dae9d3e-e4c4-4e80-941d-0b106e22c85b",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "Cordeliers",
"type": "shopping",
"location": [
45.7622752,
4.8337998
],
"osm_type": "node",
"osm_id": 5545183519,
"attractiveness": 813,
"n_tags": 0,
"image_url": null,
"website_url": null,
"wiki_url": null,
"keywords": {},
"description": null,
"duration": 30,
"name_en": null,
"uuid": "ba02adb5-e28f-4645-8c2d-25ead6232379",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "Halles de Lyon Paul Bocuse",
"type": "shopping",
"location": [
45.7628282,
4.8505601
],
"osm_type": "relation",
"osm_id": 971529,
"attractiveness": 272,
"n_tags": 12,
"image_url": null,
"website_url": "https://www.halles-de-lyon-paulbocuse.com/",
"wiki_url": "fr:Halles de Lyon-Paul Bocuse",
"keywords": {
"importance": "national",
"height": null,
"place_type": "marketplace",
"date": null
},
"description": "Halles de Lyon Paul Bocuse is a marketplace of national importance.",
"duration": 30,
"name_en": null,
"uuid": "bbd50de3-aa91-425d-90c2-d4abfd1b4abe",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "Grand Bazar",
"type": "shopping",
"location": [
45.7632141,
4.8361975
],
"osm_type": "way",
"osm_id": 82399951,
"attractiveness": 93,
"n_tags": 7,
"image_url": null,
"website_url": null,
"wiki_url": null,
"keywords": null,
"description": null,
"duration": 30,
"name_en": null,
"uuid": "3de9131c-87c5-4efb-9fa8-064896fb8b29",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "Shopping Area",
"type": "shopping",
"location": [
45.7673452,
4.8438683
],
"osm_type": "node",
"osm_id": 0,
"attractiveness": 156,
"n_tags": 0,
"image_url": null,
"website_url": null,
"wiki_url": null,
"keywords": {},
"description": null,
"duration": 30,
"name_en": null,
"uuid": "df2482a8-7e2e-4536-aad3-564899b2fa65",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "Cour Oxyg\u00e8ne",
"type": "shopping",
"location": [
45.7620905,
4.8568873
],
"osm_type": "way",
"osm_id": 132673030,
"attractiveness": 63,
"n_tags": 5,
"image_url": null,
"website_url": null,
"wiki_url": null,
"keywords": null,
"description": null,
"duration": 30,
"name_en": null,
"uuid": "ed134f76-9a02-4bee-9c10-78454f7bc4ce",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "P\u00f4le de Commerces et de Loisirs Confluence",
"type": "shopping",
"location": [
45.7410414,
4.8171031
],
"osm_type": "way",
"osm_id": 440270633,
"attractiveness": 259,
"n_tags": 14,
"image_url": null,
"website_url": "https://www.confluence.fr/",
"wiki_url": null,
"keywords": null,
"description": null,
"duration": 30,
"name_en": null,
"uuid": "dd7e2f5f-0e60-4560-b903-e5ded4b6e36a",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "Grand H\u00f4tel-Dieu",
"type": "shopping",
"location": [
45.7586955,
4.8364597
],
"osm_type": "relation",
"osm_id": 300128,
"attractiveness": 546,
"n_tags": 22,
"image_url": null,
"website_url": "https://grand-hotel-dieu.com",
"wiki_url": "fr:H\u00f4tel-Dieu de Lyon",
"keywords": {
"importance": "international",
"height": null,
"place_type": "building",
"date": "C17"
},
"description": "Grand H\u00f4tel-Dieu is an internationally famous building. It was constructed in C17.",
"duration": 30,
"name_en": null,
"uuid": "a91265a8-ffbd-44f7-a7ab-3ff75f08fbab",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "Westfield La Part-Dieu",
"type": "shopping",
"location": [
45.761331,
4.855676
],
"osm_type": "way",
"osm_id": 62338376,
"attractiveness": 546,
"n_tags": 22,
"image_url": null,
"website_url": "https://fr.westfield.com/lapartdieu",
"wiki_url": "fr:La Part-Dieu (centre commercial)",
"keywords": null,
"description": null,
"duration": 30,
"name_en": null,
"uuid": "7d60316f-d689-4fcf-be68-ffc09353b826",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
},
{
"name": "Ainay",
"type": "shopping",
"location": [
45.7553105,
4.8312084
],
"osm_type": "node",
"osm_id": 5545126047,
"attractiveness": 132,
"n_tags": 0,
"image_url": null,
"website_url": null,
"wiki_url": null,
"keywords": {},
"description": null,
"duration": 30,
"name_en": null,
"uuid": "ad214f3d-a4b9-4078-876a-446caa7ab01c",
"must_do": false,
"must_avoid": false,
"is_secondary": false,
"time_to_reach_next": 0,
"next_uuid": null,
"is_viewpoint": false,
"is_place_of_worship": false
}
]
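Each entry above is a serialized `Landmark` (see the `src/structs` overview later in this diff). A small sketch of consuming such a fixture; the filename is an assumption, since the diff does not show where the file lives:

```python
# Load the landmark fixture above and rank entries by attractiveness.
import json

with open("landmarks_shopping.json") as f:  # hypothetical filename
    landmarks = json.load(f)

top = max(landmarks, key=lambda lm: lm["attractiveness"])
print(top["name"], top["attractiveness"])  # -> Cordeliers 813
```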

File diff suppressed because one or more lines are too long


@@ -1,65 +0,0 @@
# Overview of backend/src
This project is structured into several components that handle different aspects of the application's functionality. Below is a high-level overview of each folder and the key Python files in the `src` directory.
## Folders
### src/optimization
This folder contains modules related to the optimization algorithm used to compute the optimal trip. It comprises the optimizer for the first rough trip and a refiner to include less famous landmarks as well.
### src/overpass
This folder handles interactions with the Overpass API, including constructing and sending queries, caching responses, and parsing results from the Overpass database.
### src/parameters
The modules in this folder define and manage parameters for various parts of the application. This includes configuration values for the optimizer or the list of selectors for Overpass queries.
### src/structs
This folder defines the data structures commonly used within the project. The models leverage Pydantic's `BaseModel` to ensure data validation, serialization, and easy interaction between different components of the application. The main classes are:
- **Landmark**:
- Represents a point of interest in the context of a trip. It stores various attributes like the landmark's name, type, location (latitude and longitude), and its OSM details.
- It also includes other optional fields like image URLs, website links, and descriptions. Additionally, the class has properties to track its attractiveness score or relative importance.
- **Preferences**:
- This class captures user-defined preferences needed to personalize a trip. Preferences are provided for sightseeing (history and culture), nature (parks and gardens), and shopping. These preferences guide the trip optimization process.
- **Trip**:
- The `Trip` class represents the complete travel plan generated by the system. It holds key information like the trip's total time and the first landmark's UUID.
### src/tests
This folder contains unit tests and test cases for the application's various modules. It is used to ensure the correctness and stability of the code.
### src/utils
The `utils` folder contains utility classes and functions that provide core functionality for the application. The main component in this folder is the `LandmarkManager`, which is central to the process of fetching and organizing landmarks.
- **LandmarkManager**:
- The `LandmarkManager` is responsible for fetching landmarks from OpenStreetMap (via the Overpass API) and managing their classification based on user preferences. It processes raw geographical data, filters landmarks into relevant categories (such as sightseeing, nature, shopping), and prioritizes them for trip planning.
## Files
### src/cache.py
This file manages the caching mechanisms used throughout the application. It defines the caching strategy for storing and retrieving data, improving the performance of repeated operations by avoiding redundant API calls or computations.
### src/constants.py
This module defines global constants used throughout the project. These constants may include API endpoints, fixed configuration values, or reusable strings and integers that need to remain consistent.
### src/logging_config.py
This file configures the logging system for the application. It defines how logs are formatted, where they are output (e.g., console or file), and the logging levels (e.g., debug, info, error).
### src/main.py
This file contains the main application logic and API endpoints for interacting with the system. The application is built using the FastAPI framework, which provides several endpoints for creating trips, fetching trips, and retrieving landmarks or nearby facilities. The key endpoints include:
- **POST /trip/new**:
- This endpoint allows users to create a new trip by specifying preferences, start coordinates, and optionally end coordinates. The preferences guide the optimization process for selecting landmarks.
- Returns: A `Trip` object containing the optimized route, landmarks, and trip details.
- **GET /trip/{trip_uuid}**:
- This endpoint fetches an already generated trip by its unique identifier (`trip_uuid`). It retrieves the trip data from the cache.
- Returns: A `Trip` object corresponding to the given `trip_uuid`.
- **GET /landmark/{landmark_uuid}**:
- This endpoint retrieves a specific landmark by its unique identifier (`landmark_uuid`) from the cache.
- Returns: A `Landmark` object containing the details of the requested landmark.
- **POST /toilets/new**:
- This endpoint searches for public toilets near a specified location within a given radius. The location and radius are passed as query parameters.
- Returns: A list of `Toilets` objects located within the specified radius of the provided coordinates.
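A hedged sketch of exercising these endpoints with `requests`; the payload shape, preference format, and response field names are assumptions, since the schemas are not shown here:

```python
import requests

BASE = "http://localhost:8000"  # assumed local dev server

# Request a new trip (body shape is assumed, not taken from the source)
resp = requests.post(f"{BASE}/trip/new", json={
    "preferences": {
        "sightseeing": {"type": "sightseeing", "score": 5},
        "nature": {"type": "nature", "score": 2},
        "shopping": {"type": "shopping", "score": 3},
    },
    "start": [45.7640, 4.8357],  # lat, lon
})
trip = resp.json()

# Fetch the same trip back from the cache by its UUID (field name assumed)
cached = requests.get(f"{BASE}/trip/{trip['uuid']}").json()
```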


@@ -1,75 +0,0 @@
"""Module used for handling cache"""
from pymemcache import serde
from pymemcache.client.base import Client

from .constants import MEMCACHED_HOST_PATH


class DummyClient:
    """
    A dummy in-memory client that mimics the behavior of a memcached client.

    This class is designed to simulate the behavior of the `pymemcache.Client`
    for testing or development purposes. It stores data in a Python dictionary
    and provides methods to set, get, and update key-value pairs.

    Attributes:
        _data (dict): A dictionary that holds the key-value pairs.

    Methods:
        set(key, value, **kwargs):
            Stores the given key-value pair in the internal dictionary.
        set_many(data, **kwargs):
            Updates the internal dictionary with multiple key-value pairs.
        get(key, **kwargs):
            Retrieves the value associated with the given key from the internal
            dictionary.
    """
    _data = {}

    def set(self, key, value, **kwargs):  # pylint: disable=unused-argument
        """
        Store a key-value pair in the internal dictionary.

        Args:
            key: The key for the item to be stored.
            value: The value to be stored under the given key.
            **kwargs: Additional keyword arguments (unused).
        """
        self._data[key] = value

    def set_many(self, data, **kwargs):  # pylint: disable=unused-argument
        """
        Update the internal dictionary with multiple key-value pairs.

        Args:
            data: A dictionary containing key-value pairs to be added.
            **kwargs: Additional keyword arguments (unused).
        """
        self._data.update(data)

    def get(self, key, **kwargs):  # pylint: disable=unused-argument
        """
        Retrieve the value associated with the given key.

        Args:
            key: The key for the item to be retrieved.
            **kwargs: Additional keyword arguments (unused).

        Returns:
            The value associated with the given key if it exists.
        """
        return self._data[key]


if MEMCACHED_HOST_PATH is None:
    client = DummyClient()
else:
    client = Client(
        MEMCACHED_HOST_PATH,
        timeout=1,
        allow_unicode_keys=True,
        encoding='utf-8',
        serde=serde.pickle_serde
    )
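Since `DummyClient` mirrors the `set`/`set_many`/`get` surface of `pymemcache.Client`, callers can use the module-level `client` without caring which backend was chosen. A minimal usage sketch; the import path, key, and value are illustrative:

```python
from src.cache import client  # assumed import path for this module

client.set("landmark_285d159c", {"name": "Chinatown"})  # single entry
client.set_many({"a": 1, "b": 2})                       # bulk insert
print(client.get("landmark_285d159c"))                  # -> {'name': 'Chinatown'}
```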


@ -1,9 +1,6 @@
"""Module setting global parameters for the application such as cache, route generation, etc.""" import logging.config
import os
from pathlib import Path from pathlib import Path
from typing import List, Literal, Tuple import os
LOCATION_PREFIX = Path('src') LOCATION_PREFIX = Path('src')
PARAMETERS_DIR = LOCATION_PREFIX / 'parameters' PARAMETERS_DIR = LOCATION_PREFIX / 'parameters'
@ -12,12 +9,19 @@ LANDMARK_PARAMETERS_PATH = PARAMETERS_DIR / 'landmark_parameters.yaml'
OPTIMIZER_PARAMETERS_PATH = PARAMETERS_DIR / 'optimizer_parameters.yaml' OPTIMIZER_PARAMETERS_PATH = PARAMETERS_DIR / 'optimizer_parameters.yaml'
cache_dir_string = os.getenv('OSM_CACHE_DIR', './cache') cache_dir_string = os.getenv('OSM_CACHE_DIR', './cache')
OSM_CACHE_DIR = Path(cache_dir_string) OSM_CACHE_DIR = Path(cache_dir_string)
OSM_TYPES = List[Literal['way', 'node', 'relation']]
BBOX = Tuple[float, float, float, float]
MEMCACHED_HOST_PATH = os.getenv('MEMCACHED_HOST_PATH', None) import logging
if MEMCACHED_HOST_PATH == "none": import yaml
MEMCACHED_HOST_PATH = None
LOGGING_CONFIG = LOCATION_PREFIX / 'log_config.yaml'
config = yaml.safe_load(LOGGING_CONFIG.read_text())
logging.config.dictConfig(config)
# if we are in a debug session, set the log level to debug
if os.getenv('DEBUG', False):
logging.getLogger().setLevel(logging.DEBUG)
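As a small sketch of the removed configuration block above: both settings are read from the environment at import time, and the literal string "none" disables memcached. The module path is an assumption based on LOCATION_PREFIX.

# Sketch against the removed constants.py: environment variables must be set
# before the module is imported; "none" is normalized to Python's None.
import os
os.environ["OSM_CACHE_DIR"] = "/tmp/osm_cache"
os.environ["MEMCACHED_HOST_PATH"] = "none"

from src import constants  # assumed module path
assert constants.MEMCACHED_HOST_PATH is None
print(constants.OSM_CACHE_DIR)  # /tmp/osm_cache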

View File

@ -1,300 +0,0 @@
"""Find clusters of interest to add more general areas of visit to the tour."""
import logging
from typing import Literal, Tuple
import numpy as np
from sklearn.cluster import DBSCAN
from pydantic import BaseModel
from ..overpass.overpass import Overpass, get_base_info
from ..structs.landmark import Landmark
from ..utils.get_time_distance import get_distance
from ..utils.bbox import create_bbox
# silence the overpass logger
logging.getLogger('Overpass').setLevel(level=logging.CRITICAL)
class Cluster(BaseModel):
""""
A class representing an interesting area for shopping or sightseeing.
It can represent either a general area or a specifc route with start and end point.
The importance represents the number of shops found in this cluster.
Attributes:
type : either a 'street' or 'area' (representing a denser field of shops).
importance : size of the cluster (number of points).
centroid : center of the cluster.
start : if the type is a street it goes from here...
end : ...to here
"""
type: Literal['street', 'area']
importance: int
centroid: Tuple[float, float]
# start: Optional[list] = None # for later use if we want to have streets as well
# end: Optional[list] = None
class ClusterManager:
"""
A manager responsible for clustering points of interest, such as shops or historic sites,
to identify areas worth visiting. It uses the DBSCAN algorithm to detect clusters
based on a set of points retrieved from OpenStreetMap (OSM).
Attributes:
logger (logging.Logger): Logger for capturing relevant events and errors.
valid (bool): Indicates whether clusters were successfully identified.
all_points (list): All points retrieved from OSM, representing locations of interest.
cluster_points (list): Points identified as part of a cluster.
cluster_labels (list): Labels corresponding to the clusters each point belongs to.
cluster_type (Literal['sightseeing', 'shopping']): Type of clustering, either for sightseeing
landmarks or shopping areas.
"""
logger = logging.getLogger(__name__)
# NOTE: all points are in (lat, lon) format
valid: bool # Ensure the manager is valid (ie there are some clusters to be found)
all_points: list
cluster_points: list
cluster_labels: list
cluster_type: Literal['sightseeing', 'shopping']
def __init__(self, bbox: tuple, cluster_type: Literal['sightseeing', 'shopping']) -> None:
"""
Upon initialization, generate the point cloud used for cluster detection.
The points represent bag/clothes shops and general boutiques.
If the first step is successful, it applies the DBSCAN clustering algorithm with different
parameters depending on the size of the city (number of points).
It filters out noise points and keeps only the largest clusters.
A successful initialization updates:
- `self.cluster_points`: The points belonging to clusters.
- `self.cluster_labels`: The labels for the points in clusters.
The method also calls `filter_clusters()` to retain only the largest clusters.
Args:
bbox: The bounding box coordinates (around:radius, center_lat, center_lon).
"""
# Setup the caching in the Overpass class.
self.overpass = Overpass()
self.cluster_type = cluster_type
if cluster_type == 'shopping' :
osm_types = ['node']
sel = '"shop"~"^(bag|boutique|clothes)$"'
out = 'ids center'
elif cluster_type == 'sightseeing' :
osm_types = ['way']
sel = '"historic"~"^(monument|building|yes)$"'
out = 'ids center'
else :
raise NotImplementedError("Please choose only an available option for cluster detection")
# Initialize the points for cluster detection
try:
result = self.overpass.send_query(
bbox = bbox,
osm_types = osm_types,
selector = sel,
out = out
)
except Exception as e:
self.logger.warning(f"Error fetching clusters: {e}")
result = None # ensure 'result' is defined even if the query raised
if result is None :
self.logger.debug(f"Found no {cluster_type} clusters, overpass query returned no datapoints.")
self.valid = False
else :
points = []
for elem in result:
osm_type = elem.get('type')
# Get coordinates and append them to the points list
_, coords = get_base_info(elem, osm_type)
if coords is not None :
points.append(coords)
if points :
self.all_points = np.array(points)
# Apply DBSCAN to find clusters. Choose different settings for different cities.
if self.cluster_type == 'shopping' and len(self.all_points) > 200 :
dbscan = DBSCAN(eps=0.00118, min_samples=15, algorithm='kd_tree') # for large cities
elif self.cluster_type == 'sightseeing' :
dbscan = DBSCAN(eps=0.0025, min_samples=15, algorithm='kd_tree') # for historic neighborhoods
else :
dbscan = DBSCAN(eps=0.00075, min_samples=10, algorithm='kd_tree') # for small cities
labels = dbscan.fit_predict(self.all_points)
# Check that there is at least 1 cluster
if len(set(labels)) > 1 :
self.logger.info(f"Found {len(set(labels))} different {cluster_type} clusters.")
# Separate clustered points and noise points
self.cluster_points = self.all_points[labels != -1]
self.cluster_labels = labels[labels != -1]
self.filter_clusters() # Keep only the largest clusters. (Occasionally raises ValueError; root cause not yet identified.)
self.valid = True
else :
self.logger.info(f"Found 0 {cluster_type} clusters.")
self.valid = False
else :
self.logger.debug(f"Detected 0 {cluster_type} clusters.")
self.valid = False
def generate_clusters(self) -> list[Landmark]:
"""
Generate a list of landmarks based on identified clusters.
This method iterates over the different clusters, calculates the centroid
(as the mean of the points within each cluster), and assigns an importance
based on the size of the cluster.
The generated shopping locations are stored in `self.clusters`
as a list of `Cluster` objects, each with:
- `type`: Set to 'area'.
- `centroid`: The calculated centroid of the cluster.
- `importance`: The number of points in the cluster.
"""
if not self.valid :
return [] # Return empty list if no clusters were found
locations = []
# loop through the different clusters
for label in set(self.cluster_labels):
# Extract points belonging to the current cluster
current_cluster = self.cluster_points[self.cluster_labels == label]
# Calculate the centroid as the mean of the points
centroid = np.mean(current_cluster, axis=0)
centroid = tuple((round(centroid[0], 7), round(centroid[1], 7)))
if self.cluster_type == 'shopping' :
score = len(current_cluster)*3
else :
score = len(current_cluster)*15
locations.append(Cluster(
type='area',
centroid=centroid,
importance = score
))
# Transform the locations in landmarks and return the list
cluster_landmarks = []
for cluster in locations :
cluster_landmarks.append(self.create_landmark(cluster))
return cluster_landmarks
def create_landmark(self, cluster: Cluster) -> Landmark:
"""
Create a Landmark object based on the given shopping location.
This method queries the Overpass API for nearby neighborhoods and shopping malls
within a 300 m radius around the cluster centroid. It selects the closest
result and creates a landmark with the associated details such as name, type, and OSM ID.
Parameters:
cluster (Cluster): A Cluster object containing
the centroid and importance of the area.
Returns:
Landmark: A Landmark object containing details such as the name, type,
location, attractiveness, and OSM details.
"""
# Define the bounding box for a given radius around the coordinates
bbox = create_bbox(cluster.centroid, 300)
# Query neighborhoods and shopping malls
selectors = ['"place"~"^(suburb|neighborhood|neighbourhood|quarter|city_block)$"']
if self.cluster_type == 'shopping' :
selectors.append('"shop"="mall"')
new_name = 'Shopping Area'
t = 30
else :
new_name = 'Neighborhood'
t = 20
min_dist = float('inf')
osm_id = 0
osm_type = 'node'
osm_types = ['node', 'way', 'relation']
for sel in selectors :
try:
result = self.overpass.send_query(bbox = bbox,
osm_types = osm_types,
selector = sel,
out = 'ids center tags'
)
except Exception as e:
self.logger.warning(f"Error fetching clusters: {e}")
continue
if result is None :
self.logger.warning(f"Error fetching clusters: query result is None")
continue
for elem in result:
# Get basic info
id, coords, name = get_base_info(elem, elem.get('type'), with_name=True)
if name is None or coords is None :
continue
d = get_distance(cluster.centroid, coords)
if d < min_dist :
min_dist = d
new_name = name # add name
osm_type = elem.get('type') # add type: 'way' or 'relation'
osm_id = id # add OSM id
return Landmark(
name=new_name,
type=self.cluster_type,
location=cluster.centroid, # later: use the fact the we can also recognize streets.
attractiveness=cluster.importance,
n_tags=0,
osm_id=osm_id,
osm_type=osm_type,
duration=t
)
def filter_clusters(self):
"""
Filter clusters to retain only the 5 largest clusters by point count.
This method calculates the size of each cluster and filters out all but the
5 largest clusters. It then updates the cluster points and labels to reflect
only those from the top 5 clusters.
"""
# Count the points in each cluster
label_counts = np.bincount(self.cluster_labels)
# Get the labels of the 5 largest clusters
top_5_labels = np.argsort(label_counts)[-5:]
# Keep only the points that belong to the top 5 clusters
filtered_cluster_points = []
filtered_cluster_labels = []
for label in top_5_labels:
filtered_cluster_points.append(self.cluster_points[self.cluster_labels == label])
filtered_cluster_labels.append(np.full((label_counts[label],), label)) # Replicate the label
# update the cluster points and labels with the filtered data
self.cluster_points = np.vstack(filtered_cluster_points) # ValueError here
self.cluster_labels = np.concatenate(filtered_cluster_labels)
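To make the eps/min_samples settings above concrete, here is a self-contained toy run of DBSCAN on synthetic (lat, lon) points; the coordinates are made up and only illustrate how a dense blob separates from noise under the 'small city' parameters:

# Toy DBSCAN run mirroring the 'small city' settings above: a tight blob of
# 20 synthetic shop coordinates plus 5 scattered outliers.
import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
blob = rng.normal(loc=(48.8600, 2.3400), scale=0.0002, size=(20, 2))
noise = rng.uniform(low=(48.85, 2.33), high=(48.87, 2.35), size=(5, 2))
points = np.vstack([blob, noise])

labels = DBSCAN(eps=0.00075, min_samples=10, algorithm='kd_tree').fit_predict(points)
print(labels)                         # blob points share label 0, outliers get -1
print((labels != -1).sum(), "points ended up in clusters")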

View File

@ -1,440 +0,0 @@
"""Module used to import data from OSM and arrange them in categories."""
import logging
import yaml
from ..structs.preferences import Preferences
from ..structs.landmark import Landmark
from ..utils.take_most_important import take_most_important
from .cluster_manager import ClusterManager
from ..overpass.overpass import Overpass, get_base_info
from ..utils.bbox import create_bbox
from ..constants import AMENITY_SELECTORS_PATH, LANDMARK_PARAMETERS_PATH, OPTIMIZER_PARAMETERS_PATH
class LandmarkManager:
"""
Use this to manage landmarks.
Uses the overpass api to fetch landmarks and classify them.
"""
logger = logging.getLogger(__name__)
radius_close_to: int # radius in meters
church_coeff: float # coeff to adjust score of churches
nature_coeff: float # coeff to adjust score of parks
overall_coeff: float # coeff to adjust weight of tags
n_important: int # number of important landmarks to consider
def __init__(self) -> None:
with AMENITY_SELECTORS_PATH.open('r') as f:
self.amenity_selectors = yaml.safe_load(f)
with LANDMARK_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
self.max_bbox_side = parameters['max_bbox_side']
self.church_coeff = parameters['church_coeff']
self.nature_coeff = parameters['nature_coeff']
self.overall_coeff = parameters['overall_coeff']
self.tag_exponent = parameters['tag_exponent']
self.image_bonus = parameters['image_bonus']
self.wikipedia_bonus = parameters['wikipedia_bonus']
self.viewpoint_bonus = parameters['viewpoint_bonus']
self.pay_bonus = parameters['pay_bonus']
self.n_important = parameters['N_important']
with OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
self.walking_speed = parameters['average_walking_speed']
self.detour_factor = parameters['detour_factor']
# Setup the caching in the Overpass class.
self.overpass = Overpass()
self.logger.info('LandmarkManager successfully initialized.')
def generate_landmarks_list(self, center_coordinates: tuple[float, float], preferences: Preferences) -> tuple[list[Landmark], list[Landmark]]:
"""
Generate and prioritize a list of landmarks based on user preferences.
This method fetches landmarks from various categories (sightseeing, nature, shopping) based on the user's preferences
and current location. It scores and corrects these landmarks, removes duplicates, and then selects the most important
landmarks based on a predefined criterion.
Args:
center_coordinates (tuple[float, float]): The latitude and longitude of the center location around which to search.
preferences (Preferences): The user's preference settings that influence the landmark selection.
Returns:
tuple[list[Landmark], list[Landmark]]:
- A list of all existing landmarks.
- A list of the most important landmarks based on the user's preferences.
"""
self.logger.debug('Starting to fetch landmarks...')
max_walk_dist = int((preferences.max_time_minute/2)/60*self.walking_speed*1000/self.detour_factor)
radius = min(max_walk_dist, int(self.max_bbox_side/2))
# use set to avoid duplicates, this requires some __methods__ to be set in Landmark
all_landmarks = set()
# Create a bbox using the around technique, tuple of strings
bbox = create_bbox(center_coordinates, radius)
# list for sightseeing
if preferences.sightseeing.score != 0:
self.logger.debug('Fetching sightseeing landmarks...')
current_landmarks = self.fetch_landmarks(bbox, self.amenity_selectors['sightseeing'], preferences.sightseeing.type, preferences.sightseeing.score)
all_landmarks.update(current_landmarks)
self.logger.info(f'Found {len(current_landmarks)} sightseeing landmarks')
# special pipeline for historic neighborhoods
neighborhood_manager = ClusterManager(bbox, 'sightseeing')
historic_clusters = neighborhood_manager.generate_clusters()
all_landmarks.update(historic_clusters)
# list for nature
if preferences.nature.score != 0:
self.logger.debug('Fetching nature landmarks...')
current_landmarks = self.fetch_landmarks(bbox, self.amenity_selectors['nature'], preferences.nature.type, preferences.nature.score)
all_landmarks.update(current_landmarks)
self.logger.info(f'Found {len(current_landmarks)} nature landmarks')
# list for shopping
if preferences.shopping.score != 0:
self.logger.debug('Fetching shopping landmarks...')
current_landmarks = self.fetch_landmarks(bbox, self.amenity_selectors['shopping'], preferences.shopping.type, preferences.shopping.score)
self.logger.info(f'Found {len(current_landmarks)} shopping landmarks')
# set time for all shopping activities :
for landmark in current_landmarks :
landmark.duration = 30
all_landmarks.update(current_landmarks)
# special pipeline for shopping malls
shopping_manager = ClusterManager(bbox, 'shopping')
shopping_clusters = shopping_manager.generate_clusters()
all_landmarks.update(shopping_clusters)
landmarks_constrained = take_most_important(all_landmarks, self.n_important)
# self.logger.info(f'All landmarks generated : {len(all_landmarks)} landmarks around {center_coordinates}, and constrained to {len(landmarks_constrained)} most important ones.')
return all_landmarks, landmarks_constrained
def set_landmark_score(self, landmark: Landmark, landmarktype: str, preference_level: int) :
"""
Calculate and set the attractiveness score for a given landmark.
This method evaluates the landmark's attractiveness based on its properties
(number of tags, presence of Wikipedia URL, image, website, and whether it's
a place of worship) and adjusts the score using the user's preference level.
Args:
landmark (Landmark): The landmark object to score.
landmarktype (str): The type of the landmark (currently unused).
preference_level (int): The user's preference level for this landmark type.
"""
score = landmark.n_tags**self.tag_exponent
if landmark.wiki_url :
score *= self.wikipedia_bonus
if landmark.image_url :
score *= self.image_bonus
if landmark.website_url :
score *= self.wikipedia_bonus
if landmark.is_place_of_worship :
score *= self.church_coeff
if landmark.is_viewpoint :
score *= self.viewpoint_bonus
if landmarktype == 'nature' :
score *= self.nature_coeff
landmark.attractiveness = int(score * preference_level * 2)
def fetch_landmarks(self, bbox: tuple, amenity_selector: dict, landmarktype: str, preference_level: int) -> list[Landmark]:
"""
Fetches landmarks of a specified type from OpenStreetMap (OSM) within a bounding box centered on given coordinates.
Args:
bbox (tuple[float, float, float, float]): The bounding box coordinates (around:radius, center_lat, center_lon).
amenity_selector (dict): The Overpass API query selector for the desired landmark type.
landmarktype (str): The type of the landmark (e.g., 'sightseeing', 'nature', 'shopping').
Returns:
list[Landmark]: A list of Landmark objects that were fetched and filtered based on the provided criteria.
Notes:
- Landmarks are fetched using Overpass API queries.
- Selectors are translated from the dictionary to the Overpass query format. (e.g., 'amenity'='place_of_worship')
- Landmarks are filtered based on various conditions including tags and type.
"""
return_list = []
if landmarktype == 'nature' : query_conditions = None
else : query_conditions = ['count_tags()>5']
# caution, when applying a list of selectors, overpass will search for elements that match ALL selectors simultaneously
# we need to split the selectors into separate queries and merge the results
for sel in dict_to_selector_list(amenity_selector):
# self.logger.debug(f"Current selector: {sel}")
osm_types = ['way', 'relation']
if 'viewpoint' in sel :
query_conditions = None
osm_types.append('node')
# Send the overpass query
try:
result = self.overpass.send_query(
bbox = bbox,
osm_types = osm_types,
selector = sel,
conditions = query_conditions, # except for nature....
out = 'ids center tags'
)
except Exception as e:
self.logger.debug(f"Failed to fetch landmarks, proceeding without: {str(e)}")
continue
return_list += self._to_landmarks(result, landmarktype, preference_level)
# self.logger.debug(f"Fetched {len(return_list)} landmarks of type {landmarktype} in {bbox}")
return return_list
def _to_landmarks(self, elements: list, landmarktype, preference_level) -> list[Landmark]:
"""
Parse the Overpass API result and extract landmarks.
This method processes the JSON elements returned by the Overpass API and
extracts landmarks of types 'node', 'way', and 'relation'. It retrieves
relevant information such as name, coordinates, and tags, and converts them
into Landmark objects.
Args:
elements (list): The elements of the json response from the Overpass API.
landmarktype (str): The type of landmark (e.g., 'sightseeing', 'nature', 'shopping').
preference_level (int): The user's preference level for this landmark type.
Returns:
list[Landmark]: A list of Landmark objects extracted from the JSON data.
"""
if elements is None :
return []
landmarks = []
for elem in elements:
osm_type = elem.get('type')
id, coords, name = get_base_info(elem, osm_type, with_name=True)
if name is None or coords is None :
continue
tags = elem.get('tags')
# Convert this to Landmark object
landmark = Landmark(name=name,
type=landmarktype,
location=coords,
osm_id=id,
osm_type=osm_type,
attractiveness=0,
n_tags=len(tags))
# Browse through tags to add information to landmark.
skip = False
for key, value in tags.items():
# Flag this landmark to be skipped entirely if it is not suitable
# (a bare 'break' would only stop the tag loop but still append the landmark).
if key == 'building:part' and value == 'yes' :
skip = True
break
if 'disused:' in key :
skip = True
break
if 'boundary:' in key :
skip = True
break
if 'shop' in key and landmarktype != 'shopping' :
skip = True
break
# if value == 'apartments' :
# break
# Fill in the other attributes.
if key == 'image' :
landmark.image_url = value
if key == 'website' :
landmark.website_url = value
if value == 'place_of_worship' :
landmark.is_place_of_worship = True
if key == 'wikipedia' :
landmark.wiki_url = value
if key == 'name:en' :
landmark.name_en = value
if 'building:' in key or 'pay' in key :
landmark.n_tags -= 1
# Set the duration.
if value in ['museum', 'aquarium', 'planetarium'] :
landmark.duration = 60
elif value == 'viewpoint' :
landmark.is_viewpoint = True
landmark.duration = 10
elif value == 'cathedral' :
landmark.is_place_of_worship = False
landmark.duration = 10
if skip :
continue
landmark.description, landmark.keywords = self.description_and_keywords(tags)
self.set_landmark_score(landmark, landmarktype, preference_level)
landmarks.append(landmark)
return landmarks
def description_and_keywords(self, tags: dict):
"""
Generates a description and a set of keywords for a given landmark based on its tags.
Params:
tags (dict): A dictionary containing metadata about the landmark, including its name,
importance, height, date of construction, and visitor information.
Returns:
description (str): A string description of the landmark.
keywords (dict): A dictionary of keywords with fields such as 'importance', 'height',
'place_type', and 'date'.
"""
# Extract relevant fields
name = tags.get('name')
importance = tags.get('importance', None)
n_visitors = tags.get('tourism:visitors', None)
height = tags.get('height')
place_type = self.get_place_type(tags)
date = self.get_date(tags)
if place_type is None :
return None, None
# Start the description.
if importance is None :
if len(tags.keys()) < 5 :
return None, None
if len(tags.keys()) < 10 :
description = f"{name} is a well known {place_type}."
elif len(tags.keys()) < 17 :
importance = 'national'
description = f"{name} is a {place_type} of national importance."
else :
importance = 'international'
description = f"{name} is an internationally famous {place_type}."
else :
description = f"{name} is a {place_type} of {importance} importance."
if height is not None and date is not None :
description += f" This {place_type} was constructed in {date} and is ca. {height} meters high."
elif height is not None :
description += f" This {place_type} stands ca. {height} meters tall."
elif date is not None:
description += f" It was constructed in {date}."
# Format the visitor number
if n_visitors is not None :
n_visitors = int(n_visitors)
if n_visitors < 1000000 :
description += f" It welcomes {int(n_visitors/1000)} thousand visitors every year."
else :
description += f" It welcomes {round(n_visitors/1000000, 1)} million visitors every year."
# Set the keywords.
keywords = {"importance": importance,
"height": height,
"place_type": place_type,
"date": date}
return description, keywords
def get_place_type(self, data):
"""
Determines the type of the place based on available tags such as 'amenity', 'building',
'historic', and 'leisure'. The priority order is: 'historic' > 'building' (if not generic) >
'amenity' > 'leisure'.
Params:
data (dict): A dictionary containing metadata about the place.
Returns:
place_type (str): The determined type of the place, or None if no relevant type is found.
"""
amenity = data.get('amenity', None)
building = data.get('building', None)
historic = data.get('historic', None)
leisure = data.get('leisure')
if historic and historic != "yes":
return historic
if building and building not in ["yes", "civic", "government", "apartments", "residential", "commercial", "industrial", "retail", "religious", "public", "service"]:
return building
if amenity:
return amenity
if leisure:
return leisure
return None
def get_date(self, data):
"""
Extracts the most relevant date from the available tags, prioritizing 'construction_date',
'start_date', 'year_of_construction', and 'opening_date' in that order.
Params:
data (dict): A dictionary containing metadata about the place.
Returns:
date (str): The most relevant date found, or None if no date is available.
"""
construction_date = data.get('construction_date', None)
opening_date = data.get('opening_date', None)
start_date = data.get('start_date', None)
year_of_construction = data.get('year_of_construction', None)
# Prioritize based on availability
if construction_date:
return construction_date
if start_date:
return start_date
if year_of_construction:
return year_of_construction
if opening_date:
return opening_date
return None
def dict_to_selector_list(d: dict) -> list:
"""
Convert a dictionary of key-value pairs to a list of Overpass query strings.
Args:
d (dict): A dictionary of key-value pairs representing the selector.
Returns:
list: A list of strings representing the Overpass query selectors.
"""
return_list = []
for key, value in d.items():
if isinstance(value, list):
val = '|'.join(value)
return_list.append(f'{key}~"^({val})$"')
elif isinstance(value, str) and len(value) == 0:
return_list.append(f'{key}')
else:
return_list.append(f'{key}={value}')
return return_list
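For example (illustrative keys and values), the three branches above translate a selector dictionary as follows:

# Illustrative call to dict_to_selector_list; the input keys/values are made up.
selectors = dict_to_selector_list({
    'historic': ['monument', 'castle'],  # list -> regex alternation
    'tourism': '',                       # empty string -> bare key
    'leisure': 'park',                   # plain string -> equality
})
print(selectors)
# ['historic~"^(monument|castle)$"', 'tourism', 'leisure=park']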

View File

@ -0,0 +1,34 @@
version: 1
disable_existing_loggers: False
formatters:
  simple:
    format: '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
handlers:
  console:
    class: rich.logging.RichHandler
    formatter: simple
  # access:
  #   class: logging.FileHandler
  #   filename: logs/access.log
  #   level: INFO
  #   formatter: simple
loggers:
  uvicorn.error:
    level: INFO
    handlers:
      - console
    propagate: no
  # uvicorn.access:
  #   level: INFO
  #   handlers:
  #     - access
  #   propagate: no
root:
  level: INFO
  handlers:
    - console
  propagate: yes
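A standalone check that this file loads cleanly, mirroring how the new constants.py consumes it; the file path is an assumption, and the rich package must be installed for the console handler class to resolve.

# Minimal sketch: load the YAML above and hand it to dictConfig.
import logging
import logging.config
from pathlib import Path
import yaml

config = yaml.safe_load(Path("src/log_config.yaml").read_text())
logging.config.dictConfig(config)
logging.getLogger("uvicorn.error").info("logging configured")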

View File

@ -1,56 +0,0 @@
"""Sets up global logging configuration for the application."""
import logging
import os
logger = logging.getLogger(__name__)
def configure_logging():
"""
Called at startup of a FastAPI application instance to set up logging. Depending on the environment, it will log to stdout or to Loki.
"""
is_debug = os.getenv('DEBUG', "false") == "true"
is_kubernetes = os.getenv('KUBERNETES_SERVICE_HOST') is not None
if is_kubernetes:
# in that case we want to log to stdout and also to loki
from loki_logger_handler.loki_logger_handler import LokiLoggerHandler
loki_url = os.getenv('LOKI_URL')
if loki_url is None:
raise ValueError("LOKI_URL environment variable is not set")
loki_handler = LokiLoggerHandler(
url = loki_url,
labels = {'app': 'anyway', 'environment': 'staging' if is_debug else 'production'}
)
logger.info(f"Logging to Loki at {loki_url} with {loki_handler.labels} and {is_debug=}")
logging_handlers = [loki_handler, logging.StreamHandler()]
logging_level = logging.DEBUG if is_debug else logging.INFO
# silence the chatty logs loki generates itself
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
# no need for time since it's added by loki or can be shown in kube logs
logging_format = '%(name)s - %(levelname)s - %(message)s'
else:
# if we are in a debug (local) session, set verbose and rich logging
from rich.logging import RichHandler
logging_handlers = [RichHandler()]
logging_level = logging.DEBUG if is_debug else logging.INFO
logging_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(
level = logging_level,
format = logging_format,
handlers = logging_handlers
)
# also overwrite the uvicorn loggers
logging.getLogger('uvicorn').handlers = logging_handlers
logging.getLogger('uvicorn.access').handlers = logging_handlers
logging.getLogger('uvicorn.error').handlers = logging_handlers
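A usage sketch for local development, assuming the module path; with DEBUG=true and no KUBERNETES_SERVICE_HOST set, the RichHandler branch above is taken:

# Sketch: exercising configure_logging() outside Kubernetes with DEBUG enabled.
import logging
import os

os.environ["DEBUG"] = "true"

from src.logging_config import configure_logging  # assumed import path
configure_logging()

logging.getLogger(__name__).debug("verbose local logging via RichHandler")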

View File

@ -1,97 +1,41 @@
"""Main app for backend api"""
import logging import logging
import time from fastapi import FastAPI, Query, Body
from contextlib import asynccontextmanager
from fastapi import FastAPI, HTTPException, BackgroundTasks
from .logging_config import configure_logging from structs.landmark import Landmark
from .structs.landmark import Landmark from structs.preferences import Preferences
from .structs.preferences import Preferences from structs.linked_landmarks import LinkedLandmarks
from .structs.linked_landmarks import LinkedLandmarks from utils.landmarks_manager import LandmarkManager
from .structs.trip import Trip from utils.optimizer import Optimizer
from .landmarks.landmarks_manager import LandmarkManager from utils.refiner import Refiner
from .toilets.toilet_routes import router as toilets_router
from .optimization.optimizer import Optimizer
from .optimization.refiner import Refiner
from .overpass.overpass import fill_cache
from .cache import client as cache_client
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
app = FastAPI()
manager = LandmarkManager() manager = LandmarkManager()
optimizer = Optimizer() optimizer = Optimizer()
refiner = Refiner(optimizer=optimizer) refiner = Refiner(optimizer=optimizer)
@asynccontextmanager @app.post("/route/new")
async def lifespan(app: FastAPI): def get_route(preferences: Preferences, start: tuple[float, float], end: tuple[float, float] | None = None) -> str:
"""Function to run at the start of the app""" '''
logger.info("Setting up logging")
configure_logging()
yield
logger.info("Shutting down logging")
app = FastAPI(lifespan=lifespan)
app.include_router(toilets_router)
@app.post("/trip/new")
def new_trip(preferences: Preferences,
start: tuple[float, float],
end: tuple[float, float] | None = None,
background_tasks: BackgroundTasks = None) -> Trip:
"""
Main function to call the optimizer. Main function to call the optimizer.
:param preferences: the preferences specified by the user as the post body
Args: :param start: the coordinates of the starting point as a tuple of floats (as url query parameters)
preferences : the preferences specified by the user as the post body :param end: the coordinates of the finishing point as a tuple of floats (as url query parameters)
start : the coordinates of the starting point :return: the uuid of the first landmark in the optimized route
end : the coordinates of the finishing point '''
Returns:
(uuid) : The uuid of the first landmark in the optimized route
"""
if preferences is None: if preferences is None:
raise HTTPException(status_code=406, detail="Preferences not provided or incomplete.") raise ValueError("Please provide preferences in the form of a 'Preference' BaseModel class.")
if (preferences.shopping.score == 0 and
preferences.sightseeing.score == 0 and
preferences.nature.score == 0) :
raise HTTPException(status_code=406, detail="All preferences are 0.")
if start is None: if start is None:
raise HTTPException(status_code=406, detail="Start coordinates not provided") raise ValueError("Please provide the starting coordinates as a tuple of floats.")
if not (-90 <= start[0] <= 90 or -180 <= start[1] <= 180):
raise HTTPException(status_code=422, detail="Start coordinates not in range")
if end is None: if end is None:
end = start end = start
logger.info("No end coordinates provided. Using start=end.") logger.info("No end coordinates provided. Using start=end.")
logger.info(f"Requested new trip generation. Details:\n\tCoordinates: {start}\n\tTime: {preferences.max_time_minute}\n\tSightseeing: {preferences.sightseeing.score}\n\tNature: {preferences.nature.score}\n\tShopping: {preferences.shopping.score}") start_landmark = Landmark(name='start', type='start', location=(start[0], start[1]), osm_type='start', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
end_landmark = Landmark(name='end', type='finish', location=(end[0], end[1]), osm_type='end', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
start_landmark = Landmark(name='start',
type='start',
location=(start[0], start[1]),
osm_type='start',
osm_id=0,
attractiveness=0,
duration=0,
must_do=True,
n_tags = 0)
end_landmark = Landmark(name='finish',
type='finish',
location=(end[0], end[1]),
osm_type='end',
osm_id=0,
attractiveness=0,
duration=0,
must_do=True,
n_tags=0)
start_time = time.time()
# Generate the landmarks from the start location # Generate the landmarks from the start location
landmarks, landmarks_short = manager.generate_landmarks_list( landmarks, landmarks_short = manager.generate_landmarks_list(
@ -99,128 +43,26 @@ def new_trip(preferences: Preferences,
preferences = preferences preferences = preferences
) )
if len(landmarks) == 0 :
raise HTTPException(status_code=500, detail="No landmarks were found.")
# insert start and finish to the landmarks list # insert start and finish to the landmarks list
landmarks_short.insert(0, start_landmark) landmarks_short.insert(0, start_landmark)
landmarks_short.append(end_landmark) landmarks_short.append(end_landmark)
t_generate_landmarks = time.time() - start_time # TODO infer these parameters from the preferences
logger.info(f'Fetched {len(landmarks)} landmarks in \t: {round(t_generate_landmarks,3)} seconds') max_walking_time = 4 # hours
start_time = time.time() detour = 30 # minutes
# First stage optimization # First stage optimization
try: base_tour = optimizer.solve_optimization(max_walking_time*60, landmarks_short)
base_tour = optimizer.solve_optimization(preferences.max_time_minute, landmarks_short)
except Exception as exc:
logger.error(f"Trip generation failed: {str(exc)}")
raise HTTPException(status_code=500, detail=f"Optimization failed: {str(exc)}") from exc
t_first_stage = time.time() - start_time
start_time = time.time()
# Second stage optimization # Second stage optimization
# TODO : only if necessary (not enough landmarks for ex.) refined_tour = refiner.refine_optimization(landmarks, base_tour, max_walking_time*60, detour)
try :
refined_tour = refiner.refine_optimization(landmarks, base_tour,
preferences.max_time_minute,
preferences.detour_tolerance_minute)
except Exception as exc :
logger.warning(f"Refiner failed. Proceeding with base trip {str(exc)}")
refined_tour = base_tour
t_second_stage = time.time() - start_time
logger.debug(f'First stage optimization\t: {round(t_first_stage,3)} seconds')
logger.debug(f'Second stage optimization\t: {round(t_second_stage,3)} seconds')
logger.info(f'Total computation time\t: {round(t_first_stage + t_second_stage,3)} seconds')
linked_tour = LinkedLandmarks(refined_tour) linked_tour = LinkedLandmarks(refined_tour)
return linked_tour[0].uuid
# upon creation of the trip, persistence of both the trip and its landmarks is ensured.
trip = Trip.from_linked_landmarks(linked_tour, cache_client)
logger.info(f'Generated a trip of {trip.total_time} minutes with {len(refined_tour)} landmarks in {round(t_generate_landmarks + t_first_stage + t_second_stage,3)} seconds.')
logger.debug('Detailed trip :\n\t' + '\n\t'.join(f'{landmark}' for landmark in refined_tour))
background_tasks.add_task(fill_cache)
return trip
#### For already existing trips/landmarks
@app.get("/trip/{trip_uuid}")
def get_trip(trip_uuid: str) -> Trip:
"""
Look-up the cache for a trip that has been previously generated using its identifier.
Args:
trip_uuid (str) : unique identifier for a trip.
Returns:
(Trip) : the corresponding trip.
"""
try:
trip = cache_client.get(f"trip_{trip_uuid}")
return trip
except KeyError as exc:
logger.error(f"Failed to fetch trip with UUID {trip_uuid}: {str(exc)}")
raise HTTPException(status_code=404, detail="Trip not found") from exc
@app.get("/landmark/{landmark_uuid}") @app.get("/landmark/{landmark_uuid}")
def get_landmark(landmark_uuid: str) -> Landmark: def get_landmark(landmark_uuid: str) -> Landmark:
""" #cherche dans linked_tour et retourne le landmark correspondant
Returns a Landmark from its unique identifier. pass
Args:
landmark_uuid (str) : unique identifier for a Landmark.
Returns:
(Landmark) : the corresponding Landmark.
"""
try:
landmark = cache_client.get(f"landmark_{landmark_uuid}")
return landmark
except KeyError as exc:
logger.error(f"Failed to fetch landmark with UUID {landmark_uuid}: {str(exc)}")
raise HTTPException(status_code=404, detail="Landmark not found") from exc
@app.post("/trip/recompute-time/{trip_uuid}/{removed_landmark_uuid}")
def update_trip_time(trip_uuid: str, removed_landmark_uuid: str) -> Trip:
"""
Updates the reaching times of a given trip when removing a landmark.
Args:
landmark_uuid (str) : unique identifier for a Landmark.
Returns:
(Landmark) : the corresponding Landmark.
"""
# First, fetch the trip in the cache.
try:
trip = cache_client.get(f'trip_{trip_uuid}')
except KeyError as exc:
logger.error(f"Failed to update trip with UUID {trip_uuid} (trip not found): {str(exc)}")
raise HTTPException(status_code=404, detail='Trip not found') from exc
landmarks = []
next_uuid = trip.first_landmark_uuid
# Extract landmarks
try :
while next_uuid is not None:
landmark = cache_client.get(f'landmark_{next_uuid}')
# Filter out the removed landmark.
if next_uuid != removed_landmark_uuid :
landmarks.append(landmark)
next_uuid = landmark.next_uuid # Prepare for the next iteration
except KeyError as exc:
logger.error(f"Failed to update trip with UUID {trip_uuid} : {str(exc)}")
raise HTTPException(status_code=404, detail=f'landmark {next_uuid} not found') from exc
# Re-link every thing and compute times again
linked_tour = LinkedLandmarks(landmarks)
trip = Trip.from_linked_landmarks(linked_tour, cache_client)
return trip
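As a usage sketch against the removed /trip/new endpoint: preferences travel in the JSON body and coordinates as query parameters, mirroring the signature above. The JSON field names and the host are assumptions inferred from the attributes referenced in the handler.

# Hypothetical client call for POST /trip/new; field names inferred from
# preferences.sightseeing.score, preferences.max_time_minute, etc.
import requests

preferences = {
    "sightseeing": {"type": "sightseeing", "score": 5},
    "nature": {"type": "nature", "score": 3},
    "shopping": {"type": "shopping", "score": 1},
    "max_time_minute": 180,
    "detour_tolerance_minute": 30,
}
resp = requests.post(
    "http://localhost:8000/trip/new",
    params={"start": [48.8584, 2.2945]},  # assumed tuple serialization
    json=preferences,
)
print(resp.json())  # the generated Trip, including first_landmark_uuid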

View File

@ -1,638 +0,0 @@
"""Module responsible for sloving an MILP to find best tour around the given landmarks."""
import logging
from collections import defaultdict, deque
import yaml
import numpy as np
import pulp as pl
from ..structs.landmark import Landmark
from ..utils.get_time_distance import get_time
from ..constants import OPTIMIZER_PARAMETERS_PATH
# Silence the pulp logger
logging.getLogger('pulp').setLevel(level=logging.CRITICAL)
class Optimizer:
"""
Optimizes the balance between the efficiency of a tour and the inclusion of landmarks.
The `Optimizer` class is responsible for calculating the best possible detour adjustments
to a tour based on specific parameters such as detour time, walking speed, and the maximum
number of landmarks to visit. It helps refine a tour by determining whether adding additional
landmarks would significantly reduce the overall efficiency.
Responsibilities:
- Calculates the maximum detour time allowed for a given tour.
- Considers the detour factor, which accounts for real-world walking paths versus straight-line distance.
- Takes into account the average walking speed to estimate walking times.
- Limits the number of landmarks that can be added to the tour to prevent excessive detouring.
- Allows some overflow (overshoot) in the maximum detour time to accommodate for slight inefficiencies.
Attributes:
logger (logging.Logger): Logger for capturing relevant events and errors.
detour (int): The accepted maximum detour time in minutes.
detour_factor (float): The ratio between straight-line distance and actual walking distance in cities.
average_walking_speed (float): The average walking speed of an adult (in meters per second or kilometers per hour).
max_landmarks (int): The maximum number of landmarks to include in the tour.
overshoot (float): The overshoot allowance for exceeding the maximum detour time in a restrictive manner.
"""
logger = logging.getLogger(__name__)
detour: int = None # accepted max detour time (in minutes)
detour_factor: float # detour factor of straight line vs real distance in cities
average_walking_speed: float # average walking speed of adult
max_landmarks: int # max number of landmarks to visit
overshoot: float # overshoot to allow maxtime to overflow. Optimizer is a bit restrictive
def __init__(self) :
# load parameters from file
with OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
self.detour_factor = parameters['detour_factor']
self.average_walking_speed = parameters['average_walking_speed']
self.max_landmarks = parameters['max_landmarks']
self.overshoot = parameters['overshoot']
self.time_limit = parameters['time_limit']
self.gap_rel = parameters['gap_rel']
self.max_iter = parameters['max_iter']
def init_ub_time(self, prob: pl.LpProblem, x: pl.LpVariable, L: int, landmarks: list[Landmark], max_time: int):
"""
Initialize the objective function and inequality constraints for the linear program.
This function sets up the objective to maximize the attractiveness of visiting landmarks,
while ensuring that the total time (including travel and visit duration) does not exceed
the maximum allowed time. It calculates the pairwise travel times between landmarks and
incorporates visit duration to form the inequality constraints.
The objective is to maximize sightseeing by selecting the most attractive landmarks within
the time limit.
Args:
prob (pl.LpProblem): The linear programming problem where constraints and the objective will be added.
x (pl.LpVariable): A decision variable representing whether a landmark is visited.
L (int): The number of landmarks.
landmarks (list[Landmark]): List of landmarks to visit.
max_time (int): Maximum allowable time for sightseeing, including travel and visit duration.
Returns:
None: Adds the objective function and constraints to the LP problem directly.
"""
L = len(landmarks)
# Objective function coefficients. a*x1 + b*x2 + c*x3 + ...
c = np.zeros(L, dtype=np.int16)
# inequality matrix and vector
A_ub = np.zeros(L*L, dtype=np.int16)
b_ub = round(max_time*(1.1+max_time*self.overshoot))
for i, spot1 in enumerate(landmarks) :
c[i] = spot1.attractiveness
for j in range(i+1, L) :
if i !=j :
t = get_time(spot1.location, landmarks[j].location)
A_ub[i*L + j] = t + spot1.duration
A_ub[j*L + i] = t + landmarks[j].duration
# Expand 'c' to L*L so there is one coefficient per decision variable
c = np.tile(c, L)
# Now sort and modify A_ub for each row
if L > 22 :
for i in range(L):
# Get indices of the 22 smallest values in row i
row_values = A_ub[i*L:i*L+L]
closest_indices = np.argpartition(row_values, 22)[:22]
# Create a mask for non-closest landmarks
mask = np.ones(L, dtype=bool)
mask[closest_indices] = False
# Set non-closest landmarks to 32765
row_values[mask] = 32765
A_ub[i*L:i*L+L] = row_values
# Add the objective and the 1 distance constraint
prob += pl.lpSum([c[j] * x[j] for j in range(L*L)])
prob += (pl.lpSum([A_ub[j] * x[j] for j in range(L*L)]) <= b_ub)
def respect_number(self, prob: pl.LpProblem, x: pl.LpVariable, L: int, max_landmarks: int):
"""
Generate constraints to ensure each landmark is visited at most once and cap the total number of visited landmarks.
This function adds the following constraints to the linear program:
1. Each landmark is visited at most once by creating L-2 constraints (one for each landmark).
2. The total number of visited landmarks is capped by the specified maximum number (`max_landmarks`) plus 2.
Args:
prob (pl.LpProblem): The linear programming problem where constraints will be added.
x (pl.LpVariable): Decision variable indicating whether a landmark is visited.
L (int): The total number of landmarks.
max_landmarks (int): The maximum number of landmarks that can be visited.
Returns:
None: This function directly modifies the `prob` object by adding constraints.
"""
# L-2 constraints: each landmark is visited at most once
for i in range(1, L-1):
prob += (pl.lpSum([x[L*i + j] for j in range(L)]) <= 1)
# 1 constraint: cap the total number of visits
prob += (pl.lpSum([1 * x[j] for j in range(L*L)]) <= max_landmarks+2)
def break_sym(self, prob: pl.LpProblem, x: pl.LpVariable, L: int):
"""
Generate constraints to prevent simultaneous travel between two landmarks
in both directions. This constraint ensures that, for any pair of landmarks,
travel from landmark i to landmark j (dij) and travel from landmark j to landmark i (dji)
cannot happen simultaneously.
This method adds constraints to break symmetry, specifically to prevent
cyclic paths with only two elements. It does not prevent cyclic paths involving more than two elements.
Args:
prob (pl.LpProblem): The linear programming problem where constraints will be added.
x (pl.LpVariable): Decision variable representing travel between landmarks.
L (int): The total number of landmarks.
Returns:
None: This function modifies the `prob` object by adding constraints in-place.
"""
upper_ind = np.triu_indices(L, 0, L) # Get the upper triangular indices
up_ind_x = upper_ind[0]
up_ind_y = upper_ind[1]
# Loop over the upper triangular indices, excluding diagonal elements
for i, up_ind in enumerate(up_ind_x):
if up_ind != up_ind_y[i]:
# Add (L*L-L)/2 constraints to break symmetry
prob += (x[up_ind*L + up_ind_y[i]] + x[up_ind_y[i]*L + up_ind] <= 1)
def init_eq_not_stay(self, prob: pl.LpProblem, x: pl.LpVariable, L: int):
"""
Generate constraints to prevent staying at the same position during travel.
Specifically, it removes travel from a landmark to itself (e.g., d11, d22, d33, etc.).
This function adds one equality constraint to the optimization problem that ensures
no decision variable corresponding to staying at the same landmark is included
in the solution. This helps in ensuring that the path does not include self-loops.
Args:
prob (pl.LpProblem): The linear programming problem where constraints will be added.
x (pl.LpVariable): Decision variable representing travel between landmarks.
L (int): The total number of landmarks.
Returns:
None: This function modifies the `prob` object by adding an equality constraint in-place.
"""
A_eq = np.zeros((L, L), dtype=np.int8)
# Set diagonal elements to 1 (to prevent staying in the same position)
np.fill_diagonal(A_eq, 1)
A_eq = A_eq.flatten()
# First equality constraint
prob += (pl.lpSum([A_eq[j] * x[j] for j in range(L*L)]) == 0)
def respect_start_finish(self, prob: pl.LpProblem, x: pl.LpVariable, L: int):
"""
Generate constraints to ensure that the optimization starts at the designated
start landmark and finishes at the goal landmark.
Specifically, this function adds three equality constraints:
1. Ensures that the path starts at the designated start landmark (row 0).
2. Ensures that the path finishes at the designated goal landmark (row 1).
3. Prevents any arrivals at the start landmark or departures from the goal landmark (row 2).
Args:
prob (pl.LpProblem): The linear programming problem where constraints will be added.
x (pl.LpVariable): Decision variable representing travel between landmarks.
L (int): The total number of landmarks.
Returns:
None: This function modifies the `prob` object by adding three equality constraints in-place.
"""
# Fill-in row 0.
A_eq = np.zeros((3,L*L), dtype=np.int8)
A_eq[0, :L] = np.ones(L, dtype=np.int8) # sets departures only for start (horizontal ones)
for k in range(L-1) :
if k != 0 :
# Fill-in row 1
A_eq[1, k*L+L-1] = 1 # sets arrivals only for finish (vertical ones)
# Fill-in row 2
A_eq[2, k*L] = 1
A_eq[2, L*(L-1):] = np.ones(L, dtype=np.int8) # prevents arrivals at start and departures from goal
b_eq= [1, 1, 0]
# Add the constraints to pulp
for i in range(3) :
prob += (pl.lpSum([A_eq[i][j] * x[j] for j in range(L*L)]) == b_eq[i])
def respect_order(self, prob: pl.LpProblem, x: pl.LpVariable, L: int):
"""
Generate constraints to tie the optimization problem together and prevent
stacked ones, although this does not fully prevent circles.
This function adds constraints to the optimization problem that prevent
simultaneous travel between landmarks in a way that would result in stacked ones.
However, it does not fully prevent circular paths.
Args:
prob (pl.LpProblem): The linear programming problem where constraints will be added.
x (pl.LpVariable): Decision variable representing travel between landmarks.
L (int): The total number of landmarks.
Returns:
None: This function modifies the `prob` object by adding L-2 equality constraints in-place.
"""
# Loop through rows 1 to L-2 to prevent stacked ones
for i in range(1, L-1):
# Add the constraint that sums across each "row" or "block" in the decision variables
row_sum = -pl.lpSum(x[i + j*L] for j in range(L)) + pl.lpSum(x[i*L:(i+1)*L])
prob += (row_sum == 0)
def respect_user_must(self, prob: pl.LpProblem, x: pl.LpVariable, L: int, landmarks: list[Landmark]) :
"""
Generate constraints to ensure that landmarks marked as 'must_do' are included in the optimization.
This function adds constraints to the optimization problem to ensure that landmarks marked as
'must_do' are included in the solution. It precomputes the constraints and adds them to the
problem accordingly.
Args:
prob (pl.LpProblem): The linear programming problem where constraints will be added.
x (pl.LpVariable): Decision variable representing travel between landmarks.
L (int): The total number of landmarks.
landmarks (list[Landmark]): List of landmarks, where some are marked as 'must_do'.
Returns:
None: This function modifies the `prob` object by adding equality constraints in-place.
"""
ones = np.ones(L, dtype=np.int8)
for i, elem in enumerate(landmarks) :
if elem.must_do is True and i not in [0, L-1]:
# Build a fresh constraint row per landmark (reusing A_eq would accumulate rows).
A_eq = np.zeros(L*L, dtype=np.int8)
A_eq[i*L:i*L+L] = ones
prob += (pl.lpSum([A_eq[j] * x[j] for j in range(L*L)]) == 1)
if elem.must_avoid is True and i not in [0, L-1]:
A_eq = np.zeros(L*L, dtype=np.int8)
A_eq[i*L:i*L+L] = ones
# A must-avoid landmark gets zero departures (a sum of 2 would be infeasible).
prob += (pl.lpSum([A_eq[j] * x[j] for j in range(L*L)]) == 0)
def prevent_circle(self, prob: pl.LpProblem, x: pl.LpVariable, circle_vertices: list, L: int) :
"""
Prevent circular paths by adding constraints to the optimization.
This function ensures that circular paths in both directions (i.e., forward and reverse)
between landmarks are avoided in the optimization problem by adding the corresponding constraints.
Args:
prob (pl.LpProblem): The linear programming problem instance to which the constraints will be added.
x (pl.LpVariable): Decision variable representing the travel between landmarks in the problem.
circle_vertices (list): List of indices representing the landmarks that form a circular path.
L (int): The total number of landmarks.
Returns:
None: This function modifies the `prob` object by adding two equality constraints that
prevent circular paths in both directions for the specified circle vertices.
"""
l = np.zeros((2, L*L), dtype=np.int8)
for i, node in enumerate(circle_vertices[:-1]) :
next = circle_vertices[i+1]
l[0, node*L + next] = 1
l[1, next*L + node] = 1
s = circle_vertices[0]
g = circle_vertices[-1]
l[0, g*L + s] = 1
l[1, s*L + g] = 1
# Add the constraints
prob += (pl.lpSum([l[0][j] * x[j] for j in range(L*L)]) == 0)
prob += (pl.lpSum([l[1][j] * x[j] for j in range(L*L)]) == 0)
def is_connected(self, resx) :
"""
Determine the order of visits and detect any circular paths in the given configuration.
Args:
resx (list): List of edge weights.
Returns:
tuple[list[int], Optional[list[list[int]]]]: A tuple containing the visit order and a list of any detected circles.
"""
resx = np.round(resx).astype(np.int8) # round all elements and cast them to int
N = len(resx) # length of res
L = int(np.sqrt(N)) # number of landmarks. CAST INTO INT but should not be a problem because N = L**2 by def.
nonzeroind = np.nonzero(resx)[0] # the return is a little funny so I use the [0]
nonzero_tup = np.unravel_index(nonzeroind, (L,L))
ind_a = nonzero_tup[0]
ind_b = nonzero_tup[1]
# Extract all journeys
all_journeys_nodes = []
visited_nodes = set()
for node in ind_a:
if node not in visited_nodes:
journey_nodes = self.get_journey(node, ind_a, ind_b)
all_journeys_nodes.append(journey_nodes)
visited_nodes.update(journey_nodes)
for l in all_journeys_nodes :
if 0 in l :
all_journeys_nodes.remove(l)
break
if not all_journeys_nodes :
return None
return all_journeys_nodes
def get_journey(self, start, ind_a, ind_b):
"""
Trace the journey starting from a given node and follow the connections between landmarks.
This method constructs a graph from two lists of landmark connections, `ind_a` and `ind_b`,
where each element in `ind_a` is connected to the corresponding element in `ind_b`.
It then performs a depth-first search (DFS) starting from the `start` node to determine
the path (journey) by following the connections.
Args:
start (int): The starting node of the journey.
ind_a (list[int]): List of "from" nodes, representing the starting points of each connection.
ind_b (list[int]): List of "to" nodes, representing the endpoints of each connection.
Returns:
list[int]: A list of nodes representing the order of the journey, starting from the `start` node.
Example:
If `ind_a = [0, 1, 2]` and `ind_b = [1, 2, 3]`, starting from node 0, the journey would be `[0, 1, 2, 3]`.
"""
graph = defaultdict(list)
for a, b in zip(ind_a, ind_b):
graph[a].append(b)
journey_nodes = []
visited = set()
stack = deque([start])
while stack:
node = stack.pop()
if node not in visited:
visited.add(node)
journey_nodes.append(node)
for neighbor in graph[node]:
if neighbor not in visited:
stack.append(neighbor)
return journey_nodes
def get_order(self, resx):
"""
Determine the order of visits given the result of the optimization.
Args:
resx (list): List of edge weights.
Returns:
list[int]: A list containing the visit order.
"""
resx = np.round(resx).astype(np.uint8) # must contain only 0 and 1
N = len(resx) # length of res
L = int(np.sqrt(N)) # number of landmarks. CAST INTO INT but should not be a problem because N = L**2 by def.
nonzeroind = np.nonzero(resx)[0] # the return is a little funny so I use the [0]
nonzero_tup = np.unravel_index(nonzeroind, (L,L))
ind_a = nonzero_tup[0].tolist()
ind_b = nonzero_tup[1].tolist()
order = [0]
current = 0
used_indices = set() # Track visited index pairs
while True:
# Find index of the current node in ind_a
try:
i = ind_a.index(current)
except ValueError:
break # No more links, stop the search
if i in used_indices:
break # Prevent infinite loops
used_indices.add(i) # Mark this index as visited
next_node = ind_b[i] # Get the corresponding node in ind_b
order.append(next_node) # Add it to the path
# Switch roles: continue the walk from next_node
current = next_node
return order
def link_list(self, order: list[int], landmarks: list[Landmark])->list[Landmark] :
"""
Compute the time to reach from each landmark to the next and create a list of landmarks with updated travel times.
Args:
order (list[int]): List of indices representing the order of landmarks to visit.
landmarks (list[Landmark]): List of all landmarks.
Returns:
list[Landmark]: The updated linked list of landmarks with travel times
"""
L = []
j = 0
while j < len(order)-1 :
# get landmarks involved
elem = landmarks[order[j]]
next = landmarks[order[j+1]]
# get attributes
elem.time_to_reach_next = get_time(elem.location, next.location)
elem.must_do = True
elem.location = (round(elem.location[0], 5), round(elem.location[1], 5))
elem.next_uuid = next.uuid
L.append(elem)
j += 1
next.location = (round(next.location[0], 5), round(next.location[1], 5))
next.must_do = True
L.append(next)
return L
def warm_start(self, x: list[pl.LpVariable], L: int) :
"""
This function sets the initial values of the decision variables to a feasible solution.
This can help the solver start with a feasible or heuristic solution,
potentially speeding up convergence.
Args:
x (list[pl.LpVariable]): A list of PuLP decision variables (binary variables).
L (int): The size parameter, representing a dimension (likely related to a grid or matrix).
Returns:
list[pl.LpVariable]: The modified list of PuLP decision variables with initial values set.
"""
for i in range(L*L) :
x[i].setInitialValue(0)
x[1].setInitialValue(1)
x[2*L-1].setInitialValue(1)
return x
def pre_processing(self, L: int, landmarks: list[Landmark], max_time: int, max_landmarks: int | None) :
"""
Preprocesses the optimization problem by setting up constraints and variables for the tour optimization.
This method initializes and prepares the linear programming problem to optimize a tour that includes landmarks,
while respecting various constraints such as time limits, the number of landmarks to visit, and user preferences.
The pre-processing step sets up the problem before solving it using a linear programming solver.
Responsibilities:
- Defines the optimization problem using linear programming (LP) with the objective to maximize the tour value.
- Creates binary decision variables for each potential transition between landmarks.
- Sets up inequality constraints to respect the maximum time available for the tour and the maximum number of landmarks.
- Implements equality constraints to ensure the tour respects the start and finish positions, avoids staying in the same place,
and adheres to a visit order.
- Forces inclusion or exclusion of specific landmarks based on user preferences.
Attributes:
prob (pl.LpProblem): The linear programming problem to be solved.
x (list): A list of binary variables representing transitions between landmarks.
L (int): The total number of landmarks considered in the optimization.
landmarks (list[Landmark]): The list of landmarks to be visited in the tour.
max_time (int): The maximum allowable time for the entire tour.
max_landmarks (int | None): The maximum number of landmarks to visit in the tour, or None if no limit is set.
Returns:
prob (pl.LpProblem): The linear programming problem setup for optimization.
x (list): The list of binary variables for transitions between landmarks in the tour.
"""
if max_landmarks is None :
max_landmarks = self.max_landmarks
# Initialize the optimization problem
prob = pl.LpProblem("OptimizationProblem", pl.LpMaximize)
# Define the problem
x_bounds = [(0, 1)]*L*L
x = [pl.LpVariable(f"x_{i}", lowBound=x_bounds[i][0], upBound=x_bounds[i][1], cat='Binary') for i in range(L*L)]
# Setup the inequality constraints
self.init_ub_time(prob, x, L, landmarks, max_time) # Adds the distances from each landmark to the other.
self.respect_number(prob, x, L, max_landmarks) # Respects max number of visits (no more possible stops than landmarks).
self.break_sym(prob, x, L) # Breaks the 'zig-zag' symmetry. Avoids d12 and d21 but not larger circles.
# Setup the equality constraints
self.init_eq_not_stay(prob, x, L) # Force solution not to stay in same place
self.respect_start_finish(prob, x, L) # Force start and finish positions
self.respect_order(prob, x, L) # Respect order of visit (only works when max_time is limiting factor)
self.respect_user_must(prob, x, L, landmarks) # Force to do/avoid landmarks set by user.
# return prob, self.warm_start(x, L)
return prob, x
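For orientation, x is a flattened L x L adjacency matrix of binary variables. Assuming the row-major layout that the warm start indices (x[1] for the step 0 -> 1, x[2*L-1] for the step 1 -> L-1) suggest, a transition variable can be looked up as in this small sketch (the helper name is hypothetical):

# Hedged sketch: x[i*L + j] == 1 means the tour moves from landmark i to landmark j.
def transition_var(x: list, i: int, j: int, L: int):
    return x[i*L + j]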
def solve_optimization(self, max_time: int, landmarks: list[Landmark], max_landmarks: int | None = None) -> list[Landmark]:
"""
Main optimization pipeline to solve the landmark visiting problem.
This method sets up and solves a linear programming problem with constraints to find an optimal tour of landmarks,
considering user-defined must-visit landmarks, start and finish points, and ensuring no cycles are present.
Args:
max_time (int): Maximum time allowed for the tour in minutes.
landmarks (list[Landmark]): List of landmarks to visit.
max_landmarks (int | None): Maximum number of landmarks to visit; falls back to the configured default when None.
Returns:
list[Landmark]: The optimized tour of landmarks with updated travel times, or None if no valid solution is found.
"""
# Set up the optimization problem.
L = len(landmarks)
prob, x = self.pre_processing(L, landmarks, max_time, max_landmarks)
# Solve the problem and extract results.
try :
prob.solve(pl.PULP_CBC_CMD(msg=False, timeLimit=self.time_limit+1, gapRel=self.gap_rel))
except Exception as exc :
raise Exception(f"No solution found: {str(exc)}") from exc
status = pl.LpStatus[prob.status]
solution = [pl.value(var) for var in x] # The values of the decision variables (will be 0 or 1)
self.logger.debug("First results are out. Looking out for circles and correcting...")
# Raise error if no solution is found. FIXME: for now this throws the internal server error
if status != 'Optimal' :
self.logger.warning("The problem is overconstrained, no solution on first try.")
raise ArithmeticError("No solution could be found. Please try again with more time or different preferences.")
# If there is a solution, we're good to go, just check for connectiveness
circles = self.is_connected(solution)
i = 0
while circles is not None :
i += 1
if i == self.max_iter :
self.logger.warning(f'Timeout: No solution found after {self.max_iter} iterations.')
raise TimeoutError(f"Optimization took too long. No solution found after {self.max_iter} iterations.")
for circle in circles :
self.prevent_circle(prob, x, circle, L)
# Solve the problem again
try :
prob.solve(pl.PULP_CBC_CMD(msg=False, timeLimit=self.time_limit, gapRel=self.gap_rel))
except Exception as exc :
self.logger.warning("No solution found: {str(exc)")
raise Exception(f"No solution found: {str(exc)}") from exc
solution = [pl.value(var) for var in x]
if pl.LpStatus[prob.status] != 'Optimal' :
self.logger.warning("The problem is overconstrained, no solution after {i} cycles.")
raise ArithmeticError("No solution could be found. Please try again with more time or different preferences.")
circles = self.is_connected(solution)
if circles is None :
break
# Sort the landmarks in the order of the solution
order = self.get_order(solution)
tour = [landmarks[i] for i in order]
self.logger.info(f"Re-optimized {i} times, objective value : {int(pl.value(prob.objective))}")
return tour
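A minimal usage sketch of the pipeline (the Optimizer class name matches the import used in tester.py further below; the landmark list is assumed to already contain the start and finish as its first and last entries):

# Hedged usage sketch.
optimizer = Optimizer()
tour = optimizer.solve_optimization(max_time=180, landmarks=landmarks_short)
for landmark in tour:
    print(landmark)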

View File

@ -1,136 +0,0 @@
"""Module defining the handling of cache data from Overpass requests."""
import os
import json
import hashlib
from ..constants import OSM_CACHE_DIR, OSM_TYPES
def get_cache_key(query: str) -> str:
"""
Generate a unique cache key for the query using a hash function.
This ensures that queries with different parameters are cached separately.
"""
return hashlib.md5(query.encode('utf-8')).hexdigest()
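A quick illustrative call (the query string is made up):

# Hedged example: identical query strings map to identical keys.
key = get_cache_key("node[tourism](45.75,4.82,45.80,4.88)")
assert key == get_cache_key("node[tourism](45.75,4.82,45.80,4.88)")  # deterministic 32-char hex digest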
class CachingStrategyBase:
"""
Base class for implementing caching strategies.
"""
def get(self, key):
"""Retrieve the cached data associated with the provided key."""
raise NotImplementedError('Subclass should implement get')
def set(self, key, value):
"""Store data in the cache with the specified key."""
raise NotImplementedError('Subclass should implement set')
def set_hollow(self, key, **kwargs):
"""Create a hollow (empty) cache entry with a specific key."""
raise NotImplementedError('Subclass should implement set_hollow')
def close(self):
"""Clean up or close any resources used by the caching strategy."""
class JSONCache(CachingStrategyBase):
"""
A caching strategy that stores and retrieves data in JSON format.
"""
def __init__(self, cache_dir=OSM_CACHE_DIR):
# Add the class name as a suffix to the directory
self._cache_dir = f'{cache_dir}'
if not os.path.exists(self._cache_dir):
os.makedirs(self._cache_dir)
def _filename(self, key):
return os.path.join(self._cache_dir, f'{key}.json')
def get(self, key):
"""Retrieve JSON data from the cache and parse it as an ElementTree."""
filename = self._filename(key)
if os.path.exists(filename):
try:
# Open and parse the cached JSON data
with open(filename, 'r', encoding='utf-8') as file:
data = json.load(file)
# Return the data as a list of dicts.
return data
except json.JSONDecodeError:
return None # Return None if parsing fails
return None
def set(self, key, value):
"""Save the JSON data in the cache."""
filename = self._filename(key)
try:
# Write the JSON data to the cache file
with open(filename, 'w', encoding='utf-8') as file:
json.dump(value, file, ensure_ascii=False, indent=4)
except IOError as e:
raise IOError(f"Error writing to cache file: {filename} - {e}") from e
def set_hollow(self, key, cell: tuple, osm_types: list,
selector: str, conditions: list=None, out='center'):
"""Create an empty placeholder cache entry for a future fill."""
hollow_key = f'hollow_{key}'
filename = self._filename(hollow_key)
# Create the hollow JSON structure
hollow_data = {
"key": key,
"cell": list(cell),
"osm_types": list(osm_types),
"selector": selector,
"conditions": conditions,
"out": out
}
# Write the hollow data to the cache file
try:
with open(filename, 'w', encoding='utf-8') as file:
json.dump(hollow_data, file, ensure_ascii=False, indent=4)
except IOError as e:
raise IOError(f"Error writing hollow cache to file: {filename} - {e}") from e
def close(self):
"""Cleanup method, if needed."""
class CachingStrategy:
"""
A class to manage different caching strategies.
"""
__strategy = JSONCache() # Default caching strategy
__strategies = {
'JSON': JSONCache,
}
@classmethod
def use(cls, strategy_name='JSON', **kwargs):
"""Define the caching strategy to use."""
if cls.__strategy:
cls.__strategy.close()
strategy_class = cls.__strategies.get(strategy_name)
if not strategy_class:
raise ValueError(f"Unknown caching strategy: {strategy_name}")
cls.__strategy = strategy_class(**kwargs)
return cls.__strategy
@classmethod
def get(cls, key):
"""Get the data from the cache."""
return cls.__strategy.get(key)
@classmethod
def set(cls, key, value):
"""Save the data in the cache."""
cls.__strategy.set(key, value)
@classmethod
def set_hollow(cls, key, cell: tuple, osm_types: OSM_TYPES,
selector: str, conditions: list=None, out='center'):
"""Create a hollow cache entry."""
cls.__strategy.set_hollow(key, cell, osm_types, selector, conditions, out)
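A short usage sketch of the facade (the cache_dir value is illustrative, not taken from the constants module):

# Hedged sketch: select the default JSON strategy, then read/write through it.
CachingStrategy.use('JSON', cache_dir='.cache/osm')
key = get_cache_key("some overpass query")
if CachingStrategy.get(key) is None:
    CachingStrategy.set(key, [{'type': 'node', 'id': 1}])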

View File

@ -1,423 +0,0 @@
"""Module allowing connexion to overpass api and fectch data from OSM."""
import os
import time
import urllib
import math
import logging
import json
from typing import List, Tuple
from .caching_strategy import get_cache_key, CachingStrategy
from ..constants import OSM_CACHE_DIR, OSM_TYPES, BBOX
RESOLUTION = 0.05
CELL = Tuple[int, int]
class Overpass :
"""
Overpass class to manage the query building and sending to overpass api.
The caching strategy is a part of this class and initialized upon creation of the Overpass object.
"""
logger = logging.getLogger(__name__)
def __init__(self, caching_strategy: str = 'JSON', cache_dir: str = OSM_CACHE_DIR) :
"""
Initialize the Overpass instance with the url, headers and caching strategy.
"""
self.overpass_url = "https://overpass-api.de/api/interpreter"
self.headers = {'User-Agent': 'Mozilla/5.0 (compatible; OverpassQuery/1.0; +http://example.com)',}
self.caching_strategy = CachingStrategy.use(caching_strategy, cache_dir=cache_dir)
def send_query(self, bbox: BBOX, osm_types: OSM_TYPES,
selector: str, conditions: list=None, out='center') -> List[dict]:
"""
Sends the Overpass QL query to the Overpass API and returns the parsed json response.
Args:
bbox (tuple): Bounding box for the query.
osm_types (list[str]): List of OSM element types (e.g., 'node', 'way').
selector (str): Key or tag to filter OSM elements (e.g., 'highway').
conditions (list): Optional list of additional filter conditions in Overpass QL format.
out (str): Output format ('center', 'body', etc.). Defaults to 'center'.
Returns:
list: Parsed json response from the Overpass API, or cached data if available.
"""
# Determine which grid cells overlap with this bounding box.
overlapping_cells = Overpass._get_overlapping_cells(bbox)
# Retrieve cached data and identify missing cache entries
cached_responses, non_cached_cells = self._retrieve_cached_data(overlapping_cells, osm_types, selector, conditions, out)
self.logger.debug(f'Cache hit for {len(overlapping_cells)-len(non_cached_cells)}/{len(overlapping_cells)} quadrants.')
# If there is no missing data, return the cached responses after filtering.
if not non_cached_cells :
return Overpass._filter_landmarks(cached_responses, bbox)
# If there is no cached data, fetch all from Overpass.
if not cached_responses :
query_str = Overpass.build_query(bbox, osm_types, selector, conditions, out)
self.logger.debug(f'Query string: {query_str}')
return self.fetch_data_from_api(query_str)
# Resize the bbox for smaller search area and build new query string.
non_cached_bbox = Overpass._get_non_cached_bbox(non_cached_cells, bbox)
query_str = Overpass.build_query(non_cached_bbox, osm_types, selector, conditions, out)
self.logger.debug(f'Query string: {query_str}')
non_cached_responses = self.fetch_data_from_api(query_str)
return Overpass._filter_landmarks(cached_responses, bbox) + non_cached_responses
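A hedged usage sketch (coordinates illustrative; the arguments follow the signature above):

overpass = Overpass()
elements = overpass.send_query(
    bbox=(45.75, 4.82, 45.77, 4.84),
    osm_types=['node', 'way'],
    selector='tourism',
)
print(f"{len(elements)} elements found")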
def fetch_data_from_api(self, query_str: str) -> List[dict]:
"""
Fetch data from the Overpass API and return the json data.
Args:
query_str (str): The Overpass query string.
Returns:
dict: Combined cached and fetched data.
"""
try:
data = urllib.parse.urlencode({'data': query_str}).encode('utf-8')
request = urllib.request.Request(self.overpass_url, data=data, headers=self.headers)
with urllib.request.urlopen(request) as response:
response_data = response.read().decode('utf-8') # Convert the HTTPResponse to a string
data = json.loads(response_data) # Load the JSON from the string
elements = data.get('elements', [])
# self.logger.debug(f'Query = {query_str}')
return elements
except urllib.error.URLError as e:
self.logger.error(f"Error connecting to Overpass API: {str(e)}")
raise ConnectionError(f"Error connecting to Overpass API: {str(e)}") from e
except Exception as exc :
self.logger.error(f"unexpected error while fetching data from Overpass: {str(exc)}")
raise Exception(f'An unexpected error occured: {str(exc)}') from exc
def fill_cache(self, json_data: dict) :
"""
Fill cache with data by using a hollow cache entry's information.
"""
query_str, cache_key = Overpass._build_query_from_hollow(json_data)
try:
data = urllib.parse.urlencode({'data': query_str}).encode('utf-8')
request = urllib.request.Request(self.overpass_url, data=data, headers=self.headers)
with urllib.request.urlopen(request) as response:
# Convert the HTTPResponse to a string and load data
response_data = response.read().decode('utf-8')
data = json.loads(response_data)
# Get elements and set cache
elements = data.get('elements', [])
self.caching_strategy.set(cache_key, elements)
self.logger.debug(f'Cache set for {cache_key}')
except urllib.error.URLError as e:
raise ConnectionError(f"Error connecting to Overpass API: {str(e)}") from e
except Exception as exc :
raise Exception(f'An unexpected error occurred: {str(exc)}') from exc
@staticmethod
def build_query(bbox: BBOX, osm_types: OSM_TYPES,
selector: str, conditions: list=None, out='center') -> str:
"""
Constructs a query string for the Overpass API to retrieve OpenStreetMap (OSM) data.
Args:
bbox (tuple): A tuple representing the geographical search area, typically in the format
(lat_min, lon_min, lat_max, lon_max).
osm_types (list[str]): A list of OSM element types to search for. Must be one or more of
'Way', 'Node', or 'Relation'.
selector (str): The key or tag to filter the OSM elements (e.g., 'amenity', 'highway', etc.).
conditions (list, optional): A list of conditions to apply as additional filters for the
selected OSM elements. The conditions should be written in
the Overpass QL format, and they are combined with '&&' if
multiple are provided. Defaults to an empty list.
out (str, optional): Specifies the output type, such as 'center', 'body', or 'tags'.
Defaults to 'center'.
Returns:
str: The constructed Overpass QL query string.
Notes:
- If no conditions are provided, the query will just use the `selector` to filter the OSM
elements without additional constraints.
"""
query = '[out:json][timeout:20];('
# convert the bbox to string.
bbox_str = f"({','.join(map(str, bbox))})"
if conditions is not None and len(conditions) > 0:
conditions = '(if: ' + ' && '.join(conditions) + ')'
else :
conditions = ''
for elem in osm_types :
query += elem + '[' + selector + ']' + conditions + bbox_str + ';'
query += ');' + f'out {out};'
return query
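For example, derived from the concatenation above when no conditions are given:

# build_query((45.75, 4.82, 45.77, 4.84), ['node'], 'tourism') returns:
#   [out:json][timeout:20];(node[tourism](45.75,4.82,45.77,4.84););out center;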
def _retrieve_cached_data(self, overlapping_cells: CELL, osm_types: OSM_TYPES,
selector: str, conditions: list, out: str) -> Tuple[List[dict], list[CELL]]:
"""
Retrieve cached data and identify missing cache quadrants.
Args:
overlapping_cells (list): Cells to check for cached data.
osm_types (list): OSM types (e.g., 'node', 'way').
selector (str): Key or tag to filter OSM elements.
conditions (list): Additional conditions to apply.
out (str): Output format.
Returns:
tuple: A tuple containing:
- cached_responses (list): List of cached data found.
- non_cached_cells (list(tuple)): List of cells with missing data.
"""
cell_key_dict = {}
for cell in overlapping_cells :
for elem in osm_types :
key_str = f"{elem}[{selector}]{conditions}({','.join(map(str, cell))})"
cell_key_dict[cell] = get_cache_key(key_str)
cached_responses = []
non_cached_cells = []
# Retrieve the cached data and mark the missing entries as hollow
for cell, key in cell_key_dict.items():
cached_data = self.caching_strategy.get(key)
if cached_data is not None :
cached_responses += cached_data
else:
self.caching_strategy.set_hollow(key, cell, osm_types, selector, conditions, out)
non_cached_cells.append(cell)
return cached_responses, non_cached_cells
@staticmethod
def _build_query_from_hollow(json_data: dict) -> Tuple[str, str]:
"""
Build query string using information from a hollow cache entry.
"""
# Extract values from the JSON object
key = json_data.get('key')
cell = tuple(json_data.get('cell'))
bbox = Overpass._get_bbox_from_grid_cell(cell)
osm_types = json_data.get('osm_types')
selector = json_data.get('selector')
conditions = json_data.get('conditions')
out = json_data.get('out')
query_str = Overpass.build_query(bbox, osm_types, selector, conditions, out)
return query_str, key
@staticmethod
def _get_overlapping_cells(query_bbox: tuple) -> List[CELL]:
"""
Returns a set of all grid cells that overlap with the given bounding box.
"""
# Extract location from the query bbox
lat_min, lon_min, lat_max, lon_max = query_bbox
min_lat_cell, min_lon_cell = Overpass._get_grid_cell(lat_min, lon_min)
max_lat_cell, max_lon_cell = Overpass._get_grid_cell(lat_max, lon_max)
overlapping_cells = set()
for lat_idx in range(min_lat_cell, max_lat_cell + 1):
for lon_idx in range(min_lon_cell, max_lon_cell + 1):
overlapping_cells.add((lat_idx, lon_idx))
return overlapping_cells
@staticmethod
def _get_grid_cell(lat: float, lon: float) -> CELL:
"""
Returns the grid cell coordinates for a given latitude and longitude.
Each grid cell is 0.05°lat x 0.05°lon resolution in size.
"""
lat_index = math.floor(lat / RESOLUTION)
lon_index = math.floor(lon / RESOLUTION)
return (lat_index, lon_index)
@staticmethod
def _get_bbox_from_grid_cell(cell: CELL) -> BBOX:
"""
Returns the bounding box for a given grid cell index.
Each grid cell is resolution x resolution in size.
The bounding box is returned as (min_lat, min_lon, max_lat, max_lon).
"""
# Calculate the southwest (min_lat, min_lon) corner of the bounding box
min_lat = round(cell[0] * RESOLUTION, 2)
min_lon = round(cell[1] * RESOLUTION, 2)
# Calculate the northeast (max_lat, max_lon) corner of the bounding box
max_lat = round((cell[0] + 1) * RESOLUTION, 2)
max_lon = round((cell[1] + 1) * RESOLUTION, 2)
return (min_lat, min_lon, max_lat, max_lon)
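A worked example with RESOLUTION = 0.05 (checked by direct arithmetic):

# _get_grid_cell(48.8566, 2.3522) -> (floor(977.132), floor(47.044)) = (977, 47)
# _get_bbox_from_grid_cell((977, 47)) -> (48.85, 2.35, 48.9, 2.4)
# The returned bbox always contains the original coordinate.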
@staticmethod
def _get_non_cached_bbox(non_cached_cells: List[CELL], original_bbox: BBOX):
"""
Calculate the non-cached bounding box by excluding cached cells.
Args:
non_cached_cells (list): The list of cells that were not found in the cache.
original_bbox (tuple): The original bounding box (min_lat, min_lon, max_lat, max_lon).
Returns:
tuple: The new bounding box that excludes cached cells, or None if all cells are cached.
"""
if not non_cached_cells:
return None # All cells were cached
# Initialize the non-cached bounding box with extreme values
min_lat, min_lon, max_lat, max_lon = float('inf'), float('inf'), float('-inf'), float('-inf')
# Iterate over non-cached cells to find the new bounding box
for cell in non_cached_cells:
cell_min_lat, cell_min_lon, cell_max_lat, cell_max_lon = Overpass._get_bbox_from_grid_cell(cell)
min_lat = min(min_lat, cell_min_lat)
min_lon = min(min_lon, cell_min_lon)
max_lat = max(max_lat, cell_max_lat)
max_lon = max(max_lon, cell_max_lon)
# If no update to bounding box, return the original
if min_lat == float('inf') or min_lon == float('inf'):
return None
return (max(min_lat, original_bbox[0]),
max(min_lon, original_bbox[1]),
min(max_lat, original_bbox[2]),
min(max_lon, original_bbox[3]))
@staticmethod
def _filter_landmarks(elements: List[dict], bbox: BBOX) -> List[dict]:
"""
Filters elements based on whether their coordinates are inside the given bbox.
Args:
- elements (list of dict): List of elements containing coordinates.
- bbox (tuple): A bounding box defined as (min_lat, min_lon, max_lat, max_lon).
Returns:
- list: A list of elements whose coordinates are inside the bounding box.
"""
filtered_elements = []
min_lat, min_lon, max_lat, max_lon = bbox
for elem in elements:
# Extract coordinates based on the 'type' of element
if elem.get('type') != 'node':
center = elem.get('center', {})
lat = float(center.get('lat', 0))
lon = float(center.get('lon', 0))
else:
lat = float(elem.get('lat', 0))
lon = float(elem.get('lon', 0))
# Check if the coordinates fall within the given bounding box
if min_lat <= lat <= max_lat and min_lon <= lon <= max_lon:
filtered_elements.append(elem)
return filtered_elements
def get_base_info(elem: dict, osm_type: OSM_TYPES, with_name=False) :
"""
Extracts base information (coordinates, OSM ID, and optionally a name) from an OSM element.
This function retrieves the latitude and longitude coordinates, OSM ID, and optionally the name
of a given OpenStreetMap (OSM) element. It handles different OSM types (e.g., 'node', 'way') by
extracting coordinates either directly or from a center tag, depending on the element type.
Args:
elem (dict): The JSON element representing the OSM entity.
osm_type (str): The type of the OSM entity (e.g., 'node', 'way'). If 'node', the coordinates
are extracted directly from the element; otherwise, from the 'center' tag.
with_name (bool): Whether to extract and return the name of the element. If True, it attempts
to find the 'name' tag within the element and return its value. Defaults to False.
Returns:
tuple: A tuple containing:
- osm_id (str): The OSM ID of the element.
- coords (tuple): A tuple of (latitude, longitude) coordinates.
- name (str, optional): The name of the element if `with_name` is True; otherwise, not included.
"""
# 1. extract coordinates
if osm_type != 'node' :
center = elem.get('center')
lat = float(center.get('lat'))
lon = float(center.get('lon'))
else :
lat = float(elem.get('lat'))
lon = float(elem.get('lon'))
coords = tuple((lat, lon))
# 2. Extract OSM id
osm_id = elem.get('id')
# 3. Extract name if specified and return
if with_name :
name = elem.get('tags', {}).get('name')
return osm_id, coords, name
return osm_id, coords
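A hedged usage example with a minimal 'way' element of the shape Overpass returns:

elem = {'id': 42, 'center': {'lat': 48.8606, 'lon': 2.3376}, 'tags': {'name': 'Louvre'}}
osm_id, coords, name = get_base_info(elem, 'way', with_name=True)
# -> 42, (48.8606, 2.3376), 'Louvre'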
def fill_cache():
"""
Scans the specified cache directory for files starting with 'hollow_' and attempts to load
their contents as JSON to fill the cache of the Overpass system.
"""
overpass = Overpass()
n_files = 0
total = 0
with os.scandir(OSM_CACHE_DIR) as it:
for entry in it:
if entry.is_file() and entry.name.startswith('hollow_'):
total += 1
try :
# Read the whole file content as a string
with open(entry.path, 'r', encoding='utf-8') as f:
# load data and fill the cache with the query and key
json_data = json.load(f)
overpass.fill_cache(json_data)
n_files += 1
time.sleep(1)
# Now delete the file as the cache is filled
os.remove(entry.path)
except Exception as exc :
overpass.logger.error(f'An error occurred while parsing file {entry.path} as .json: {str(exc)}')
overpass.logger.info(f"Successfully filled {n_files}/{total} cache files.")

View File

@ -1,6 +1,3 @@
-# Tags were picked mostly arbitrarily, based on the OSM wiki and the OSM tags page.
-# See https://taginfo.openstreetmap.org for more inspiration.
 nature:
   leisure: park
   geological: ''
@ -14,24 +11,7 @ nature:
     - alpine_hut
     - viewpoint
     - zoo
-    - resort
-    - picnic_site
-  water:
-    - pond
-    - lake
-    - river
-    - basin
-    - stream
-    - lagoon
-    - rapids
-  waterway:
-    - waterfall
-    - river
-    - canal
-    - dam
-    - dock
-    - boatyard
+  waterway: waterfall
 shopping:
   shop:
@ -43,51 +23,10 @ sightseeing:
     - museum
     - attraction
     - gallery
-    - artwork
-    - aquarium
   historic: ''
   amenity:
     - planetarium
     - place_of_worship
     - fountain
-    - townhall
-  water: reflecting_pool
-  bridge:
-    - aqueduct
-    - viaduct
-    - boardwalk
-    - cantilever
-    - abandoned
-  building: cathedral
-  # unused sightseeing/buildings:
-  # - church
-  # - chapel
-  # - mosque
-  # - synagogue
-  # - ruins
-  # - temple
-  # - government
-  # - cathedral
-  # - castle
-  # - museum
-
-museums:
-  tourism:
-    - museum
-    - aquarium
-
-# to be used later on
-restauration:
-  shop:
-    - coffee
-    - bakery
-    - restaurant
-    - pastry
-  amenity:
-    - restaurant
-    - cafe
-    - ice_cream
-    - food_court
-    - biergarten
+  water:
+    - reflecting_pool

View File

@ -1,11 +1,6 @@
-max_bbox_side: 4000 #m
+city_bbox_side: 5000 #m
 radius_close_to: 50
-church_coeff: 0.75
-nature_coeff: 1.6
-overall_coeff: 10
-tag_exponent: 1.15
-image_bonus: 1.1
-viewpoint_bonus: 10
-wikipedia_bonus: 1.25
-N_important: 60
-pay_bonus: -1
+church_coeff: 0.8
+park_coeff: 1.2
+tag_coeff: 10
+N_important: 40

View File

@ -1,9 +1,4 @@
 detour_factor: 1.4
-detour_corridor_width: 300
+detour_corridor_width: 200
 average_walking_speed: 4.8
-max_landmarks: 10
-max_landmarks_refiner: 20
-overshoot: 0.0016
-time_limit: 1
-gap_rel: 0.025
-max_iter: 80
+max_landmarks: 7

View File

@ -1,40 +1,10 @@
"""Definition of the Landmark class to handle visitable objects across the world."""
from typing import Optional, Literal from typing import Optional, Literal
from uuid import uuid4, UUID
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from uuid import uuid4
# Output to frontend # Output to frontend
class Landmark(BaseModel) : class Landmark(BaseModel) :
"""
A class representing a landmark or point of interest (POI) in the context of a trip.
The Landmark class is used to model visitable locations, such as tourist attractions,
natural sites, shopping locations, and start/end points in travel itineraries. It
holds information about the landmark's attributes and supports comparisons and
calculations, such as distance between landmarks.
Attributes:
name (str): The name of the landmark.
type (Literal): The type of the landmark, which can be one of ['sightseeing', 'nature',
'shopping', 'start', 'finish'].
location (tuple): A tuple representing the (latitude, longitude) of the landmark.
osm_type (str): The OpenStreetMap (OSM) type of the landmark.
osm_id (int): The OpenStreetMap (OSM) ID of the landmark.
attractiveness (int): A score representing the attractiveness of the landmark.
n_tags (int): The number of tags associated with the landmark.
image_url (Optional[str]): A URL to an image of the landmark.
website_url (Optional[str]): A URL to the landmark's official website.
description (Optional[str]): A text description of the landmark.
duration (Optional[int]): The estimated time to visit the landmark (in minutes).
name_en (Optional[str]): The English name of the landmark.
uuid (UUID): A unique identifier for the landmark, generated by default using uuid4.
must_do (Optional[bool]): Whether the landmark is a "must-do" attraction.
must_avoid (Optional[bool]): Whether the landmark should be avoided.
is_secondary (Optional[bool]): Whether the landmark is secondary or less important.
time_to_reach_next (Optional[int]): Estimated time (in minutes) to reach the next landmark.
next_uuid (Optional[UUID]): UUID of the next landmark in sequence (if applicable).
"""
# Properties of the landmark # Properties of the landmark
name : str name : str
@ -44,82 +14,25 @@ class Landmark(BaseModel) :
osm_id : int osm_id : int
attractiveness : int attractiveness : int
n_tags : int n_tags : int
image_url : Optional[str] = None # TODO future
# Optional properties to gather more information. description : Optional[str] = None # TODO future
image_url : Optional[str] = None duration : Optional[int] = 0 # TODO future
website_url : Optional[str] = None
wiki_url : Optional[str] = None
keywords: Optional[dict] = {}
description : Optional[str] = None
duration : Optional[int] = 5
name_en : Optional[str] = None
# Unique ID of a given landmark # Unique ID of a given landmark
uuid: UUID = Field(default_factory=uuid4) uuid: str = Field(default_factory=uuid4) # TODO implement this ASAP
# Additional properties depending on specific tour # Additional properties depending on specific tour
must_do : Optional[bool] = False must_do : Optional[bool] = False
must_avoid : Optional[bool] = False must_avoid : Optional[bool] = False
is_secondary : Optional[bool] = False is_secondary : Optional[bool] = False # TODO future
time_to_reach_next : Optional[int] = 0 time_to_reach_next : Optional[int] = 0 # TODO fix this in existing code
next_uuid : Optional[UUID] = None next_uuid : Optional[str] = None # TODO implement this ASAP
# More properties to define the score
is_viewpoint : Optional[bool] = False
is_place_of_worship : Optional[bool] = False
def __str__(self) -> str:
"""
String representation of the Landmark object.
Returns:
str: A formatted string with the landmark's type, name, location, attractiveness score,
time to the next landmark (if available), and whether the landmark is secondary.
"""
t_to_next_str = f", time_to_next={self.time_to_reach_next}" if self.time_to_reach_next else ""
is_secondary_str = ", secondary" if self.is_secondary else ""
type_str = '(' + self.type + ')'
return (f'Landmark{type_str}: [{self.name} @{self.location}, '
f'score={self.attractiveness}{t_to_next_str}{is_secondary_str}]')
def distance(self, value: 'Landmark') -> float:
"""
Calculates the squared distance between this landmark and another.
Args:
value (Landmark): Another Landmark object to calculate the distance to.
Returns:
float: The squared Euclidean distance between the two landmarks.
"""
return (self.location[0] - value.location[0])**2 + (self.location[1] - value.location[1])**2
def __hash__(self) -> int: def __hash__(self) -> int:
""" return self.uuid.int
Generates a hash for the Landmark based on its name.
Returns: def __str__(self) -> str:
int: The hash of the landmark. time_to_next_str = f", time_to_next={self.time_to_reach_next}" if self.time_to_reach_next else ""
""" return f'Landmark({self.type}): [{self.name} @{self.location}, score={self.attractiveness}{time_to_next_str}]'
return hash(self.name)
def __eq__(self, value: 'Landmark') -> bool:
"""
Checks equality between two Landmark objects based on UUID, OSM ID, and name.
Args:
value (Landmark): Another Landmark object to compare.
Returns:
bool: True if the landmarks are equal, False otherwise.
"""
# eq and hash must be consistent
# in particular, if two objects are equal, their hash must be equal
# uuid and osm_id are just shortcuts to avoid comparing all the properties
# if they are equal, we know that the name is also equal and in turn the hash is equal
return (self.uuid == value.uuid or
self.osm_id == value.osm_id or
(self.name == value.name and self.distance(value) < 0.001))

View File

@ -1,78 +1,61 @@
"""Linked and ordered list of Landmarks that represents the visiting order.""" import uuid
from .landmark import Landmark from .landmark import Landmark
from ..utils.get_time_distance import get_time from utils.get_time_separation import get_time
class LinkedLandmarks: class LinkedLandmarks:
""" """
A list of landmarks that are linked together, e.g. in a route. A list of landmarks that are linked together, e.g. in a route.
Each landmark serves as a node in the linked list, but since we expect Each landmark serves as a node in the linked list, but since we expect these to be consumed through the rest API, a pythonic reference to the next landmark is not well suited. Instead we use the uuid of the next landmark to reference the next landmark in the list. This is not very efficient, but appropriate for the expected use case ("short" trips with onyl few landmarks).
these to be consumed through the rest API, a pythonic reference to the next
landmark is not well suited. Instead we use the uuid of the next landmark
to reference the next landmark in the list. This is not very efficient,
but appropriate for the expected use case
("short" trips with onyl few landmarks).
""" """
_landmarks = list[Landmark] _landmarks = list[Landmark]
total_time: int = 0 total_time = int
uuid = str
def __init__(self, data: list[Landmark] = None) -> None: def __init__(self, data: list[Landmark] = None) -> None:
""" """
Initialize a new LinkedLandmarks object. This expects an ORDERED list of landmarks, Initialize a new LinkedLandmarks object. This expects an ORDERED list of landmarks, where the first landmark is the starting point and the last landmark is the end point.
where the first landmark is the starting point and the last landmark is the end point.
Args: Args:
data (list[Landmark], optional): The list of landmarks that are linked together. data (list[Landmark], optional): The list of landmarks that are linked together. Defaults to None.
Defaults to None.
""" """
self.uuid = uuid.uuid4()
self._landmarks = data if data else [] self._landmarks = data if data else []
self._link_landmarks() self._link_landmarks()
def _link_landmarks(self) -> None: def _link_landmarks(self) -> None:
""" """
Create the links between the landmarks in the list by setting their Create the links between the landmarks in the list by setting their .next_uuid and the .time_to_next attributes.
.next_uuid and the .time_to_next attributes.
""" """
self.total_time = 0
# Mark secondary landmarks as such
self.update_secondary_landmarks()
for i, landmark in enumerate(self._landmarks[:-1]): for i, landmark in enumerate(self._landmarks[:-1]):
landmark.next_uuid = self._landmarks[i + 1].uuid landmark.next_uuid = self._landmarks[i + 1].uuid
time_to_next = get_time(landmark.location, self._landmarks[i + 1].location) time_to_next = get_time(landmark.location, self._landmarks[i + 1].location)
landmark.time_to_reach_next = time_to_next landmark.time_to_reach_next = time_to_next
self.total_time += time_to_next self.total_time += time_to_next
self.total_time += landmark.duration
self._landmarks[-1].next_uuid = None self._landmarks[-1].next_uuid = None
self._landmarks[-1].time_to_reach_next = 0 self._landmarks[-1].time_to_reach_next = 0
def update_secondary_landmarks(self) -> None:
"""
Mark landmarks with lower importance as secondary.
"""
# Extract the attractiveness scores and sort them in descending order
scores = sorted([landmark.attractiveness for landmark in self._landmarks], reverse=True)
# Determine the 10th highest score
if len(scores) >= 10:
threshold_score = scores[9]
else:
# If there are fewer than 10 landmarks, use the lowest score as the threshold
threshold_score = min(scores) if scores else 0
# Update 'is_secondary' for landmarks with attractiveness below the threshold score
for landmark in self._landmarks:
if (landmark.attractiveness < threshold_score and landmark.type not in ["start", "finish"]):
landmark.is_secondary = True
def __getitem__(self, index: int) -> Landmark: def __getitem__(self, index: int) -> Landmark:
return self._landmarks[index] return self._landmarks[index]
def __str__(self) -> str: def __str__(self) -> str:
return f"LinkedLandmarks [{' ->'.join([str(landmark) for landmark in self._landmarks])}]" return f"LinkedLandmarks, total time: {self.total_time} minutes, {len(self._landmarks)} stops: [{','.join([str(landmark) for landmark in self._landmarks])}]"
def asdict(self) -> dict:
"""
Convert the linked landmarks to a json serializable dictionary.
Returns:
dict: A dictionary representation of the linked landmarks.
"""
return {
'uuid': self.uuid,
'total_time': self.total_time,
'landmarks': [landmark.dict() for landmark in self._landmarks]
}

View File

@ -1,26 +1,13 @@
"""Defines the Preferences used as input for trip generation."""
from typing import Optional, Literal
from pydantic import BaseModel from pydantic import BaseModel
from typing import Optional, Literal
class Preference(BaseModel) : class Preference(BaseModel) :
""" name: str
Type of preference.
Attributes:
type: what kind of landmark type.
score: how important that type is.
"""
type: Literal['sightseeing', 'nature', 'shopping', 'start', 'finish'] type: Literal['sightseeing', 'nature', 'shopping', 'start', 'finish']
score: int # score could be from 1 to 5 score: int # score could be from 1 to 5
# Input for optimization # Input for optimization
class Preferences(BaseModel) : class Preferences(BaseModel) :
""""
Full collection of preferences needed to generate a personalized trip.
"""
# Sightseeing / History & Culture (Musées, bâtiments historiques, opéras, églises) # Sightseeing / History & Culture (Musées, bâtiments historiques, opéras, églises)
sightseeing : Preference sightseeing : Preference
@ -30,5 +17,5 @@ class Preferences(BaseModel) :
# Shopping (diriger plutôt vers des zones / rues commerçantes) # Shopping (diriger plutôt vers des zones / rues commerçantes)
shopping : Preference shopping : Preference
max_time_minute: Optional[int] = 3*60 max_time_minute: Optional[int] = 6*60
detour_tolerance_minute: Optional[int] = 0 detour_tolerance_minute: Optional[int] = 0

View File

@ -1,26 +0,0 @@
"""Definition of the Toilets class."""
from typing import Optional
from pydantic import BaseModel, ConfigDict
class Toilets(BaseModel) :
"""
Model for toilets. When a field is false or empty, the information is either false or unknown.
"""
location : tuple
wheelchair : Optional[bool] = False
changing_table : Optional[bool] = False
fee : Optional[bool] = False
opening_hours : Optional[str] = ""
def __str__(self) -> str:
"""
String representation of the Toilets object.
Returns:
str: A formatted string with the toilets location.
"""
return f'Toilets @{self.location}'
model_config = ConfigDict(from_attributes=True)
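A hedged usage example; only the location is required, every other field keeps its default:

toilets = Toilets(location=(45.76, 4.83), wheelchair=True)
print(toilets)  # -> Toilets @(45.76, 4.83)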

View File

@ -1,48 +0,0 @@
"""Definition of the Trip class."""
from uuid import uuid4, UUID
from pydantic import BaseModel, Field
from pymemcache.client.base import Client
from .linked_landmarks import LinkedLandmarks
class Trip(BaseModel):
""""
A Trip represents the final guided tour that can be passed to frontend.
Attributes:
uuid: unique identifier for this particular trip.
total_time: duration of the trip (in minutes).
first_landmark_uuid: unique identifier of the first Landmark to visit.
Methods:
from_linked_landmarks: create a Trip from LinkedLandmarks object.
"""
uuid: UUID = Field(default_factory=uuid4)
total_time: int
first_landmark_uuid: UUID
@classmethod
def from_linked_landmarks(cls, landmarks: LinkedLandmarks, cache_client: Client) -> "Trip":
"""
Initialize a new Trip object and ensure it is stored in the cache.
"""
trip = Trip(
total_time = landmarks.total_time,
first_landmark_uuid = landmarks[0].uuid
)
# Store the trip in the cache
cache_client.set(f"trip_{trip.uuid}", trip)
# Make sure to await the result (noreply=False).
# Otherwise the cache might not be inplace when the trip is actually requested.
cache_client.set_many({f"landmark_{landmark.uuid}": landmark for landmark in landmarks},
expire=3600, noreply=False)
# is equivalent to:
# for landmark in landmarks:
# cache_client.set(f"landmark_{landmark.uuid}", landmark, expire=3600)
return trip
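The read path is symmetric; a hedged sketch (the key formats are taken from the set calls above, the variable names are assumed):

trip = cache_client.get(f"trip_{trip_uuid}")
landmark = cache_client.get(f"landmark_{trip.first_landmark_uuid}")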

backend/src/tester.py Normal file
View File

@ -0,0 +1,85 @@
import logging
import yaml
from utils.landmarks_manager import LandmarkManager
from utils.optimizer import Optimizer
from utils.refiner import Refiner
from structs.landmark import Landmark
from structs.linked_landmarks import LinkedLandmarks
from structs.preferences import Preferences, Preference
logger = logging.getLogger(__name__)
def test(start_coords: tuple[float, float], finish_coords: tuple[float, float] = None) -> LinkedLandmarks:
manager = LandmarkManager()
optimizer = Optimizer()
refiner = Refiner(optimizer=optimizer)
preferences = Preferences(
sightseeing=Preference(
name='sightseeing',
type='sightseeing',
score = 5),
nature=Preference(
name='nature',
type='nature',
score = 5),
shopping=Preference(
name='shopping',
type='shopping',
score = 5),
max_time_minute=180,
detour_tolerance_minute=30
)
# Create start and finish
if finish_coords is None :
finish_coords = start_coords
start = Landmark(name='start', type='start', location=start_coords, osm_type='', osm_id=0, attractiveness=0, n_tags = 0)
finish = Landmark(name='finish', type='finish', location=finish_coords, osm_type='', osm_id=0, attractiveness=0, n_tags = 0)
#finish = Landmark(name='finish', type=LandmarkType(landmark_type='finish'), location=(48.8777055, 2.3640967), osm_type='finish', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
#start = Landmark(name='start', type=LandmarkType(landmark_type='start'), location=(48.847132, 2.312359), osm_type='start', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
#finish = Landmark(name='finish', type=LandmarkType(landmark_type='finish'), location=(48.843185, 2.344533), osm_type='finish', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
#finish = Landmark(name='finish', type=LandmarkType(landmark_type='finish'), location=(48.847132, 2.312359), osm_type='finish', osm_id=0, attractiveness=0, must_do=True, n_tags = 0)
# Generate the landmarks from the start location
landmarks, landmarks_short = manager.generate_landmarks_list(
center_coordinates = start_coords,
preferences = preferences
)
# Store data to file for debug purposes
# write_data(landmarks, "landmarks_Strasbourg.txt")
# Insert start and finish to the landmarks list
landmarks_short.insert(0, start)
landmarks_short.append(finish)
# First stage optimization
base_tour = optimizer.solve_optimization(max_time=preferences.max_time_minute, landmarks=landmarks_short)
# Second stage using linear optimization
refined_tour = refiner.refine_optimization(all_landmarks=landmarks, base_tour=base_tour, max_time = preferences.max_time_minute, detour = preferences.detour_tolerance_minute)
linked_tour = LinkedLandmarks(refined_tour)
logger.info(f"Optimized route: {linked_tour}")
# with open('linked_tour.yaml', 'w') as f:
# yaml.dump(linked_tour.asdict(), f)
return linked_tour
#test(tuple((48.8344400, 2.3220540))) # Café Chez César
#test(tuple((48.8375946, 2.2949904))) # Point random
#test(tuple((47.377859, 8.540585))) # Zurich HB
#test(tuple((45.7576485, 4.8330241))) # Lyon Bellecour
test(tuple((48.5848435, 7.7332974))) # Strasbourg Gare
#test(tuple((48.2067858, 16.3692340))) # Vienne

View File

@ -1,62 +0,0 @@
"""Collection of tests to ensure correct handling of invalid input."""
from fastapi.testclient import TestClient
import pytest
from ..main import app
@pytest.fixture(scope="module")
def invalid_client():
"""Client used to call the app."""
return TestClient(app)
@pytest.mark.parametrize(
"start,preferences,status_code",
[
# Invalid case: no preferences at all.
([48.8566, 2.3522], {}, 422),
# Invalid cases: incomplete preferences.
([48.084588, 7.280405], {"sightseeing": {"type": "nature", "score": 5}, # no shopping
"nature": {"type": "nature", "score": 5},
}, 422),
([48.084588, 7.280405], {"sightseeing": {"type": "nature", "score": 5}, # no nature
"shopping": {"type": "shopping", "score": 5},
}, 422),
([48.084588, 7.280405], {"nature": {"type": "nature", "score": 5}, # no sightseeing
"shopping": {"type": "shopping", "score": 5},
}, 422),
# Invalid cases: nonexistent coords
([91, 181], {"sightseeing": {"type": "nature", "score": 5},
"nature": {"type": "nature", "score": 5},
"shopping": {"type": "shopping", "score": 5},
}, 422),
([-91, 181], {"sightseeing": {"type": "nature", "score": 5},
"nature": {"type": "nature", "score": 5},
"shopping": {"type": "shopping", "score": 5},
}, 422),
([91, -181], {"sightseeing": {"type": "nature", "score": 5},
"nature": {"type": "nature", "score": 5},
"shopping": {"type": "shopping", "score": 5},
}, 422),
([-91, -181], {"sightseeing": {"type": "nature", "score": 5},
"nature": {"type": "nature", "score": 5},
"shopping": {"type": "shopping", "score": 5},
}, 422),
]
)
def test_input(invalid_client, start, preferences, status_code): # pylint: disable=redefined-outer-name
"""
Test new trip creation with different sets of preferences and locations.
"""
response = invalid_client.post(
"/trip/new",
json={
"preferences": preferences,
"start": start
}
)
assert response.status_code == status_code

View File

@ -1,343 +0,0 @@
"""Collection of tests to ensure correct implementation and track progress. """
import time
from fastapi.testclient import TestClient
import pytest
from .test_utils import load_trip_landmarks, log_trip_details
from ..main import app
@pytest.fixture(scope="module")
def client():
"""Client used to call the app."""
return TestClient(app)
def test_turckheim(client, request): # pylint: disable=redefined-outer-name
"""
Test n°1 : Custom test in Turckheim to ensure small villages are also supported.
Args:
client:
request:
"""
start_time = time.time() # Start timer
duration_minutes = 20
response = client.post(
"/trip/new",
json={
"preferences": {"sightseeing": {"type": "sightseeing", "score": 5},
"nature": {"type": "nature", "score": 0},
"shopping": {"type": "shopping", "score": 0},
"max_time_minute": duration_minutes,
"detour_tolerance_minute": 0},
"start": [48.084588, 7.280405]
# "start": [45.74445023349939, 4.8222687890538865]
# "start": [45.75156398104873, 4.827154464827647]
}
)
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])
# Get computation time
comp_time = time.time() - start_time
# Add details to report
log_trip_details(request, landmarks, result['total_time'], duration_minutes)
# checks :
assert response.status_code == 200 # check for successful planning
assert isinstance(landmarks, list) # check that the return type is a list
assert len(landmarks) > 2 # check that there is something to visit
assert comp_time < 30, f"Computation time exceeded 30 seconds: {comp_time:.2f} seconds"
assert duration_minutes*0.8 < result['total_time'], f"Trip too short: {result['total_time']} instead of {duration_minutes}"
assert duration_minutes*1.2 > result['total_time'], f"Trip too long: {result['total_time']} instead of {duration_minutes}"
# assert 2!= 3
def test_bellecour(client, request) : # pylint: disable=redefined-outer-name
"""
Test n°2 : Custom test in Lyon centre to ensure proper decision making in crowded area.
Args:
client:
request:
"""
start_time = time.time() # Start timer
duration_minutes = 120
response = client.post(
"/trip/new",
json={
"preferences": {"sightseeing": {"type": "sightseeing", "score": 5},
"nature": {"type": "nature", "score": 5},
"shopping": {"type": "shopping", "score": 5},
"max_time_minute": duration_minutes,
"detour_tolerance_minute": 0},
"start": [45.7576485, 4.8330241]
}
)
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])
# Get computation time
comp_time = time.time() - start_time
# Add details to report
log_trip_details(request, landmarks, result['total_time'], duration_minutes)
# for elem in landmarks :
# print(elem)
# checks :
assert response.status_code == 200 # check for successful planning
assert comp_time < 30, f"Computation time exceeded 30 seconds: {comp_time:.2f} seconds"
assert duration_minutes*0.8 < result['total_time'], f"Trip too short: {result['total_time']} instead of {duration_minutes}"
assert duration_minutes*1.2 > result['total_time'], f"Trip too long: {result['total_time']} instead of {duration_minutes}"
def test_cologne(client, request) : # pylint: disable=redefined-outer-name
"""
Test n°3 : Custom test in Cologne to ensure proper decision making in crowded area.
Args:
client:
request:
"""
start_time = time.time() # Start timer
duration_minutes = 240
response = client.post(
"/trip/new",
json={
"preferences": {"sightseeing": {"type": "sightseeing", "score": 5},
"nature": {"type": "nature", "score": 5},
"shopping": {"type": "shopping", "score": 5},
"max_time_minute": duration_minutes,
"detour_tolerance_minute": 0},
"start": [50.942352665, 6.957777972392]
}
)
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])
# Get computation time
comp_time = time.time() - start_time
# Add details to report
log_trip_details(request, landmarks, result['total_time'], duration_minutes)
# for elem in landmarks :
# print(elem)
# checks :
assert response.status_code == 200 # check for successful planning
assert comp_time < 30, f"Computation time exceeded 30 seconds: {comp_time:.2f} seconds"
assert duration_minutes*0.8 < result['total_time'], f"Trip too short: {result['total_time']} instead of {duration_minutes}"
assert duration_minutes*1.2 > result['total_time'], f"Trip too long: {result['total_time']} instead of {duration_minutes}"
def test_strasbourg(client, request) : # pylint: disable=redefined-outer-name
"""
Test n°4 : Custom test in Strasbourg to ensure proper decision making in crowded area.
Args:
client:
request:
"""
start_time = time.time() # Start timer
duration_minutes = 180
response = client.post(
"/trip/new",
json={
"preferences": {"sightseeing": {"type": "sightseeing", "score": 5},
"nature": {"type": "nature", "score": 5},
"shopping": {"type": "shopping", "score": 5},
"max_time_minute": duration_minutes,
"detour_tolerance_minute": 0},
"start": [48.5846589226, 7.74078715721]
}
)
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])
# Get computation time
comp_time = time.time() - start_time
# Add details to report
log_trip_details(request, landmarks, result['total_time'], duration_minutes)
# for elem in landmarks :
# print(elem)
# checks :
assert response.status_code == 200 # check for successful planning
assert comp_time < 30, f"Computation time exceeded 30 seconds: {comp_time:.2f} seconds"
assert duration_minutes*0.8 < result['total_time'], f"Trip too short: {result['total_time']} instead of {duration_minutes}"
assert duration_minutes*1.2 > result['total_time'], f"Trip too long: {result['total_time']} instead of {duration_minutes}"
def test_zurich(client, request) : # pylint: disable=redefined-outer-name
"""
Test n°5 : Custom test in Zurich to ensure proper decision making in crowded area.
Args:
client:
request:
"""
start_time = time.time() # Start timer
duration_minutes = 180
response = client.post(
"/trip/new",
json={
"preferences": {"sightseeing": {"type": "sightseeing", "score": 5},
"nature": {"type": "nature", "score": 5},
"shopping": {"type": "shopping", "score": 5},
"max_time_minute": duration_minutes,
"detour_tolerance_minute": 0},
"start": [47.377884227, 8.5395114066]
}
)
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])
# Get computation time
comp_time = time.time() - start_time
# Add details to report
log_trip_details(request, landmarks, result['total_time'], duration_minutes)
# for elem in landmarks :
# print(elem)
# checks :
assert response.status_code == 200 # check for successful planning
assert comp_time < 30, f"Computation time exceeded 30 seconds: {comp_time:.2f} seconds"
assert duration_minutes*0.8 < result['total_time'], f"Trip too short: {result['total_time']} instead of {duration_minutes}"
assert duration_minutes*1.2 > result['total_time'], f"Trip too long: {result['total_time']} instead of {duration_minutes}"
def test_paris(client, request) : # pylint: disable=redefined-outer-name
"""
Test n°6 : Custom test in Paris (les Halles) centre to ensure proper decision making in crowded area.
Args:
client:
request:
"""
start_time = time.time() # Start timer
duration_minutes = 200
response = client.post(
"/trip/new",
json={
"preferences": {"sightseeing": {"type": "sightseeing", "score": 5},
"nature": {"type": "nature", "score": 0},
"shopping": {"type": "shopping", "score": 5},
"max_time_minute": duration_minutes,
"detour_tolerance_minute": 0},
"start": [48.85468881798671, 2.3423925755998374]
}
)
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])
# Get computation time
comp_time = time.time() - start_time
# Add details to report
log_trip_details(request, landmarks, result['total_time'], duration_minutes)
# for elem in landmarks :
# print(elem)
# checks :
assert response.status_code == 200 # check for successful planning
assert comp_time < 30, f"Computation time exceeded 30 seconds: {comp_time:.2f} seconds"
assert duration_minutes*0.8 < result['total_time'], f"Trip too short: {result['total_time']} instead of {duration_minutes}"
assert duration_minutes*1.2 > result['total_time'], f"Trip too long: {result['total_time']} instead of {duration_minutes}"
def test_new_york(client, request) : # pylint: disable=redefined-outer-name
"""
Test n°7 : Custom test in New York to ensure proper decision making in crowded area.
Args:
client:
request:
"""
start_time = time.time() # Start timer
duration_minutes = 600
response = client.post(
"/trip/new",
json={
"preferences": {"sightseeing": {"type": "sightseeing", "score": 5},
"nature": {"type": "nature", "score": 5},
"shopping": {"type": "shopping", "score": 5},
"max_time_minute": duration_minutes,
"detour_tolerance_minute": 0},
"start": [40.72592726802, -73.9920434795]
}
)
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])
# Get computation time
comp_time = time.time() - start_time
# Add details to report
log_trip_details(request, landmarks, result['total_time'], duration_minutes)
# for elem in landmarks :
# print(elem)
# checks :
assert response.status_code == 200 # check for successful planning
assert comp_time < 30, f"Computation time exceeded 30 seconds: {comp_time:.2f} seconds"
assert duration_minutes*0.8 < result['total_time'], f"Trip too short: {result['total_time']} instead of {duration_minutes}"
assert duration_minutes*1.2 > result['total_time'], f"Trip too long: {result['total_time']} instead of {duration_minutes}"
def test_shopping(client, request) : # pylint: disable=redefined-outer-name
"""
Test n°8 : Custom test in Lyon centre to ensure shopping clusters are found.
Args:
client:
request:
"""
start_time = time.time() # Start timer
duration_minutes = 240
response = client.post(
"/trip/new",
json={
"preferences": {"sightseeing": {"type": "sightseeing", "score": 0},
"nature": {"type": "nature", "score": 0},
"shopping": {"type": "shopping", "score": 5},
"max_time_minute": duration_minutes,
"detour_tolerance_minute": 0},
"start": [45.7576485, 4.8330241]
}
)
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])
# Get computation time
comp_time = time.time() - start_time
# Add details to report
log_trip_details(request, landmarks, result['total_time'], duration_minutes)
# for elem in landmarks :
# print(elem)
# checks :
assert response.status_code == 200 # check for successful planning
assert comp_time < 30, f"Computation time exceeded 30 seconds: {comp_time:.2f} seconds"
assert duration_minutes*0.8 < result['total_time'], f"Trip too short: {result['total_time']} instead of {duration_minutes}"
assert duration_minutes*1.2 > result['total_time'], f"Trip too long: {result['total_time']} instead of {duration_minutes}"

View File

@ -1,101 +0,0 @@
"""Collection of tests to ensure correct implementation and track progress. """
from fastapi.testclient import TestClient
import pytest
from ..structs.toilets import Toilets
from ..main import app
@pytest.fixture(scope="module")
def client():
"""Client used to call the app."""
return TestClient(app)
@pytest.mark.parametrize(
"location,radius,status_code",
[
({}, None, 422), # Invalid case: no location at all.
([443], None, 422), # Invalid cases: invalid location.
([443, 433], None, 422), # Invalid cases: invalid location.
]
)
def test_invalid_input(client, location, radius, status_code): # pylint: disable=redefined-outer-name
"""
Test n°1 : Verify handling of invalid input.
Args:
client:
request:
"""
response = client.post(
"/toilets/new",
params={
"location": location,
"radius": radius
}
)
# checks :
assert response.status_code == status_code
@pytest.mark.parametrize(
"location,status_code",
[
([48.2270, 7.4370], 200), # Orschwiller.
([10.2012, 10.123], 200), # Nigerian desert.
([63.989, -19.677], 200), # Hekla volcano, Iceland
]
)
def test_no_toilets(client, location, status_code): # pylint: disable=redefined-outer-name
"""
Test n°2 : Verify the endpoint still succeeds for locations with few or no toilets nearby.
Args:
client:
request:
"""
response = client.post(
"/toilets/new",
params={
"location": location
}
)
toilets_list = [Toilets.model_validate(toilet) for toilet in response.json()]
# checks :
assert response.status_code == status_code # check for successful planning
assert isinstance(toilets_list, list) # check that the return type is a list
@pytest.mark.parametrize(
"location,status_code",
[
([45.7576485, 4.8330241], 200), # Lyon, Bellecour.
([-6.913795, 107.60278], 200), # Bandung, train station
([-22.970140, -43.18181], 200), # Rio de Janeiro, Copacabana
]
)
def test_toilets(client, location, status_code): # pylint: disable=redefined-outer-name
"""
Test n°3 : Verify the code finds some toilets in big cities.
Args:
client:
request:
"""
response = client.post(
"/toilets/new",
params={
"location": location,
"radius" : 600
}
)
toilets_list = [Toilets.model_validate(toilet) for toilet in response.json()]
# checks :
assert response.status_code == status_code # check for successful planning
assert isinstance(toilets_list, list) # check that the return type is a list
assert len(toilets_list) > 0

View File

@ -1,93 +0,0 @@
"""Helper methods for testing."""
import logging
from fastapi import HTTPException
from ..structs.landmark import Landmark
from ..cache import client as cache_client
def landmarks_to_osmid(landmarks: list[Landmark]) -> list[int] :
"""
Convert the list of landmarks into a list containing their osm ids for quick landmark checking.
Args :
landmarks (list): the list of landmarks
Returns :
ids (list) : the list of corresponding OSM ids
"""
ids = []
for landmark in landmarks :
ids.append(landmark.osm_id)
return ids
def fetch_landmark(landmark_uuid: str):
"""
Fetch landmark data from the cache based on the landmark UUID.
Args:
landmark_uuid (str): The UUID of the landmark.
Returns:
dict: Landmark data fetched from the cache or raises an HTTP exception.
"""
logger = logging.getLogger(__name__)
# Try to fetch the landmark data from the cache
try:
landmark = cache_client.get(f'landmark_{landmark_uuid}')
if not landmark :
logger.error(f'Cache miss for landmark UUID: {landmark_uuid}')
raise HTTPException(status_code=404, detail=f'Landmark with UUID {landmark_uuid} not found in cache.')
# Validate that the fetched data is a Landmark object
if not isinstance(landmark, Landmark):
logger.error(f'Invalid cache data format for landmark UUID: {landmark_uuid}. Expected Landmark, got {type(landmark).__name__}.')
raise HTTPException(status_code=500, detail="Invalid cache data format.")
return landmark
except Exception as exc:
logger.error(f'Unexpected error occurred while fetching landmark UUID {landmark_uuid}: {exc}')
raise HTTPException(status_code=500, detail="An unexpected error occurred while fetching the landmark from the cache") from exc
def load_trip_landmarks(client, first_uuid: str) -> list[Landmark]:
"""
Load all landmarks for a trip using the response from the API.
Args:
first_uuid (str) : The first UUID of the landmark.
Returns:
landmarks (list) : A list containing all landmarks for the trip.
"""
landmarks = []
next_uuid = first_uuid
while next_uuid is not None:
landmark = fetch_landmark(next_uuid)
landmarks.append(landmark)
next_uuid = landmark.next_uuid # Prepare for the next iteration
return landmarks
def log_trip_details(request, landmarks: list[Landmark], duration: int, target_duration: int) :
"""
Shows the detailed trip in the html test report.
Args:
request:
landmarks (list): the ordered list of visited landmarks
duration (int): the total duration of this trip
target_duration(int): the target duration of this trip
"""
trip_string = [f'{landmark.name} ({landmark.attractiveness} | {landmark.duration}) - {landmark.time_to_reach_next}' for landmark in landmarks]
# Pass additional info to pytest for reporting
request.node.trip_details = trip_string
request.node.trip_duration = str(duration) # result['total_time']
request.node.target_duration = str(target_duration)
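A hedged usage sketch inside a trip test; the `first_landmark_uuid` response key is an assumption based on how `load_trip_landmarks` chains UUIDs, while `result['total_time']` is referenced in the comment above:

```python
# Hypothetical test snippet: `response` holds a successful trip-planning response
# and `request` is pytest's built-in fixture consumed by log_trip_details above.
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])  # key name assumed

log_trip_details(request, landmarks, result['total_time'], target_duration=120)
```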

View File

@ -1,38 +0,0 @@
"""Defines the endpoint for fetching toilet locations."""
from fastapi import HTTPException, APIRouter, Query
from ..structs.toilets import Toilets
from .toilets_manager import ToiletsManager
# Define the API router
router = APIRouter()
@router.post("/toilets/new")
def get_toilets(location: tuple[float, float] = Query(...), radius: int = 500) -> list[Toilets] :
"""
Endpoint to find toilets within a specified radius from a given location.
This endpoint expects the `location` and `radius` as **query parameters**, not in the request body.
Args:
location (tuple[float, float]): The latitude and longitude of the location to search from.
radius (int, optional): The radius (in meters) within which to search for toilets. Defaults to 500 meters.
Returns:
list[Toilets]: A list of Toilets objects that meet the criteria.
"""
if location is None:
raise HTTPException(status_code=406, detail="Coordinates not provided or invalid")
if not (-90 <= location[0] <= 90 and -180 <= location[1] <= 180):
raise HTTPException(status_code=422, detail="Coordinates out of range")
toilets_manager = ToiletsManager(location, radius)
try :
toilets_list = toilets_manager.generate_toilet_list()
except KeyError as exc:
raise HTTPException(status_code=404, detail="No toilets found") from exc
return toilets_list
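Since `location` is a tuple query parameter, FastAPI expects it as a repeated query key; a sketch of a client call (host and port are placeholders):

```python
# Sketch only: the host/port are placeholders for wherever the backend runs.
import requests

response = requests.post(
    "http://localhost:8000/toilets/new",
    params={"location": [45.7576485, 4.8330241], "radius": 600},  # sent as repeated ?location=... pairs
    timeout=10,
)
response.raise_for_status()
print(response.json())  # serialized list of Toilets
```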

View File

@ -1,122 +0,0 @@
"""Module for finding public toilets around given coordinates."""
import logging
from ..overpass.overpass import Overpass, get_base_info
from ..structs.toilets import Toilets
from ..utils.bbox import create_bbox
# silence the overpass logger
logging.getLogger('Overpass').setLevel(level=logging.CRITICAL)
class ToiletsManager:
"""
Manages the process of fetching and caching toilet information from
OpenStreetMap (OSM) based on a specified location and radius.
This class is responsible for:
- Fetching toilet data from OSM using Overpass API around a given set of
coordinates (latitude, longitude).
- Using a caching strategy to optimize requests by saving and retrieving
data from a local cache.
- Logging important events and errors related to data fetching.
Attributes:
logger (logging.Logger): Logger for the class to capture events.
location (tuple[float, float]): Latitude and longitude representing the
location to search around.
radius (int): The search radius in meters for finding nearby toilets.
overpass (Overpass): The Overpass API instance used to query OSM.
"""
logger = logging.getLogger(__name__)
location: tuple[float, float]
radius: int # radius in meters
def __init__(self, location: tuple[float, float], radius : int) -> None:
self.radius = radius
self.location = location
# Setup the caching in the Overpass class.
self.overpass = Overpass()
def generate_toilet_list(self) -> list[Toilets] :
"""
Generates a list of toilet locations by fetching data from OpenStreetMap (OSM)
around the given coordinates stored in `self.location`.
Returns:
list[Toilets]: A list of `Toilets` objects containing detailed information
about the toilets found around the given coordinates.
"""
bbox = create_bbox(self.location, self.radius)
osm_types = ['node', 'way', 'relation']
toilets_list = []
query = Overpass.build_query(
bbox = bbox,
osm_types = osm_types,
selector = '"amenity"="toilets"',
out = 'ids center tags'
)
try:
result = self.overpass.fetch_data_from_api(query_str=query)
except Exception as e:
self.logger.error(f"Error fetching toilets: {e}")
return []   # return an empty list rather than None to match the declared return type
toilets_list = self.to_toilets(result)
return toilets_list
def to_toilets(self, elements: list) -> list[Toilets]:
"""
Parse the Overpass API result and extract toilets.
This method processes the JSON elements returned by the Overpass API and
extracts toilet locations of types 'node', 'way', and 'relation'. It retrieves
relevant information such as coordinates and tags, and converts them
into Toilets objects.
Args:
elements (list): The JSON elements returned by the Overpass API.
Returns:
list[Toilets]: A list of Toilets objects extracted from the JSON data.
"""
if elements is None :
return []
toilets_list = []
for elem in elements:
osm_type = elem.get('type')
# Get coordinates and append them to the points list
_, coords = get_base_info(elem, osm_type)
if coords is None :
continue
toilets = Toilets(location=coords)
# Extract tags as a dictionary (guard against elements without tags)
tags = elem.get('tags', {})
if tags.get('wheelchair') == 'yes':
toilets.wheelchair = True
if tags.get('changing_table') == 'yes':
toilets.changing_table = True
if tags.get('fee') == 'yes':
toilets.fee = True
if 'opening_hours' in tags :
toilets.opening_hours = tags['opening_hours']
toilets_list.append(toilets)
return toilets_list
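A minimal usage sketch of the manager, mirroring what the `/toilets/new` endpoint does:

```python
# Minimal sketch: fetch toilets within 500 m of Lyon Bellecour.
manager = ToiletsManager(location=(45.7576485, 4.8330241), radius=500)
toilets = manager.generate_toilet_list()
for toilet in toilets:
    print(toilet.location, toilet.wheelchair, toilet.fee, toilet.opening_hours)
```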

View File

@ -1,27 +0,0 @@
"""Various helper functions"""
import math as m
def create_bbox(coords: tuple[float, float], radius: int):
"""
Create a bounding box around the given coordinates.
Args:
coords (tuple[float, float]): The latitude and longitude of the center of the bounding box.
radius (int): The half-side length of the bounding box in meters.
Returns:
tuple[float, float, float, float]: The minimum latitude, minimum longitude, maximum latitude, and maximum longitude
defining the bounding box.
"""
# Earth's radius in meters
R = 6378137
lat, lon = coords
d_lat = radius / R
d_lon = radius / (R * m.cos(m.pi * lat / 180))
lat_min = lat - d_lat * 180 / m.pi
lat_max = lat + d_lat * 180 / m.pi
lon_min = lon - d_lon * 180 / m.pi
lon_max = lon + d_lon * 180 / m.pi
return (lat_min, lon_min, lat_max, lon_max)
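A worked example of the degree conversion: at Lyon's latitude, a 500 m half-side corresponds to roughly 0.0045° of latitude and 0.0064° of longitude, so:

```python
# Worked example: a 500 m half-side bounding box around Lyon Bellecour.
bbox = create_bbox((45.7576485, 4.8330241), 500)
print(bbox)
# ≈ (45.7532, 4.8266, 45.7621, 4.8395)  ->  (lat_min, lon_min, lat_max, lon_max)
```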

View File

@ -1,80 +0,0 @@
"""Contains various helper functions to help with distance or score computations."""
from math import sin, cos, sqrt, atan2, radians
import yaml
from ..constants import OPTIMIZER_PARAMETERS_PATH
with OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
DETOUR_FACTOR = parameters['detour_factor']
AVERAGE_WALKING_SPEED = parameters['average_walking_speed']
EARTH_RADIUS_KM = 6373
def get_time(p1: tuple[float, float], p2: tuple[float, float]) -> int:
"""
Calculate the time in minutes to travel from one location to another.
Args:
p1 (tuple[float, float]): Coordinates of the starting location.
p2 (tuple[float, float]): Coordinates of the destination.
Returns:
int: Time to travel from p1 to p2 in minutes.
"""
# Compute the distance in km along the surface of the Earth
# (assume spherical Earth)
# this is the haversine formula, stolen from stackoverflow
# in order to not use any external libraries
lat1, lon1 = radians(p1[0]), radians(p1[1])
lat2, lon2 = radians(p2[0]), radians(p2[1])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = EARTH_RADIUS_KM * c
# Consider the detour factor for an average city
walk_distance = distance * DETOUR_FACTOR
# Time to walk this distance (in minutes)
walk_time = walk_distance / AVERAGE_WALKING_SPEED * 60
return min(round(walk_time), 32765)
def get_distance(p1: tuple[float, float], p2: tuple[float, float]) -> float:
"""
Calculate the distance in kilometers between two locations along the Earth's surface.
Args:
p1 (tuple[float, float]): Coordinates of the starting location.
p2 (tuple[float, float]): Coordinates of the destination.
Returns:
float: Distance between p1 and p2 in kilometers.
"""
if p1 == p2:
return 0
# Compute the distance in km along the surface of the Earth
# (assume spherical Earth)
# this is the haversine formula, stolen from stackoverflow
# in order to not use any external libraries
lat1, lon1 = radians(p1[0]), radians(p1[1])
lat2, lon2 = radians(p2[0]), radians(p2[1])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
return EARTH_RADIUS_KM * c
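A quick sanity check of the two helpers; the exact outputs depend on the `detour_factor` and `average_walking_speed` parameters loaded above, and the coordinates are just two points in Lyon about 1.2 km apart:

```python
# Approximate sanity check of the haversine helpers.
bellecour = (45.7576485, 4.8330241)
perrache = (45.74846, 4.82562)

print(get_distance(bellecour, perrache))  # ≈ 1.2 (km, straight line)
print(get_time(bellecour, perrache))      # walking minutes, inflated by DETOUR_FACTOR
```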

View File

@ -0,0 +1,39 @@
import yaml
from geopy.distance import geodesic
import constants
with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
DETOUR_FACTOR = parameters['detour_factor']
AVERAGE_WALKING_SPEED = parameters['average_walking_speed']
def get_time(p1: tuple[float, float], p2: tuple[float, float]) -> int:
"""
Calculate the time in minutes to travel from one location to another.
Args:
p1 (tuple[float, float]): Coordinates of the starting location.
p2 (tuple[float, float]): Coordinates of the destination.
Returns:
int: Time to travel from p1 to p2 in minutes.
"""
# Compute the straight-line distance in km
if p1 == p2 :
return 0
else:
dist = geodesic(p1, p2).kilometers
# Consider the detour factor for an average city to determine the walking distance (in km)
walk_dist = dist*DETOUR_FACTOR
# Time to walk this distance (in minutes)
walk_time = walk_dist/AVERAGE_WALKING_SPEED*60
return round(walk_time)

View File

@ -0,0 +1,365 @@
import math as m
import yaml
import logging
from OSMPythonTools.overpass import Overpass, overpassQueryBuilder
from OSMPythonTools.cachingStrategy import CachingStrategy, JSON
from pywikibot import ItemPage, Site
from pywikibot import config
config.put_throttle = 0
config.maxlag = 0
from structs.preferences import Preferences, Preference
from structs.landmark import Landmark
from .take_most_important import take_most_important
import constants
SIGHTSEEING = 'sightseeing'
NATURE = 'nature'
SHOPPING = 'shopping'
class LandmarkManager:
logger = logging.getLogger(__name__)
city_bbox_side: int # bbox side in meters
radius_close_to: int # radius in meters
church_coeff: float # coeff to adjust score of churches
park_coeff: float # coeff to adjust score of parks
tag_coeff: float # coeff to adjust weight of tags
N_important: int # number of important landmarks to consider
def __init__(self) -> None:
with constants.AMENITY_SELECTORS_PATH.open('r') as f:
self.amenity_selectors = yaml.safe_load(f)
with constants.LANDMARK_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
self.city_bbox_side = parameters['city_bbox_side']
self.radius_close_to = parameters['radius_close_to']
self.church_coeff = parameters['church_coeff']
self.park_coeff = parameters['park_coeff']
self.tag_coeff = parameters['tag_coeff']
self.N_important = parameters['N_important']
self.overpass = Overpass()
CachingStrategy.use(JSON, cacheDir=constants.OSM_CACHE_DIR)
def generate_landmarks_list(self, center_coordinates: tuple[float, float], preferences: Preferences) -> tuple[list[Landmark], list[Landmark]]:
"""
Generate and prioritize a list of landmarks based on user preferences.
This method fetches landmarks from various categories (sightseeing, nature, shopping) based on the user's preferences
and current location. It scores and corrects these landmarks, removes duplicates, and then selects the most important
landmarks based on a predefined criterion.
Parameters:
center_coordinates (tuple[float, float]): The latitude and longitude of the center location around which to search.
preferences (Preferences): The user's preference settings that influence the landmark selection.
Returns:
tuple[list[Landmark], list[Landmark]]:
- A list of all existing landmarks.
- A list of the most important landmarks based on the user's preferences.
"""
L = []
bbox = self.create_bbox(center_coordinates)
# list for sightseeing
if preferences.sightseeing.score != 0:
score_function = lambda loc, n_tags: int((self.count_elements_close_to(loc) + ((n_tags**1.2)*self.tag_coeff) )*self.church_coeff)
L1 = self.fetch_landmarks(bbox, self.amenity_selectors['sightseeing'], SIGHTSEEING, score_function)
self.correct_score(L1, preferences.sightseeing)
L += L1
# list for nature
if preferences.nature.score != 0:
score_function = lambda loc, n_tags: int((self.count_elements_close_to(loc) + ((n_tags**1.2)*self.tag_coeff) )*self.park_coeff)
L2 = self.fetch_landmarks(bbox, self.amenity_selectors['nature'], NATURE, score_function)
self.correct_score(L2, preferences.nature)
L += L2
# list for shopping
if preferences.shopping.score != 0:
score_function = lambda loc, n_tags: int(self.count_elements_close_to(loc) + ((n_tags**1.2)*self.tag_coeff))
L3 = self.fetch_landmarks(bbox, self.amenity_selectors['shopping'], SHOPPING, score_function)
self.correct_score(L3, preferences.shopping)
L += L3
L = self.remove_duplicates(L)
L_constrained = take_most_important(L, self.N_important)
self.logger.info(f'Generated {len(L)} landmarks around {center_coordinates}, and constrained to {len(L_constrained)} most important ones.')
return L, L_constrained
def remove_duplicates(self, landmarks: list[Landmark]) -> list[Landmark]:
"""
Removes duplicate landmarks based on their names from the given list. Only retains the landmark with highest score
Parameters:
landmarks (list[Landmark]): A list of Landmark objects.
Returns:
list[Landmark]: A list of unique Landmark objects based on their names.
"""
L_clean = []
names = []
for landmark in landmarks:
if landmark.name in names:
continue
else:
names.append(landmark.name)
L_clean.append(landmark)
return L_clean
def correct_score(self, landmarks: list[Landmark], preference: Preference):
"""
Adjust the attractiveness score of each landmark in the list based on user preferences.
This method updates the attractiveness of each landmark by scaling it according to the user's preference score.
The score adjustment is computed using a simple linear transformation based on the preference score.
Args:
landmarks (list[Landmark]): A list of landmarks whose scores need to be corrected.
preference (Preference): The user's preference settings that influence the attractiveness score adjustment.
Raises:
TypeError: If the type of any landmark in the list does not match the expected type in the preference.
"""
if len(landmarks) == 0:
return
if landmarks[0].type != preference.type:
raise TypeError(f"LandmarkType {preference.type} does not match the type of Landmark {landmarks[0].name}")
for elem in landmarks:
elem.attractiveness = int(elem.attractiveness*preference.score/5) # arbitrary computation
def count_elements_close_to(self, coordinates: tuple[float, float]) -> int:
"""
Count the number of OpenStreetMap elements (nodes, ways, relations) within a specified radius of the given location.
This function constructs a bounding box around the specified coordinates based on the radius. It then queries
OpenStreetMap data to count the number of elements within that bounding box.
Args:
coordinates (tuple[float, float]): The latitude and longitude of the location to search around.
Returns:
int: The number of elements (nodes, ways, relations) within the specified radius. Returns 0 if no elements
are found or if an error occurs during the query.
"""
lat = coordinates[0]
lon = coordinates[1]
radius = self.radius_close_to
alpha = (180*radius) / (6371000*m.pi)
bbox = {'latLower':lat-alpha,'lonLower':lon-alpha,'latHigher':lat+alpha,'lonHigher': lon+alpha}
# Build the query to find elements within the radius
radius_query = overpassQueryBuilder(
bbox=[bbox['latLower'],
bbox['lonLower'],
bbox['latHigher'],
bbox['lonHigher']],
elementType=['node', 'way', 'relation']
)
try:
radius_result = self.overpass.query(radius_query)
N_elem = radius_result.countWays() + radius_result.countRelations()
self.logger.debug(f"There are {N_elem} ways/relations within {radius}m")
if N_elem is None:
return 0
return N_elem
except Exception:
return 0
def create_bbox(self, coordinates: tuple[float, float]) -> tuple[float, float, float, float]:
"""
Create a bounding box around the given coordinates.
Args:
coordinates (tuple[float, float]): The latitude and longitude of the center of the bounding box.
Returns:
tuple[float, float, float, float]: The minimum latitude, minimum longitude, maximum latitude, and maximum longitude
defining the bounding box.
"""
lat = coordinates[0]
lon = coordinates[1]
# Half the side length in km (since it's a square bbox)
half_side_length_km = self.city_bbox_side / 2 / 1000
# Convert distance to degrees
lat_diff = half_side_length_km / 111 # 1 degree latitude is approximately 111 km
lon_diff = half_side_length_km / (111 * m.cos(m.radians(lat))) # Adjust for longitude based on latitude
# Calculate bbox
min_lat = lat - lat_diff
max_lat = lat + lat_diff
min_lon = lon - lon_diff
max_lon = lon + lon_diff
return min_lat, min_lon, max_lat, max_lon
def fetch_landmarks(self, bbox: tuple, amenity_selector: dict, landmarktype: str, score_function: callable) -> list[Landmark]:
"""
Fetches landmarks of a specified type from OpenStreetMap (OSM) within a bounding box centered on given coordinates.
Args:
bbox (tuple[float, float, float, float]): The bounding box coordinates (min_lat, min_lon, max_lat, max_lon).
amenity_selector (dict): The Overpass API query selector for the desired landmark type.
landmarktype (str): The type of the landmark (e.g., 'sightseeing', 'nature', 'shopping').
score_function (callable): The function to compute the score of the landmark based on its attributes.
Returns:
list[Landmark]: A list of Landmark objects that were fetched and filtered based on the provided criteria.
Notes:
- Landmarks are fetched using Overpass API queries.
- Selectors are translated from the dictionary to the Overpass query format. (e.g., 'amenity'='place_of_worship')
- Landmarks are filtered based on various conditions including tags and type.
- Scores are assigned to landmarks based on their attributes and surrounding elements.
"""
return_list = []
# caution, when applying a list of selectors, overpass will search for elements that match ALL selectors simultaneously
# we need to split the selectors into separate queries and merge the results
for sel in dict_to_selector_list(amenity_selector):
self.logger.debug(f"Current selector: {sel}")
query = overpassQueryBuilder(
bbox = bbox,
elementType = ['way', 'relation'],
selector = sel,
# conditions = [],
includeCenter = True,
out = 'body'
)
try:
result = self.overpass.query(query)
except Exception as e:
self.logger.error(f"Error fetching landmarks: {e}")
return
for elem in result.elements():
name = elem.tag('name') # Add name
location = (elem.centerLat(), elem.centerLon()) # Add coordinates (lat, lon)
# TODO: exclude these from the get go
# skip if imprecise location
if name is None or location[0] is None:
continue
# skip if unused
if 'disused:leisure' in elem.tags().keys():
continue
# skip if part of another building
if 'building:part' in elem.tags().keys() and elem.tag('building:part') == 'yes':
continue
osm_type = elem.type() # Add type: 'way' or 'relation'
osm_id = elem.id() # Add OSM id
elem_type = landmarktype # Add the landmark type as 'sightseeing'
n_tags = len(elem.tags().keys()) # Add number of tags
# remove specific tags
skip = False
for tag in elem.tags().keys():
if "pay" in tag:
n_tags -= 1 # discard payment options for tags
if "disused" in tag:
skip = True # skip disused amenities
break
if "wikipedia" in tag:
n_tags += 3 # wikipedia entries count more
if tag == "wikidata":
Q = elem.tag('wikidata')
site = Site("wikidata", "wikidata")
item = ItemPage(site, Q)
item.get()
n_languages = len(item.labels)
n_tags += n_languages/10
if elem_type != "nature":
if "leisure" in tag and elem.tag('leisure') == "park":
elem_type = "nature"
if landmarktype != SHOPPING:
if "shop" in tag:
skip = True
break
if tag == "building" and elem.tag('building') in ['retail', 'supermarket', 'parking']:
skip = True
break
if skip:
continue
score = score_function(location, n_tags)
if score != 0:
# Generate the landmark and append it to the list
landmark = Landmark(
name=name,
type=elem_type,
location=location,
osm_type=osm_type,
osm_id=osm_id,
attractiveness=score,
must_do=False,
n_tags=int(n_tags)
)
return_list.append(landmark)
self.logger.debug(f"Fetched {len(return_list)} landmarks of type {landmarktype} in {bbox}")
return return_list
def dict_to_selector_list(d: dict) -> list:
"""
Convert a dictionary of key-value pairs to a list of Overpass query strings.
Args:
d (dict): A dictionary of key-value pairs representing the selector.
Returns:
list: A list of strings representing the Overpass query selectors.
"""
return_list = []
for key, value in d.items():
if type(value) == list:
val = '|'.join(value)
return_list.append(f'{key}~"{val}"')
elif type(value) == str and len(value) == 0:
return_list.append(f'{key}')
else:
return_list.append(f'{key}={value}')
return return_list
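The selector translation is easiest to see on an example:

```python
# Example translation of an amenity dictionary into Overpass selectors.
selectors = dict_to_selector_list({
    'amenity': ['place_of_worship', 'museum'],  # list       -> regex alternation
    'tourism': '',                              # empty str  -> key-only selector
    'leisure': 'park',                          # plain str  -> exact match
})
print(selectors)
# ['amenity~"place_of_worship|museum"', 'tourism', 'leisure=park']
```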

View File

@ -0,0 +1,519 @@
import yaml, logging
import numpy as np
from scipy.optimize import linprog
from collections import defaultdict, deque
from geopy.distance import geodesic
from structs.landmark import Landmark
from .get_time_separation import get_time
import constants
class Optimizer:
logger = logging.getLogger(__name__)
detour: int = None # accepted max detour time (in minutes)
detour_factor: float # detour factor of straight line vs real distance in cities
average_walking_speed: float # average walking speed of adult
max_landmarks: int # max number of landmarks to visit
def __init__(self) :
# load parameters from file
with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f)
self.detour_factor = parameters['detour_factor']
self.average_walking_speed = parameters['average_walking_speed']
self.max_landmarks = parameters['max_landmarks']
# Prevent the use of a particular solution
def prevent_config(self, resx):
"""
Prevent the use of a particular solution by adding constraints to the optimization.
Args:
resx (list[float]): List of edge weights.
Returns:
Tuple[list[int], list[int]]: A tuple containing a new row for constraint matrix and new value for upper bound vector.
"""
for i, elem in enumerate(resx):
resx[i] = round(elem)
N = len(resx) # Number of edges
L = int(np.sqrt(N)) # Number of landmarks
nonzeroind = np.nonzero(resx)[0] # the return is a little funky so I use the [0]
nonzero_tup = np.unravel_index(nonzeroind, (L,L))
ind_a = nonzero_tup[0].tolist()
vertices_visited = ind_a
vertices_visited.remove(0)
ones = [1]*L
h = [0]*N
for i in range(L) :
if i in vertices_visited :
h[i*L:i*L+L] = ones
return h, [len(vertices_visited)-1]
# Prevents the creation of the same circle (both directions)
def prevent_circle(self, circle_vertices: list, L: int) :
"""
Prevent circular paths by adding constraints to the optimization.
Args:
circle_vertices (list): List of vertices forming a circle.
L (int): Number of landmarks.
Returns:
Tuple[np.ndarray, list[int]]: A tuple containing a new row for constraint matrix and new value for upper bound vector.
"""
l1 = [0]*L*L
l2 = [0]*L*L
for i, node in enumerate(circle_vertices[:-1]) :
next = circle_vertices[i+1]
l1[node*L + next] = 1
l2[next*L + node] = 1
s = circle_vertices[0]
g = circle_vertices[-1]
l1[g*L + s] = 1
l2[s*L + g] = 1
return np.vstack((l1, l2)), [0, 0]
def is_connected(self, resx) :
"""
Determine the order of visits and detect any circular paths in the given configuration.
Args:
resx (list): List of edge weights.
Returns:
Tuple[list[int], Optional[list[list[int]]]]: A tuple containing the visit order and a list of any detected circles.
"""
# first round the results to have only 0-1 values
for i, elem in enumerate(resx):
resx[i] = round(elem)
N = len(resx) # length of res
L = int(np.sqrt(N)) # number of landmarks. CAST INTO INT but should not be a problem because N = L**2 by def.
nonzeroind = np.nonzero(resx)[0] # the return is a little funny so I use the [0]
nonzero_tup = np.unravel_index(nonzeroind, (L,L))
ind_a = nonzero_tup[0].tolist()
ind_b = nonzero_tup[1].tolist()
# Step 1: Create a graph representation
graph = defaultdict(list)
for a, b in zip(ind_a, ind_b):
graph[a].append(b)
# Step 2: Function to perform BFS/DFS to extract journeys
def get_journey(start):
journey_nodes = []
visited = set()
stack = deque([start])
while stack:
node = stack.pop()
if node not in visited:
visited.add(node)
journey_nodes.append(node)
for neighbor in graph[node]:
if neighbor not in visited:
stack.append(neighbor)
return journey_nodes
# Step 3: Extract all journeys
all_journeys_nodes = []
visited_nodes = set()
for node in ind_a:
if node not in visited_nodes:
journey_nodes = get_journey(node)
all_journeys_nodes.append(journey_nodes)
visited_nodes.update(journey_nodes)
for l in all_journeys_nodes :
if 0 in l :
order = l
all_journeys_nodes.remove(l)
break
if len(all_journeys_nodes) == 0 :
return order, None
return order, all_journeys_nodes
def init_ub_dist(self, landmarks: list[Landmark], max_steps: int):
"""
Initialize the objective function coefficients and inequality constraints for the optimization problem.
This function computes the distances between all landmarks and stores their attractiveness to maximize sightseeing.
The goal is to maximize the objective function subject to the constraints A*x < b and A_eq*x = b_eq.
Args:
landmarks (list[Landmark]): List of landmarks.
max_steps (int): Maximum number of steps allowed.
Returns:
Tuple[list[float], list[float], list[int]]: Objective function coefficients, inequality constraint coefficients, and the right-hand side of the inequality constraint.
"""
# Objective function coefficients. a*x1 + b*x2 + c*x3 + ...
c = []
# Coefficients of inequality constraints (left-hand side)
A_ub = []
for spot1 in landmarks :
dist_table = [0]*len(landmarks)
c.append(-spot1.attractiveness)
for j, spot2 in enumerate(landmarks) :
t = get_time(spot1.location, spot2.location)
dist_table[j] = t
closest = sorted(dist_table)[:22]
for i, dist in enumerate(dist_table) :
if dist not in closest :
dist_table[i] = 32700
A_ub += dist_table
c = c*len(landmarks)
return c, A_ub, [max_steps]
def respect_number(self, L: int):
"""
Generate constraints to ensure each landmark is visited only once and cap the total number of visited landmarks.
Args:
L (int): Number of landmarks.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
ones = [1]*L
zeros = [0]*L
A = ones + zeros*(L-1)
b = [1]
for i in range(L-1) :
h_new = zeros*i + ones + zeros*(L-1-i)
A = np.vstack((A, h_new))
b.append(1)
A = np.vstack((A, ones*L))
b.append(self.max_landmarks+1)
return A, b
# Constraint to not have d14 and d41 simultaneously. Does not prevent cyclic paths with more elements
def break_sym(self, L):
"""
Generate constraints to prevent simultaneous travel between two landmarks in both directions.
Args:
L (int): Number of landmarks.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
upper_ind = np.triu_indices(L,0,L)
up_ind_x = upper_ind[0]
up_ind_y = upper_ind[1]
A = [0]*L*L
b = [1]
for i, _ in enumerate(up_ind_x[1:]) :
l = [0]*L*L
if up_ind_x[i] != up_ind_y[i] :
l[up_ind_x[i]*L + up_ind_y[i]] = 1
l[up_ind_y[i]*L + up_ind_x[i]] = 1
A = np.vstack((A,l))
b.append(1)
return A, b
def init_eq_not_stay(self, L: int):
"""
Generate constraints to prevent staying in the same position (e.g., removing d11, d22, d33, etc.).
Args:
L (int): Number of landmarks.
Returns:
Tuple[list[np.ndarray], list[int]]: Equality constraint coefficients and the right-hand side of the equality constraints.
"""
l = [0]*L*L
for i in range(L) :
for j in range(L) :
if j == i :
l[j + i*L] = 1
l = np.array(l, dtype=np.int8)
return [l], [0]
def respect_user_must_do(self, landmarks: list[Landmark]) :
"""
Generate constraints to ensure that landmarks marked as 'must_do' are included in the optimization.
Args:
landmarks (list[Landmark]): List of landmarks, where some are marked as 'must_do'.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
L = len(landmarks)
A = [0]*L*L
b = [0]
for i, elem in enumerate(landmarks[1:]) :
if elem.must_do is True and elem.name not in ['finish', 'start']:
l = [0]*L*L
l[i*L:i*L+L] = [1]*L # set mandatory departures from landmarks tagged as 'must_do'
A = np.vstack((A,l))
b.append(1)
return A, b
def respect_user_must_avoid(self, landmarks: list[Landmark]) :
"""
Generate constraints to ensure that landmarks marked as 'must_avoid' are skipped in the optimization.
Args:
landmarks (list[Landmark]): List of landmarks, where some are marked as 'must_avoid'.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
L = len(landmarks)
A = [0]*L*L
b = [0]
for i, elem in enumerate(landmarks[1:]) :
if elem.must_avoid is True and elem.name not in ['finish', 'start']:
l = [0]*L*L
l[i*L:i*L+L] = [1]*L
A = np.vstack((A,l))
b.append(0) # prevent departures from landmarks tagged as 'must_avoid'
return A, b
# Constraint to ensure start at start and finish at goal
def respect_start_finish(self, L: int):
"""
Generate constraints to ensure that the optimization starts at the designated start landmark and finishes at the goal landmark.
Args:
L (int): Number of landmarks.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
l_start = [1]*L + [0]*L*(L-1) # sets departures only for start (horizontal ones)
l_start[L-1] = 0 # prevents the jump from start to finish
l_goal = [0]*L*L # sets arrivals only for finish (vertical ones)
l_L = [0]*L*(L-1) + [1]*L # prevents arrivals at start and departures from goal
for k in range(L-1) : # sets only vertical ones for goal (go to)
l_L[k*L] = 1
if k != 0 :
l_goal[k*L+L-1] = 1
A = np.vstack((l_start, l_goal))
b = [1, 1]
A = np.vstack((A,l_L))
b.append(0)
return A, b
def respect_order(self, L: int):
"""
Generate constraints to tie the optimization problem together and prevent stacked ones, although this does not fully prevent circles.
Args:
L (int): Number of landmarks.
Returns:
Tuple[np.ndarray, list[int]]: Inequality constraint coefficients and the right-hand side of the inequality constraints.
"""
A = [0]*L*L
b = [0]
for i in range(L-1) : # Prevent stacked ones
if i == 0 or i == L-1: # Don't touch start or finish
continue
else :
l = [0]*L
l[i] = -1
l = l*L
for j in range(L) :
l[i*L + j] = 1
A = np.vstack((A,l))
b.append(0)
return A, b
def link_list(self, order: list[int], landmarks: list[Landmark])->list[Landmark] :
"""
Compute the time to reach from each landmark to the next and create a list of landmarks with updated travel times.
Args:
order (list[int]): List of indices representing the order of landmarks to visit.
landmarks (list[Landmark]): List of all landmarks.
Returns:
list[Landmark]: The updated linked list of landmarks with travel times.
"""
L = []
j = 0
while j < len(order)-1 :
# get landmarks involved
elem = landmarks[order[j]]
next = landmarks[order[j+1]]
# get attributes
elem.time_to_reach_next = get_time(elem.location, next.location)
elem.must_do = True
elem.location = (round(elem.location[0], 5), round(elem.location[1], 5))
elem.next_uuid = next.uuid
L.append(elem)
j += 1
next.location = (round(next.location[0], 5), round(next.location[1], 5))
next.must_do = True
L.append(next)
return L
# Main optimization pipeline
def solve_optimization(
self,
max_time: int,
landmarks: list[Landmark],
) -> list[Landmark]:
"""
Main optimization pipeline to solve the landmark visiting problem.
This method sets up and solves a linear programming problem with constraints to find an optimal tour of landmarks,
considering user-defined must-visit landmarks, start and finish points, and ensuring no cycles are present.
Args:
max_time (int): Maximum time allowed for the tour in minutes.
landmarks (list[Landmark]): List of landmarks to visit.
Returns:
list[Landmark]: The optimized tour of landmarks with updated travel times, or None if no valid solution is found.
"""
L = len(landmarks)
# SET CONSTRAINTS FOR INEQUALITY
c, A_ub, b_ub = self.init_ub_dist(landmarks, max_time) # Add the distances from each landmark to the other
A, b = self.respect_number(L) # Respect max number of visits (no more possible stops than landmarks).
A_ub = np.vstack((A_ub, A), dtype=np.int16)
b_ub += b
A, b = self.break_sym(L) # break the 'zig-zag' symmetry
A_ub = np.vstack((A_ub, A), dtype=np.int16)
b_ub += b
# SET CONSTRAINTS FOR EQUALITY
A_eq, b_eq = self.init_eq_not_stay(L) # Force solution not to stay in same place
A, b = self.respect_user_must_do(landmarks) # Check if there are user_defined must_see. Also takes care of start/goal
A_eq = np.vstack((A_eq, A), dtype=np.int8)
b_eq += b
A, b = self.respect_user_must_avoid(landmarks) # Check if there are user_defined must_see. Also takes care of start/goal
A_eq = np.vstack((A_eq, A), dtype=np.int8)
b_eq += b
A, b = self.respect_start_finish(L) # Force start and finish positions
A_eq = np.vstack((A_eq, A), dtype=np.int8)
b_eq += b
A, b = self.respect_order(L) # Respect order of visit (only works when max_steps is limiting factor)
A_eq = np.vstack((A_eq, A), dtype=np.int8)
b_eq += b
# SET BOUNDS FOR DECISION VARIABLE (x can only be 0 or 1)
x_bounds = [(0, 1)]*L*L
# Solve linear programming problem
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3)
# Raise error if no solution is found
if not res.success :
raise ArithmeticError("No solution could be found, the problem is overconstrained. Please adapt your must_dos")
# If there is a solution, we're good to go, just check for connectiveness
order, circles = self.is_connected(res.x)
#nodes, edges = is_connected(res.x)
i = 0
timeout = 80
while circles is not None and i < timeout:
A, b = self.prevent_config(res.x)
A_ub = np.vstack((A_ub, A))
b_ub += b
#A_ub, b_ub = prevent_circle(order, len(landmarks), A_ub, b_ub)
for circle in circles :
A, b = self.prevent_circle(circle, L)
A_eq = np.vstack((A_eq, A))
b_eq += b
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3)
if not res.success :
raise ArithmeticError("Solving failed because of overconstrained problem")
return None
order, circles = self.is_connected(res.x)
#nodes, edges = is_connected(res.x)
if circles is None :
break
# print(i)
i += 1
if i == timeout :
raise TimeoutError(f"Optimization took too long. No solution found after {timeout} iterations.")
#sort the landmarks in the order of the solution
tour = [landmarks[i] for i in order]
self.logger.debug(f"Re-optimized {i} times, score: {int(-res.fun)}")
return tour
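The decision vector flattens the L×L adjacency matrix row by row, so the edge from landmark i to landmark j lives at index `i*L + j`. A small sketch of recovering the chosen edges from a solved vector, mirroring the index math used in `is_connected` above:

```python
# Sketch: extract the chosen edges from a solved decision vector `res.x`.
import numpy as np

def edges_from_solution(resx) -> list[tuple[int, int]]:
    L = int(np.sqrt(len(resx)))          # number of landmarks (N = L**2 by construction)
    rounded = np.round(resx)             # clamp the relaxed solution to 0/1 values
    nonzero = np.nonzero(rounded)[0]
    rows, cols = np.unravel_index(nonzero, (L, L))
    return list(zip(rows.tolist(), cols.tolist()))  # [(i, j), ...] traversed edges
```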

View File

@ -1,44 +1,35 @@
"""Allows to refine the tour by adding more landmarks and making the path easier to follow.""" import yaml, logging
import logging
from math import pi
import yaml
from shapely import buffer, LineString, Point, Polygon, MultiPoint, concave_hull
from ..structs.landmark import Landmark from shapely import buffer, LineString, Point, Polygon, MultiPoint, concave_hull
from ..utils.get_time_distance import get_time from math import pi
from ..utils.take_most_important import take_most_important
from structs.landmark import Landmark
from . import take_most_important, get_time_separation
from .optimizer import Optimizer from .optimizer import Optimizer
from ..constants import OPTIMIZER_PARAMETERS_PATH import constants
class Refiner : class Refiner :
"""
Refines a tour by incorporating smaller landmarks along the path to enhance the experience.
This class is designed to adjust an existing tour by considering additional,
smaller points of interest (landmarks) that may require minor detours but
improve the overall quality of the tour. It balances the efficiency of travel
with the added value of visiting these landmarks.
"""
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
detour_factor: float # detour factor of straight line vs real distance in cities detour_factor: float # detour factor of straight line vs real distance in cities
detour_corridor_width: float # width of the corridor around the path detour_corridor_width: float # width of the corridor around the path
average_walking_speed: float # average walking speed of adult average_walking_speed: float # average walking speed of adult
max_landmarks_refiner: int # max number of landmarks to visit max_landmarks: int # max number of landmarks to visit
optimizer: Optimizer # optimizer object optimizer: Optimizer # optimizer object
def __init__(self, optimizer: Optimizer) : def __init__(self, optimizer: Optimizer) :
self.optimizer = optimizer self.optimizer = optimizer
# load parameters from file # load parameters from file
with OPTIMIZER_PARAMETERS_PATH.open('r') as f: with constants.OPTIMIZER_PARAMETERS_PATH.open('r') as f:
parameters = yaml.safe_load(f) parameters = yaml.safe_load(f)
self.detour_factor = parameters['detour_factor'] self.detour_factor = parameters['detour_factor']
self.detour_corridor_width = parameters['detour_corridor_width'] self.detour_corridor_width = parameters['detour_corridor_width']
self.average_walking_speed = parameters['average_walking_speed'] self.average_walking_speed = parameters['average_walking_speed']
self.max_landmarks_refiner = parameters['max_landmarks_refiner'] self.max_landmarks = parameters['max_landmarks'] + 4
def create_corridor(self, landmarks: list[Landmark], width: float) : def create_corridor(self, landmarks: list[Landmark], width: float) :
@ -46,11 +37,11 @@ class Refiner :
Create a corridor around the path connecting the landmarks. Create a corridor around the path connecting the landmarks.
Args: Args:
landmarks (list[Landmark]) : the landmark path around which to create the corridor landmarks (list[Landmark]): the landmark path around which to create the corridor
width (float) : width of the corridor in meters. width (float): Width of the corridor in meters.
Returns: Returns:
Geometry: a buffered geometry object representing the corridor around the path. Geometry: A buffered geometry object representing the corridor around the path.
""" """
corrected_width = (180*width)/(6371000*pi) corrected_width = (180*width)/(6371000*pi)
@ -143,21 +134,6 @@ class Refiner :
return tour return tour
def integrate_landmarks(self, sub_list: list[Landmark], main_list: list[Landmark]) :
"""
Inserts 'sub_list' of Landmarks inside the 'main_list' by leaving the ends untouched.
Args:
sub_list : the list of Landmarks to be inserted inside of the 'main_list'.
main_list : the original list with start and finish.
Returns:
the full list.
"""
sub_list.append(main_list[-1]) # add finish back
return main_list[:-1] + sub_list # create full set of possible landmarks
def find_shortest_path_through_all_landmarks(self, landmarks: list[Landmark]) -> tuple[list[Landmark], Polygon]: def find_shortest_path_through_all_landmarks(self, landmarks: list[Landmark]) -> tuple[list[Landmark], Polygon]:
""" """
@ -195,7 +171,7 @@ class Refiner :
# Step 4: Use nearest neighbor heuristic to visit all landmarks # Step 4: Use nearest neighbor heuristic to visit all landmarks
while unvisited_landmarks: while unvisited_landmarks:
nearest_landmark = min(unvisited_landmarks, key=lambda lm: get_time(current_landmark.location, lm.location)) nearest_landmark = min(unvisited_landmarks, key=lambda lm: get_time_separation.get_time(current_landmark.location, lm.location))
path.append(nearest_landmark) path.append(nearest_landmark)
coordinates.append(nearest_landmark.location) coordinates.append(nearest_landmark.location)
current_landmark = nearest_landmark current_landmark = nearest_landmark
@ -238,7 +214,7 @@ class Refiner :
if self.is_in_area(area, landmark.location) and landmark.name not in visited_names: if self.is_in_area(area, landmark.location) and landmark.name not in visited_names:
second_order_landmarks.append(landmark) second_order_landmarks.append(landmark)
return take_most_important(second_order_landmarks, int(self.max_landmarks_refiner*0.75)) return take_most_important.take_most_important(second_order_landmarks, len(visited_landmarks))
# Try fix the shortest path using shapely # Try fix the shortest path using shapely
@ -274,14 +250,9 @@ class Refiner :
better_tour_poly = concave_hull(MultiPoint(coords)) # Create concave hull with "core" of tour leaving out start and finish better_tour_poly = concave_hull(MultiPoint(coords)) # Create concave hull with "core" of tour leaving out start and finish
xs, ys = better_tour_poly.exterior.xy xs, ys = better_tour_poly.exterior.xy
except Exception: except :
better_tour_poly = concave_hull(MultiPoint(coords)) # Create concave hull with "core" of tour leaving out start and finish better_tour_poly = concave_hull(MultiPoint(coords)) # Create concave hull with "core" of tour leaving out start and finish
xs, ys = better_tour_poly.exterior.xy xs, ys = better_tour_poly.exterior.xy
"""
FIXED : ERROR HERE :
Exception has occurred: AttributeError
'LineString' object has no attribute 'exterior'
"""
# reverse the xs and ys # reverse the xs and ys
@ -337,39 +308,33 @@ class Refiner :
""" """
# No need to refine if no detour is taken # No need to refine if no detour is taken
# if detour == 0: if detour == 0:
# return base_tour return base_tour
minor_landmarks = self.get_minor_landmarks(all_landmarks, base_tour, self.detour_corridor_width) minor_landmarks = self.get_minor_landmarks(all_landmarks, base_tour, self.detour_corridor_width)
self.logger.debug(f"Using {len(minor_landmarks)} minor landmarks around the predicted path") self.logger.info(f"Using {len(minor_landmarks)} minor landmarks around the predicted path")
# Full set of visitable landmarks. # full set of visitable landmarks
full_set = self.integrate_landmarks(minor_landmarks, base_tour) # could probably be optimized with less overhead full_set = base_tour[:-1] + minor_landmarks # create full set of possible landmarks (without finish)
full_set.append(base_tour[-1]) # add finish back
# Generate a new tour with the optimizer. # get a new tour
new_tour = self.optimizer.solve_optimization( new_tour = self.optimizer.solve_optimization(
max_time = max_time + detour, max_time = max_time + detour,
landmarks = full_set, landmarks = full_set
max_landmarks = self.max_landmarks_refiner
) )
# If unsuccessful optimization, use the base_tour.
if new_tour is None: if new_tour is None:
self.logger.warning("Refiner failed: No solution found during second stage optimization.") self.logger.warning("No solution found for the refined tour. Returning the initial tour.")
new_tour = base_tour new_tour = base_tour
# If only one landmark, return it.
if len(new_tour) < 4 :
return new_tour
# Find shortest path using the nearest neighbor heuristic. # Find shortest path using the nearest neighbor heuristic
better_tour, better_poly = self.find_shortest_path_through_all_landmarks(new_tour) better_tour, better_poly = self.find_shortest_path_through_all_landmarks(new_tour)
# Fix the tour using Polygons if the path looks weird. # Fix the tour using Polygons if the path looks weird
# Conditions : circular trip and invalid polygon.
if base_tour[0].location == base_tour[-1].location and not better_poly.is_valid : if base_tour[0].location == base_tour[-1].location and not better_poly.is_valid :
self.logger.debug("Tours might be funky, attempting to correct with polygons")
better_tour = self.fix_using_polygon(better_tour) better_tour = self.fix_using_polygon(better_tour)
return better_tour return better_tour
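For context, the corridor used by `get_minor_landmarks` is just the path buffered by a width converted from meters to degrees, the same approximation as in `create_bbox`. A hedged sketch of that idea with shapely, using made-up coordinates:

```python
# Sketch of the corridor construction; locations are (lat, lon) tuples and the
# degree conversion mirrors corrected_width = (180*width)/(6371000*pi) above.
from math import pi
from shapely import buffer, LineString

locations = [(45.7576, 4.8330), (45.7602, 4.8357), (45.7640, 4.8357)]
corrected_width = (180 * 1000) / (6371000 * pi)  # a 1 km width expressed in degrees
corridor = buffer(LineString(locations), corrected_width)
print(corridor.contains(LineString(locations)))  # True: the path lies inside its corridor
```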

View File

@ -1,17 +1,38 @@
"""Helper function to return only the major landmarks from a large list.""" from structs.landmark import Landmark
from ..structs.landmark import Landmark
def take_most_important(landmarks: list[Landmark], n_important) -> list[Landmark]: def take_most_important(landmarks: list[Landmark], N_important) -> list[Landmark] :
""" L = len(landmarks)
Given a list of landmarks, return the n_important most important landmarks L_copy = []
Args: L_clean = []
landmarks: list[Landmark] - list of landmarks scores = [0]*len(landmarks)
n_important: int - number of most important landmarks to return names = []
Returns: name_id = {}
list[Landmark] - list of the n_important most important landmarks
"""
# Sort landmarks by attractiveness (descending) for i, elem in enumerate(landmarks) :
sorted_landmarks = sorted(landmarks, key=lambda x: x.attractiveness, reverse=True) if elem.name not in names :
names.append(elem.name)
name_id[elem.name] = [i]
L_copy.append(elem)
else :
name_id[elem.name] += [i]
scores = []
for j in name_id[elem.name] :
scores.append(L[j].attractiveness)
best_id = max(range(len(scores)), key=scores.__getitem__)
t = name_id[elem.name][best_id]
if t == i :
for old in L_copy :
if old.name == elem.name :
old.attractiveness = L[t].attractiveness
return sorted_landmarks[:n_important] scores = [0]*len(L_copy)
for i, elem in enumerate(L_copy) :
scores[i] = elem.attractiveness
res = sorted(range(len(scores)), key = lambda sub: scores[sub])[-(N_important-L):]
for i, elem in enumerate(L_copy) :
if i in res :
L_clean.append(elem)
return L_clean
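The removed (refactored) version above reduces the whole helper to a sort-and-slice; a self-contained sketch with a hypothetical stand-in for the real `Landmark` model:

```python
# Self-contained sketch; FakeLandmark is a stand-in for the real Landmark model.
from dataclasses import dataclass

@dataclass
class FakeLandmark:
    name: str
    attractiveness: int

lms = [FakeLandmark('a', 3), FakeLandmark('b', 9), FakeLandmark('c', 5)]
top_two = sorted(lms, key=lambda x: x.attractiveness, reverse=True)[:2]
print([lm.name for lm in top_two])  # ['b', 'c']
```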

View File

@ -1,59 +0,0 @@
on:
push:
tags:
- 'v*'
jobs:
build:
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
- name: Set up ruby env
uses: ruby/setup-ruby@v1
with:
ruby-version: 3.2.1
bundler-cache: true
- name: Setup java for android build
uses: actions/setup-java@v4
with:
java-version: '17'
distribution: 'zulu'
- name: Setup android SDK
uses: android-actions/setup-android@v3
- name: Install Flutter
uses: subosito/flutter-action@v2
with:
channel: stable
flutter-version: 3.22.0
cache: true
- name: Infer version number from git tag
id: version
env:
REF_NAME: ${{ github.ref_name }}
run:
# remove the 'v' prefix from the tag name
echo "BUILD_NAME=${REF_NAME//v}" >> $GITHUB_ENV
- name: Put selected secrets into files
run: |
echo "${{ secrets.ANDROID_SECRET_PROPERTIES_BASE64 }}" | base64 -d > secrets.properties
echo "${{ secrets.ANDROID_GOOGLE_PLAY_JSON_BASE64 }}" | base64 -d > google-key.json
echo "${{ secrets.ANDROID_KEYSTORE_BASE64 }}" | base64 -d > release.keystore
working-directory: android
- name: Install fastlane
run: bundle install
working-directory: android
- name: Run fastlane lane
run: bundle exec fastlane deploy_release
working-directory: android
env:
BUILD_NUMBER: ${{ github.run_number }}
# BUILD_NAME is implicitly available
GOOGLE_MAPS_API_KEY: ${{ secrets.GOOGLE_MAPS_API_KEY }}

View File

@ -1,64 +0,0 @@
on:
push:
tags:
- 'v*'
jobs:
build:
runs-on: macos-latest
env:
# $BUNDLE_GEMFILE must be set at the job level, so it is set for all steps
BUNDLE_GEMFILE: ${{ github.workspace }}/ios/Gemfile
steps:
- uses: actions/checkout@v4
- name: Set up ruby env
uses: ruby/setup-ruby@v1
with:
ruby-version: 3.3
bundler-cache: true # runs 'bundle install' and caches installed gems automatically
- name: Install Flutter
uses: subosito/flutter-action@v2
with:
channel: stable
flutter-version: 3.22.0
cache: true
- name: Infer version number from git tag
id: version
env:
REF_NAME: ${{ github.ref_name }}
run:
# remove the 'v' prefix from the tag name
echo "BUILD_NAME=${REF_NAME//v}" >> $GITHUB_ENV
- name: Setup SSH key for match git repo
# and mark the host as known
run: |
echo $MATCH_REPO_SSH_KEY | base64 --decode > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
ssh-keyscan -p 2222 git.kluster.moll.re > ~/.ssh/known_hosts
env:
MATCH_REPO_SSH_KEY: ${{ secrets.IOS_MATCH_REPO_SSH_KEY_BASE64 }}
- name: Install dependencies and clean up
run: |
flutter pub get
bundle exec pod install
flutter clean
bundle exec pod cache clean --all
working-directory: ios
- name: Run fastlane lane
run: bundle exec fastlane deploy_release --verbose
working-directory: ios
env:
BUILD_NUMBER: ${{ github.run_number }}
# BUILD_NAME is implicitly available
GOOGLE_MAPS_API_KEY: ${{ secrets.GOOGLE_MAPS_API_KEY }}
IOS_ASC_KEY_ID: ${{ secrets.IOS_ASC_KEY_ID }}
IOS_ASC_ISSUER_ID: ${{ secrets.IOS_ASC_ISSUER_ID }}
IOS_ASC_KEY: ${{ secrets.IOS_ASC_KEY }}
MATCH_PASSWORD: ${{ secrets.IOS_MATCH_PASSWORD }}
IOS_GOOGLE_MAPS_API_KEY: ${{ secrets.IOS_GOOGLE_MAPS_API_KEY }}

View File

@ -4,7 +4,7 @@
# This file should be version controlled and should not be manually edited.
version:
-revision: "09de023485e95e6d1225c2baa44b8feb85e0d45f"
+revision: "54e66469a933b60ddf175f858f82eaeb97e48c8d"
channel: "stable"
project_type: app
@ -13,11 +13,26 @@ project_type: app
migration:
platforms:
- platform: root
-create_revision: 09de023485e95e6d1225c2baa44b8feb85e0d45f
-base_revision: 09de023485e95e6d1225c2baa44b8feb85e0d45f
+create_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+base_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+- platform: android
+create_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+base_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+- platform: ios
+create_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+base_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
- platform: linux
-create_revision: 09de023485e95e6d1225c2baa44b8feb85e0d45f
-base_revision: 09de023485e95e6d1225c2baa44b8feb85e0d45f
+create_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+base_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+- platform: macos
+create_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+base_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+- platform: web
+create_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+base_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+- platform: windows
+create_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
+base_revision: 54e66469a933b60ddf175f858f82eaeb97e48c8d
# User provided section

View File

@ -1,6 +1,6 @@
# Frontend
-The frontend of this project is a Flutter application designed to run on both Android and iOS devices (and possibly as a PWA). The frontend is responsible for displaying the user interface and handling user input. It communicates with the backend via a REST-api to retrieve and send data.
+This is the frontend of the project. It is a Flutter application that is designed to run on both Android and iOS devices. The frontend is responsible for displaying the user interface and handling user input. It communicates with the backend to retrieve and send data.
## Getting Started
@ -15,47 +15,3 @@ Once you have the Flutter SDK installed, you can locally install the dependencie
```bash
flutter pub get
```
## Development
### ...
### Icons and logos
The application uses a custom launcher icon and splash screen. These are managed platform-independently using the `flutter_launcher_icons` package.
To update the icons, change the `flutter_launcher_icons.yaml` configuration file. Especially the `image_path` is relevant. Then run
```bash
dart run flutter_launcher_icons
```
### Deploying a new version
To truly deploy a new version of the application, i.e. to the official app stores, a special CI step is required. This listens for new tags. To create a new tag position yourself on the main branch and run
```bash
git tag -a v<name> -m "Release <name>"
git push origin v<name>
```
We adhere to the [Semantic Versioning](https://semver.org/) standard, so the tag should be of the form `v0.1.8` for example.
## Fastlane - in depth
The application is deployed to the Google Play Store and the Apple App Store using fastlane: [https://docs.fastlane.tools/](https://docs.fastlane.tools/)
Fastlane is installed as a Ruby gem. Since the bundler-gemfile is scoped to a single directory, a `Gemfile` is included in both the `android` and `ios` directories. Once installed, the usage is
```bash
cd frontend/android # or ios
bundle install
bundle exec fastlane <lane>
```
This is reused in the CI/CD pipeline to automate the deployment process.
Secrets used by fastlane are stored on hashicorp vault and are fetched by the CI/CD pipeline. See below.
## Secrets
These are mostly used by the CI/CD pipeline to deploy the application. The main usage for github actions is documented under [https://github.com/hashicorp/vault-action](https://github.com/hashicorp/vault-action).
**Platform-specific secrets** are used by the CI/CD pipeline to deploy to the respective app stores.
- `GOOGLE_MAPS_API_KEY` is used to authenticate with the Google Maps API and is scoped to the android platform
- `ANDROID_KEYSTORE` is used to sign the android apk
- `ANDROID_GOOGLE_KEY` is used to authenticate with the Google Play Store api
- `IOS_GOOGLE_MAPS_API_KEY` is used to authenticate with the Google Maps API and is scoped to the ios platform
- `IOS_GOOGLE_...`
- `IOS_GOOGLE_...`
- `IOS_GOOGLE_...`

View File

@ -1,8 +1,8 @@
-gradlew
-gradlew.bat
-gradle/
+gradle-wrapper.jar
/.gradle
/captures/
+/gradlew
+/gradlew.bat
/local.properties
/secrets.properties
GeneratedPluginRegistrant.java
@ -12,6 +12,3 @@ GeneratedPluginRegistrant.java
key.properties
**/*.keystore
**/*.jks
-# Fastlane google cloud access
-google-key.json

View File

@ -1,3 +0,0 @@
source "https://rubygems.org"
gem "fastlane"

View File

@ -2,12 +2,13 @@
### Keystore setup
```bash
-keytool -genkey -v -keystore release.keystore -keyalg RSA -keysize 2048 -validity 10000 -alias upload
+keytool -genkey -v -keystore release.keystore -keyalg RSA -keysize 2048 -validity 10000 -alias release
```
-- This is required to store local credentials securely and more importantly to sign the app for google play store distribution.
+- This is required to store local credentials securely (not used for now).
+- But necessary in order to restrict the particular api key to a particular app (through the sha1 of the associated keystore).
-### Using secret credentials during build
+### Building and secret credentials
Following the guide under [https://developers.google.com/maps/flutter-package/config#android_1](https://developers.google.com/maps/flutter-package/config#android_1).
- Add the following to `android/build.gradle`:
```gradle
@ -35,31 +36,13 @@ Following the guide under [https://developers.google.com/maps/flutter-package/co
android:value="${MAPS_API_KEY}" />
```
### Signing the app
Compared to the flutter template application, a few changes have to be made:
- Added to `android/app/build.gradle`:
```gradle
signingConfigs {
release {
keyAlias = secretProperties['keyAlias']
keyPassword = secretProperties['keyPassword']
storeFile = secretProperties['storeFile'] ? file(secretProperties['storeFile']) : null
storePassword = secretProperties['storePassword']
}
}
```
- Changed the `buildTypes` to use the `release` signing config:
```gradle
buildTypes {
release {
signingConfig signingConfigs.release
}
}
```
This makes use of the `secretProperties` defined previously:
```gradle
secretPropertiesFile.withReader('UTF-8') { reader ->
secretProperties.load(reader)
}
```
### Using the credentials in CI
- Add the base64 encoded credentials to the repository secrets (e.g. `ANDROID_SECRETS`).
```bash
base64 -i android/secrets.properties
```
- Use the following in the CI script:
```bash
echo {{ secrets.ANDROID_SECRETS }} | base64 -d > android/secrets.properties
```

View File

@ -30,24 +30,19 @@ if (flutterVersionName == null) {
def secretPropertiesFile = rootProject.file('secrets.properties')
-def fallbackPropertiesFile = rootProject.file('fallback.properties')
def secretProperties = new Properties()
if (secretPropertiesFile.exists()) {
secretPropertiesFile.withReader('UTF-8') { reader ->
secretProperties.load(reader)
}
-} else if (fallbackPropertiesFile.exists()) {
-fallbackPropertiesFile.withReader('UTF-8') { reader ->
-secretProperties.load(reader)
-}
} else {
-throw new GradleException("Secrets file (secrets.properties, fallback.properties) not found")
+throw new GradleException("Secrets file secrets.properties not found")
}
android {
-namespace "com.anydev.anyway"
+namespace "com.example.fast_network_navigation"
compileSdk flutter.compileSdkVersion
ndkVersion flutter.ndkVersion
@ -65,8 +60,8 @@ android {
} }
defaultConfig { defaultConfig {
// TODO: Specify your own unique Application ID (https://developer.android.com/studio/build/application-id.html).
applicationId "com.anydev.anyway" applicationId "com.example.fast_network_navigation"
// You can update the following values to match your application needs. // You can update the following values to match your application needs.
// For more information, see: https://docs.flutter.dev/deployment/android#reviewing-the-gradle-build-configuration. // For more information, see: https://docs.flutter.dev/deployment/android#reviewing-the-gradle-build-configuration.
// Minimum Android version for Google Maps SDK // Minimum Android version for Google Maps SDK
@ -77,22 +72,15 @@ android {
versionCode flutterVersionCode.toInteger() versionCode flutterVersionCode.toInteger()
versionName flutterVersionName versionName flutterVersionName
// // Placeholders of keys that are replaced by the build system. // // Placeholders of keys that are replaced by the build system.
manifestPlaceholders += ['MAPS_API_KEY': System.getenv('ANDROID_GOOGLE_MAPS_API_KEY')] manifestPlaceholders += ['MAPS_API_KEY': secretProperties.getProperty('MAPS_API_KEY')]
} }
signingConfigs {
release {
keyAlias = secretProperties['keyAlias']
keyPassword = secretProperties['keyPassword']
storeFile = secretProperties['storeFile'] ? file(secretProperties['storeFile']) : null
storePassword = secretProperties['storePassword']
}
}
buildTypes { buildTypes {
release { release {
signingConfig = signingConfigs.release // TODO: Add your own signing config for the release build.
// Signing with the debug keys for now, so `flutter run --release` works.
signingConfig signingConfigs.debug
} }
} }
} }
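Both variants of the Gradle snippet above ultimately read key/value pairs from `secrets.properties`. A minimal sketch of a local file carrying every key the two variants reference; all values are placeholders:
```bash
# Write a local secrets file next to the android project (do not commit it)
cat > android/secrets.properties <<'EOF'
# Read by manifestPlaceholders in one variant (the other variant reads
# ANDROID_GOOGLE_MAPS_API_KEY from the environment instead)
MAPS_API_KEY=your-maps-api-key
# Read by the (removed) release signingConfig
keyAlias=upload
keyPassword=your-key-password
storeFile=../release.keystore
storePassword=your-store-password
EOF
```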

View File

@ -1,12 +1,9 @@
 <manifest xmlns:android="http://schemas.android.com/apk/res/android">
     <!-- Required to fetch data from the internet. -->
     <uses-permission android:name="android.permission.INTERNET"/>
-    <!-- Required to show user location -->
-    <uses-permission android:name="android.permission.ACCESS_FINE_LOCATION"/>
-    <uses-permission android:name="android.permission.ACCESS_COARSE_LOCATION" />
     <application
-        android:label="anyway"
+        android:label="fast_network_navigation"
         android:name="${applicationName}"
         android:icon="@mipmap/ic_launcher">
         <activity

View File

@ -1,4 +1,4 @@
-package com.anydev.anyway
+package com.example.fast_network_navigation
 import io.flutter.embedding.android.FlutterActivity

Binary file not shown. (Before: 3.6 KiB, After: 544 B)
Binary file not shown. (Before: 2.3 KiB, After: 442 B)
Binary file not shown. (Before: 5.2 KiB, After: 721 B)
Binary file not shown. (Before: 9.4 KiB, After: 1.0 KiB)
Binary file not shown. (Before: 13 KiB, After: 1.4 KiB)

View File

@ -1,2 +1 @@
-# This file mirrors the state of secrets.properties as a reference for the developer.
-# And as a fallback for build.gradle
+MAPS_API_KEY=Key

View File

@ -1,2 +0,0 @@
json_key_file("google-key.json") # Path to the json secret file - Follow https://docs.fastlane.tools/actions/supply/#setup to get one
package_name("com.anydev.anyway") # e.g. com.krausefx.app
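Before any lane runs, the `google-key.json` service account referenced here can be validated against the Play Store API. A sketch using fastlane's built-in action, assuming the Appfile above is in place:
```bash
# Validate the service-account JSON against the Google Play API
bundle exec fastlane run validate_play_store_json_key json_key:google-key.json
```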

View File

@ -1,53 +0,0 @@
default_platform(:android)
platform :android do
desc "Deploy a new version to closed testing (play store)"
lane :deploy_beta do
build_name = ENV["BUILD_NAME"]
build_number = ENV["BUILD_NUMBER"]
sh(
"flutter",
"build",
"appbundle",
"--release",
"--build-name=#{build_name}",
"--build-number=#{build_number}",
)
upload_to_play_store(
track: 'alpha',
# upload aab files instead
skip_upload_apk: true,
skip_upload_changelogs: true,
aab: "../build/app/outputs/bundle/release/app-release.aab",
# this is the default output of flutter build ... --release
# in particular, the build folder lies in the flutter root folder,
# which is the parent folder of the android folder
)
end
desc "Deploy a new version as a full release"
lane :deploy_release do
build_name = ENV["BUILD_NAME"]
build_number = ENV["BUILD_NUMBER"]
sh(
"flutter",
"build",
"appbundle",
"--release",
"--build-name=#{build_name}",
"--build-number=#{build_number}",
)
upload_to_play_store(
track: 'production',
skip_upload_apk: true,
skip_upload_changelogs: true,
aab: "../build/app/outputs/bundle/release/app-release.aab",
)
end
end
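Both lanes read `BUILD_NAME` and `BUILD_NUMBER` from the environment rather than taking lane arguments, so an invocation looks roughly like this (the version values and the `frontend/android` path are placeholders):
```bash
cd frontend/android   # the folder containing fastlane/
BUILD_NAME=1.2.0 BUILD_NUMBER=42 bundle exec fastlane android deploy_beta
# or, for a production rollout:
BUILD_NAME=1.2.0 BUILD_NUMBER=42 bundle exec fastlane android deploy_release
```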

View File

@ -1,40 +0,0 @@
fastlane documentation
----
# Installation
Make sure you have the latest version of the Xcode command line tools installed:
```sh
xcode-select --install
```
For _fastlane_ installation instructions, see [Installing _fastlane_](https://docs.fastlane.tools/#installing-fastlane)
# Available Actions
## Android
### android deploy_beta
```sh
[bundle exec] fastlane android deploy_beta
```
Deploy a new version to closed testing (play store)
### android deploy_release
```sh
[bundle exec] fastlane android deploy_release
```
Deploy a new version as a full release
----
This README.md is auto-generated and will be re-generated every time [_fastlane_](https://fastlane.tools) is run.
More information about _fastlane_ can be found on [fastlane.tools](https://fastlane.tools).
The documentation of _fastlane_ can be found on [docs.fastlane.tools](https://docs.fastlane.tools).

View File

@ -1,7 +0,0 @@
AnyWay - plan city trips your way
AnyWay is a mobile application that helps users plan city trips. The app allows users to specify their preferences and constraints, and then generates a personalized itinerary for them. The planning follows some guiding principles:
- **Personalization**: The user's preferences should be reflected in the choice of destinations.
- **Efficiency**: The itinerary should be optimized for the user's constraints.
- **Flexibility**: We acknowledge that tourism is a dynamic activity, and that users may want to change their plans on the go.
- **Discoverability**: Tourism is an inherently exploratory activity. Once a rough itinerary is generated, detours and spontaneous decisions should be encouraged.

View File

@ -1 +0,0 @@
AnyWay - plan city trips your way!

View File

@ -0,0 +1,5 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.6.3-all.zip
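This pins Gradle 7.6.3 with the `-all` distribution (sources included, which IDEs use for code navigation). When bumping the version, regenerating the file through the wrapper task keeps everything consistent; a sketch:
```bash
# Rewrites gradle-wrapper.properties plus the wrapper jar and scripts
./gradlew wrapper --gradle-version 7.6.3 --distribution-type all
```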

View File

@ -19,8 +19,8 @@ pluginManagement {
 plugins {
     id "dev.flutter.flutter-plugin-loader" version "1.0.0"
-    id "com.android.application" version "8.1.0" apply false
-    id "org.jetbrains.kotlin.android" version "2.0.20" apply false
+    id "com.android.application" version "7.3.0" apply false
+    id "org.jetbrains.kotlin.android" version "1.7.10" apply false
 }
 include ":app"

View File

@ -1,2 +0,0 @@
## Vector assets
As per https://www.svgrepo.com/collection/pixellove-bordered-vectors/ these icons are licensed under CC0.

View File

@ -1,79 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 64 64" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<title>cel-snow-globe</title>
<desc>Created with Sketch.</desc>
<defs>
</defs>
<g id="General" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="SLICES-64px" transform="translate(-450.000000, 0.000000)">
</g>
<g id="ICONS" transform="translate(-445.000000, 5.000000)">
<g id="cel-snow-globe" transform="translate(450.000000, 2.000000)">
<path d="M46,44 C48.209,44 50,45.791 50,48 L50,52 L2,52 L2,48 C2,45.791 3.791,44 6,44 L46,44 Z" id="Fill-1055" fill="#EEC261">
</path>
<path d="M7.2402,44.002 C2.7562,39.33 0.0002,32.987 0.0002,26 C0.0002,11.641 11.6402,0 26.0002,0 C40.3592,0 52.0002,11.641 52.0002,26 C52.0002,32.986 49.2442,39.33 44.7602,44.001 L7.2402,44.002 Z" id="Fill-1056" fill="#B6E0F2">
</path>
<path d="M38,37 C38,33.134 34.866,30 31,30 C27.134,30 24,33.134 24,37 C24,40.866 27.134,44 31,44 C34.866,44 38,40.866 38,37" id="Fill-1057" fill="#E9EFFA">
</path>
<path d="M26,25 C26,22.238 28.239,20 31,20 C33.761,20 36,22.238 36,25 C36,27.762 33.761,30 31,30 C28.239,30 26,27.762 26,25" id="Fill-1058" fill="#E9EFFA">
</path>
<path d="M38,37 C38,33.134 34.866,30 31,30 C27.134,30 24,33.134 24,37 C24,40.866 27.134,44 31,44 C34.866,44 38,40.866 38,37 Z" id="Stroke-1059" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M26,25 C26,22.238 28.239,20 31,20 C33.761,20 36,22.238 36,25 C36,27.762 33.761,30 31,30 C28.239,30 26,27.762 26,25 Z" id="Stroke-1060" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M46,44 C48.209,44 50,45.791 50,48 L50,52 L2,52 L2,48 C2,45.791 3.791,44 6,44 L46,44 Z" id="Stroke-1061" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M7.2402,44.002 C2.7562,39.33 0.0002,32.987 0.0002,26 C0.0002,11.641 11.6402,0 26.0002,0 C40.3592,0 52.0002,11.641 52.0002,26 C52.0002,32.986 49.2442,39.33 44.7602,44.001" id="Stroke-1062" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M8,24 C8,14.059 16.059,6 26,6" id="Stroke-1063" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M20,28 L26.061,32.04" id="Stroke-1064" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M42,28 L35.939,32.04" id="Stroke-1065" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M42,25 L42,28" id="Stroke-1066" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M45,28 L42,28" id="Stroke-1067" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M20,25 L20,28" id="Stroke-1068" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M17,28 L20,28" id="Stroke-1069" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M10.02,31.0098 C10.02,31.5688 9.568,32.0208 9.01,32.0208 C8.452,32.0208 8,31.5688 8,31.0098 C8,30.4518 8.452,29.9998 9.01,29.9998 C9.568,29.9998 10.02,30.4518 10.02,31.0098 Z" id="Stroke-1070" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M22.02,15.0098 C22.02,15.5688 21.568,16.0208 21.01,16.0208 C20.452,16.0208 20,15.5688 20,15.0098 C20,14.4518 20.452,13.9998 21.01,13.9998 C21.568,13.9998 22.02,14.4518 22.02,15.0098 Z" id="Stroke-1071" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M44.02,17.0098 C44.02,17.5688 43.568,18.0208 43.01,18.0208 C42.452,18.0208 42,17.5688 42,17.0098 C42,16.4518 42.452,15.9998 43.01,15.9998 C43.568,15.9998 44.02,16.4518 44.02,17.0098 Z" id="Stroke-1072" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M18.02,37.0098 C18.02,37.5688 17.568,38.0208 17.01,38.0208 C16.452,38.0208 16,37.5688 16,37.0098 C16,36.4518 16.452,35.9998 17.01,35.9998 C17.568,35.9998 18.02,36.4518 18.02,37.0098 Z" id="Stroke-1073" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M36.02,9.0098 C36.02,9.5688 35.568,10.0208 35.01,10.0208 C34.452,10.0208 34,9.5688 34,9.0098 C34,8.4518 34.452,7.9998 35.01,7.9998 C35.568,7.9998 36.02,8.4518 36.02,9.0098 Z" id="Stroke-1074" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
</g>
</g>
</g>
</svg>

Before: 5.1 KiB

View File

@ -1,64 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 64 64" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<title>cld-server</title>
<desc>Created with Sketch.</desc>
<defs>
</defs>
<g id="General" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="SLICES-64px" transform="translate(-810.000000, -200.000000)">
</g>
<g id="ICONS" transform="translate(-805.000000, -195.000000)">
<g id="cld-server" transform="translate(810.000000, 204.000000)">
<path d="M48,12 C51.313,12 54,9.313 54,6 C54,2.687 51.313,0 48,0 L6,0 C2.687,0 0,2.687 0,6 C0,9.313 2.687,12 6,12 L48,12 Z" id="Fill-424" fill="#969CE3">
</path>
<path d="M10,6 C10,7.104 9.104,8 8,8 C6.896,8 6,7.104 6,6 C6,4.896 6.896,4 8,4 C9.104,4 10,4.896 10,6" id="Fill-425" fill="#7BBDEC">
</path>
<path d="M48,30 C51.313,30 54,27.313 54,24 C54,20.687 51.313,18 48,18 L6,18 C2.687,18 0,20.687 0,24 C0,27.313 2.687,30 6,30 L48,30 Z" id="Fill-426" fill="#969CE3">
</path>
<path d="M10,24 C10,25.104 9.104,26 8,26 C6.896,26 6,25.104 6,24 C6,22.896 6.896,22 8,22 C9.104,22 10,22.896 10,24" id="Fill-427" fill="#7BBDEC">
</path>
<path d="M48,48 C51.313,48 54,45.313 54,42 C54,38.687 51.313,36 48,36 L6,36 C2.687,36 0,38.687 0,42 C0,45.313 2.687,48 6,48 L48,48 Z" id="Fill-428" fill="#969CE3">
</path>
<path d="M10,42 C10,43.104 9.104,44 8,44 C6.896,44 6,43.104 6,42 C6,40.896 6.896,40 8,40 C9.104,40 10,40.896 10,42" id="Fill-429" fill="#7BBDEC">
</path>
<path d="M48,12 C51.313,12 54,9.313 54,6 C54,2.687 51.313,0 48,0 L6,0 C2.687,0 0,2.687 0,6 C0,9.313 2.687,12 6,12 L48,12 Z" id="Stroke-430" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M10,6 C10,7.104 9.104,8 8,8 C6.896,8 6,7.104 6,6 C6,4.896 6.896,4 8,4 C9.104,4 10,4.896 10,6 Z" id="Stroke-431" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M48,6 L36,6" id="Stroke-432" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M48,30 C51.313,30 54,27.313 54,24 C54,20.687 51.313,18 48,18 L6,18 C2.687,18 0,20.687 0,24 C0,27.313 2.687,30 6,30 L48,30 Z" id="Stroke-433" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M10,24 C10,25.104 9.104,26 8,26 C6.896,26 6,25.104 6,24 C6,22.896 6.896,22 8,22 C9.104,22 10,22.896 10,24 Z" id="Stroke-434" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M48,24 L36,24" id="Stroke-435" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M48,48 C51.313,48 54,45.313 54,42 C54,38.687 51.313,36 48,36 L6,36 C2.687,36 0,38.687 0,42 C0,45.313 2.687,48 6,48 L48,48 Z" id="Stroke-436" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M10,42 C10,43.104 9.104,44 8,44 C6.896,44 6,43.104 6,42 C6,40.896 6.896,40 8,40 C9.104,40 10,40.896 10,42 Z" id="Stroke-437" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M48,42 L36,42" id="Stroke-438" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
</g>
</g>
</g>
</svg>

Before: 3.7 KiB

View File

@ -1,64 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 64 64" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<title>con-drill</title>
<desc>Created with Sketch.</desc>
<defs>
</defs>
<g id="General" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="SLICES-64px" transform="translate(-450.000000, -300.000000)">
</g>
<g id="ICONS" transform="translate(-445.000000, -295.000000)">
<g id="con-drill" transform="translate(452.000000, 306.000000)">
<path d="M4,46 L20,46 C21.104,46 22,45.104 22,44 L22,36 C22,34.896 21.104,34 20,34 L13.375,34 L2,34 L2,44 C2,45.104 2.896,46 4,46" id="Fill-680" fill="#99A5B7">
</path>
<path d="M40,4 L34,4 L34,12 L40,12 C41.104,12 42,11.104 42,10 L42,6 C42,4.896 41.104,4 40,4" id="Fill-681" fill="#E9EFFA">
</path>
<path d="M30,16 C32.209,16 34,14.209 34,12 L34,4 C34,1.791 32.209,0 30,0 L4,0 C1.791,0 0,1.791 0,4 L0,12 C0,14.209 1.791,16 4,16 L30,16 Z" id="Fill-682" fill="#D3D873">
</path>
<path d="M12.71,22 L18,22 C16.354,20.354 17.87,17.918 19,16 L14,16 L12.71,22 Z" id="Fill-683" fill="#F16963">
</path>
<path d="M13.375,34 C11.926,34 10.75,32.824 10.75,31.375 C10.75,31.12 10.786,30.874 10.854,30.641 L14,16 L6,16 L2,34 L13.375,34 Z" id="Fill-684" fill="#AEC14A">
</path>
<path d="M12.71,22 L18,22 C16.354,20.354 17.87,17.918 19,16" id="Stroke-685" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M42,8 L54,8" id="Stroke-686" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M34,4 L40,4 C41.104,4 42,4.896 42,6 L42,10 C42,11.104 41.104,12 40,12 L34,12" id="Stroke-687" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M2,34 L6,16" id="Stroke-688" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M6,8 L14,8" id="Stroke-689" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M6,4 L14,4" id="Stroke-690" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M2,34 L2,44 C2,45.104 2.896,46 4,46 L20,46 C21.104,46 22,45.104 22,44 L22,36 C22,34.896 21.104,34 20,34 L13.375,34" id="Stroke-691" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M13.375,34 C11.926,34 10.75,32.824 10.75,31.375 C10.75,31.12 10.786,30.874 10.854,30.641 L14,16" id="Stroke-692" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M2,34 L14,34" id="Stroke-693" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M30,16 C32.209,16 34,14.209 34,12 L34,4 C34,1.791 32.209,0 30,0 L4,0 C1.791,0 0,1.791 0,4 L0,12 C0,14.209 1.791,16 4,16 L30,16 Z" id="Stroke-694" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
</g>
</g>
</g>
</svg>

Before: 3.4 KiB

View File

@ -1,37 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 64 64" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<title>con-warning</title>
<desc>Created with Sketch.</desc>
<defs>
</defs>
<g id="General" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="SLICES-64px" transform="translate(-720.000000, -300.000000)">
</g>
<g id="ICONS" transform="translate(-715.000000, -295.000000)">
<g id="con-warning" transform="translate(718.000000, 302.000000)">
<path d="M50,46 C53.313,46 56,43.313 56,40 C56,38.751 55.358,37.299 55.358,37.299 L32.878,2.51 L32.88,2.509 C31.791,0.99 30.011,1.13686838e-13 28,1.13686838e-13 C25.989,1.13686838e-13 24.209,0.99 23.12,2.509 L23.122,2.51 L0.642,37.299 C0.642,37.299 0,38.751 0,40 C0,43.313 2.687,46 6,46 L50,46 Z" id="Fill-390" fill="#F3E777">
</path>
<path d="M26,36 C26,34.896 26.896,34 28,34 C29.104,34 30,34.896 30,36 C30,37.104 29.104,38 28,38 C26.896,38 26,37.104 26,36" id="Fill-391" fill="#F16963">
</path>
<path d="M32,16 C32,13.791 30.209,12 28,12 C25.791,12 24,13.791 24,16 L26,28 C26,29.104 26.896,30 28,30 C29.104,30 30,29.104 30,28 L32,16 Z" id="Fill-392" fill="#F16963">
</path>
<path d="M26,36 C26,34.896 26.896,34 28,34 C29.104,34 30,34.896 30,36 C30,37.104 29.104,38 28,38 C26.896,38 26,37.104 26,36 Z" id="Stroke-393" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M32,16 C32,13.791 30.209,12 28,12 C25.791,12 24,13.791 24,16 L26,28 C26,29.104 26.896,30 28,30 C29.104,30 30,29.104 30,28 L32,16 Z" id="Stroke-394" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
<path d="M50,46 C53.313,46 56,43.313 56,40 C56,38.751 55.358,37.299 55.358,37.299 L32.878,2.51 L32.88,2.509 C31.791,0.99 30.011,1.13686838e-13 28,1.13686838e-13 C25.989,1.13686838e-13 24.209,0.99 23.12,2.509 L23.122,2.51 L0.642,37.299 C0.642,37.299 0,38.751 0,40 C0,43.313 2.687,46 6,46 L50,46 Z" id="Stroke-395" stroke="#414547" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
</path>
</g>
</g>
</g>
</svg>

Before: 2.4 KiB

Some files were not shown because too many files have changed in this diff.