better logs
Some checks failed
Build and deploy the backend to staging / Build and push image (pull_request) Successful in 2m20s
Run linting on the backend code / Build (pull_request) Failing after 26s
Run testing on the backend code / Build (pull_request) Failing after 2m40s
Build and deploy the backend to staging / Deploy to staging (pull_request) Successful in 17s

This commit is contained in:
Helldragon67 2025-01-09 16:58:38 +01:00
parent a0a3d76b78
commit 11bbf34375
7 changed files with 60 additions and 68 deletions

View File

@ -1,6 +1,7 @@
"""Main app for backend api"""
import logging
import time
from fastapi import FastAPI, HTTPException, Query
from contextlib import asynccontextmanager
@ -81,6 +82,7 @@ def new_trip(preferences: Preferences,
must_do=True,
n_tags=0)
start_time = time.time()
# Generate the landmarks from the start location
landmarks, landmarks_short = manager.generate_landmarks_list(
center_coordinates = start,
@ -91,6 +93,9 @@ def new_trip(preferences: Preferences,
landmarks_short.insert(0, start_landmark)
landmarks_short.append(end_landmark)
t_generate_landmarks = time.time() - start_time
start_time = time.time()
# First stage optimization
try:
base_tour = optimizer.solve_optimization(preferences.max_time_minute, landmarks_short)
@ -99,11 +104,20 @@ def new_trip(preferences: Preferences,
except TimeoutError as exc:
raise HTTPException(status_code=500, detail="Optimzation took too long") from exc
t_first_stage = time.time() - start_time
start_time = time.time()
# Second stage optimization
refined_tour = refiner.refine_optimization(landmarks, base_tour,
preferences.max_time_minute,
preferences.detour_tolerance_minute)
t_second_stage = time.time() - start_time
logger.debug(f'Generating landmarks : {round(t_generate_landmarks,3)} seconds')
logger.debug(f'First stage optimization : {round(t_first_stage,3)} seconds')
logger.debug(f'Second stage optimization : {round(t_second_stage,3)} seconds')
logger.info(f'Total computation time : {round(t_generate_landmarks + t_first_stage + t_generate_landmarks,3)} seconds')
linked_tour = LinkedLandmarks(refined_tour)
# upon creation of the trip, persistence of both the trip and its landmarks is ensured.
trip = Trip.from_linked_landmarks(linked_tour, cache_client)
@ -165,7 +179,7 @@ def get_toilets(location: tuple[float, float] = Query(...), radius: int = 500) -
raise HTTPException(status_code=406, detail="Coordinates not provided or invalid")
if not (-90 <= location[0] <= 90 or -180 <= location[1] <= 180):
raise HTTPException(status_code=422, detail="Start coordinates not in range")
toilets_manager = ToiletsManager(location, radius)
try :

View File

@ -1,7 +1,7 @@
"""Collection of tests to ensure correct implementation and track progress. """
from fastapi.testclient import TestClient
import pytest
import pytest, time
from .test_utils import landmarks_to_osmid, load_trip_landmarks, log_trip_details
from ..main import app
@ -20,7 +20,9 @@ def test_turckheim(client, request): # pylint: disable=redefined-outer-name
client:
request:
"""
start_time = time.time() # Start timer
duration_minutes = 15
response = client.post(
"/trip/new",
json={
@ -35,6 +37,9 @@ def test_turckheim(client, request): # pylint: disable=redefined-outer-name
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])
# Get computation time
comp_time = time.time() - start_time
# Add details to report
log_trip_details(request, landmarks, result['total_time'], duration_minutes)
@ -43,6 +48,7 @@ def test_turckheim(client, request): # pylint: disable=redefined-outer-name
assert isinstance(landmarks, list) # check that the return type is a list
assert duration_minutes*0.8 < int(result['total_time']) < duration_minutes*1.2
assert len(landmarks) > 2 # check that there is something to visit
assert comp_time < 30, f"Computation time exceeded 30 seconds: {comp_time:.2f} seconds"
def test_bellecour(client, request) : # pylint: disable=redefined-outer-name
@ -53,7 +59,9 @@ def test_bellecour(client, request) : # pylint: disable=redefined-outer-name
client:
request:
"""
start_time = time.time() # Start timer
duration_minutes = 120
response = client.post(
"/trip/new",
json={
@ -67,7 +75,9 @@ def test_bellecour(client, request) : # pylint: disable=redefined-outer-name
)
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])
osm_ids = landmarks_to_osmid(landmarks)
# Get computation time
comp_time = time.time() - start_time
# Add details to report
log_trip_details(request, landmarks, result['total_time'], duration_minutes)
@ -79,8 +89,7 @@ def test_bellecour(client, request) : # pylint: disable=redefined-outer-name
# checks :
assert response.status_code == 200 # check for successful planning
assert duration_minutes*0.8 < int(result['total_time']) < duration_minutes*1.2
assert 136200148 in osm_ids # check for Cathédrale St. Jean in trip
# assert response.status_code == 2000 # check for successful planning
assert comp_time < 30, f"Computation time exceeded 30 seconds: {comp_time:.2f} seconds"
@ -92,7 +101,9 @@ def test_shopping(client, request) : # pylint: disable=redefined-outer-name
client:
request:
"""
start_time = time.time() # Start timer
duration_minutes = 240
response = client.post(
"/trip/new",
json={
@ -107,12 +118,16 @@ def test_shopping(client, request) : # pylint: disable=redefined-outer-name
result = response.json()
landmarks = load_trip_landmarks(client, result['first_landmark_uuid'])
# Get computation time
comp_time = time.time() - start_time
# Add details to report
log_trip_details(request, landmarks, result['total_time'], duration_minutes)
# checks :
assert response.status_code == 200 # check for successful planning
assert duration_minutes*0.8 < int(result['total_time']) < duration_minutes*1.2
assert comp_time < 30, f"Computation time exceeded 30 seconds: {comp_time:.2f} seconds"
# def test_new_trip_single_prefs(client):
# response = client.post(

View File

@ -34,26 +34,25 @@ def fetch_landmark(client, landmark_uuid: str):
dict: Landmark data fetched from the API.
"""
logger = logging.getLogger(__name__)
response = client.get(f"/landmark/{landmark_uuid}")
response = client.get(f'/landmark/{landmark_uuid}')
if response.status_code != 200:
raise HTTPException(status_code=500,
detail=f"Failed to fetch landmark with UUID {landmark_uuid}: {response.status_code}")
detail=f'Failed to fetch landmark with UUID {landmark_uuid}: {response.status_code}')
try:
json_data = response.json()
logger.info(f"API Response: {json_data}")
logger.info(f'API Response: {json_data}')
except ValueError as e:
logger.error(f"Failed to parse response as JSON: {response.text}")
logger.error(f'Failed to parse response as JSON: {response.text}')
raise HTTPException(status_code=500, detail="Invalid response format from API")
# Try validating against the Landmark model here to ensure consistency
try:
landmark = Landmark(**json_data)
except ValidationError as ve:
logging.error(f"Validation error: {ve}")
logging.error(f'Validation error: {ve}')
raise HTTPException(status_code=500, detail="Invalid data format received from API")
if "detail" in json_data:
raise HTTPException(status_code=500, detail=json_data["detail"])
@ -75,23 +74,21 @@ def fetch_landmark_cache(landmark_uuid: str):
# Try to fetch the landmark data from the cache
try:
landmark = cache_client.get(f"landmark_{landmark_uuid}")
landmark = cache_client.get(f'landmark_{landmark_uuid}')
if not landmark :
logger.warning(f"Cache miss for landmark UUID: {landmark_uuid}")
raise HTTPException(status_code=404, detail=f"Landmark with UUID {landmark_uuid} not found in cache.")
logger.warning(f'Cache miss for landmark UUID: {landmark_uuid}')
raise HTTPException(status_code=404, detail=f'Landmark with UUID {landmark_uuid} not found in cache.')
# Validate that the fetched data is a dictionary
if not isinstance(landmark, Landmark):
logger.error(f"Invalid cache data format for landmark UUID: {landmark_uuid}. Expected dict, got {type(landmark).__name__}.")
logger.error(f'Invalid cache data format for landmark UUID: {landmark_uuid}. Expected dict, got {type(landmark).__name__}.')
raise HTTPException(status_code=500, detail="Invalid cache data format.")
return landmark
except Exception as exc:
logger.error(f"Unexpected error occurred while fetching landmark UUID {landmark_uuid}: {exc}")
logger.error(f'Unexpected error occurred while fetching landmark UUID {landmark_uuid}: {exc}')
raise HTTPException(status_code=500, detail="An unexpected error occurred while fetching the landmark from the cache") from exc
def load_trip_landmarks(client, first_uuid: str, from_cache=None) -> list[Landmark]:
@ -122,14 +119,14 @@ def load_trip_landmarks(client, first_uuid: str, from_cache=None) -> list[Landma
def log_trip_details(request, landmarks: list[Landmark], duration: int, target_duration: int) :
    """
    Allows to show the detailed trip in the html test report.

    Args:
        request: the pytest `request` fixture; the trip details are attached
            to `request.node` so the reporting plugin can render them
        landmarks (list): the ordered list of visited landmarks
        duration (int): the total duration of this trip
        target_duration(int): the target duration of this trip
    """
    # One entry per landmark: "name (attractiveness | duration) - time_to_reach_next".
    # Fixed: the diff artifact left both the old double-quoted and the new
    # single-quoted version of this line in place; only one assignment remains.
    trip_string = [f'{landmark.name} ({landmark.attractiveness} | {landmark.duration}) - {landmark.time_to_reach_next}' for landmark in landmarks]

    # Pass additional info to pytest for reporting
    request.node.trip_details = trip_string

View File

@ -212,9 +212,6 @@ class LandmarkManager:
for sel in dict_to_selector_list(amenity_selector):
self.logger.debug(f"Current selector: {sel}")
# query_conditions = ['count_tags()>5']
# if landmarktype == 'shopping' : # use this later for shopping clusters
# element_types = ['node']
element_types = ['way', 'relation']
if 'viewpoint' in sel :
@ -269,7 +266,7 @@ class LandmarkManager:
tag_values = set(elem.tags().values()) # Store tag values
# Use simple tags :
# Retrieve image, name and website :
image_url = elem.tag('image')
website_url = elem.tag('website')
if website_url is None :
@ -278,6 +275,9 @@ class LandmarkManager:
if elem_type != "nature" and elem.tag('leisure') == "park":
elem_type = "nature"
if elem.tag('wikipedia') is not None :
score += self.wikipedia_bonus
# Skip element if it is an administrative boundary or a disused thing or it is an appartement and useless amenities
if elem.tag('boundary') is not None or elem.tag('disused') is not None:
@ -297,20 +297,16 @@ class LandmarkManager:
# do not count the building description as being particularly useful
n_tags -= 1
if "wiki" in tag_key:
# wikipedia entries count more
score += self.wikipedia_bonus
if landmarktype != "shopping":
if "shop" in tag_key:
skip = True
break
# if landmarktype != "shopping":
# if "shop" in tag_key:
# skip = True
# break
# if tag_key == "building" and elem.tag('building') in ['retail', 'supermarket', 'parking']:
# skip = True
# break
if skip:
continue
# if skip:
# continue
score = score_function(score)

View File

@ -8,9 +8,6 @@ from ..structs.landmark import Landmark
from .get_time_separation import get_time
from ..constants import OPTIMIZER_PARAMETERS_PATH
class Optimizer:

View File

@ -333,7 +333,7 @@ class Refiner :
minor_landmarks = self.get_minor_landmarks(all_landmarks, base_tour, self.detour_corridor_width)
self.logger.info(f"Using {len(minor_landmarks)} minor landmarks around the predicted path")
self.logger.debug(f"Using {len(minor_landmarks)} minor landmarks around the predicted path")
# Full set of visitable landmarks.
full_set = self.integrate_landmarks(minor_landmarks, base_tour) # could probably be optimized with less overhead

View File

@ -1,27 +0,0 @@
# Scratch/debug script: query the Overpass (OpenStreetMap) API for ways
# tagged historic=building inside a fixed bounding box, then inspect the
# tags of one returned element.
from OSMPythonTools.overpass import Overpass, overpassQueryBuilder

overpass = Overpass()

# bbox is presumably (south, west, north, east) — these coordinates cover
# central Lyon, France. TODO confirm against OSMPythonTools docs.
query = overpassQueryBuilder(
bbox = (45.7300, 4.7900, 45.8000, 4.8600),
elementType = ['way'],
# selector can in principle be a list already,
# but it generates the intersection of the queries
# we want the union
selector = '"historic"="building"',
includeCenter = True,
out = 'body'
)
# Performs the network request against the public Overpass endpoint.
res = overpass.query(query)
# for elem in res.elements() :
# NOTE(review): hard-coded second element — raises IndexError if the query
# returns fewer than two results.
elem = res.elements()[1]
tags = elem.tags()
# 'sgehs' is presumably a deliberately nonexistent tag key, probing that
# .tag() returns None for missing tags — confirm intent.
test = elem.tag('sgehs')
print(test)