Merge branch 'feature/optimizer-use-fastapi'
164 backend/.gitignore vendored Normal file
							| @@ -0,0 +1,164 @@ | ||||
| # osm-cache | ||||
| cache/ | ||||
| # Byte-compiled / optimized / DLL files | ||||
| __pycache__/ | ||||
| *.py[cod] | ||||
| *$py.class | ||||
|  | ||||
| # C extensions | ||||
| *.so | ||||
|  | ||||
| # Distribution / packaging | ||||
| .Python | ||||
| build/ | ||||
| develop-eggs/ | ||||
| dist/ | ||||
| downloads/ | ||||
| eggs/ | ||||
| .eggs/ | ||||
| lib/ | ||||
| lib64/ | ||||
| parts/ | ||||
| sdist/ | ||||
| var/ | ||||
| wheels/ | ||||
| share/python-wheels/ | ||||
| *.egg-info/ | ||||
| .installed.cfg | ||||
| *.egg | ||||
| MANIFEST | ||||
|  | ||||
| # PyInstaller | ||||
| #  Usually these files are written by a python script from a template | ||||
| #  before PyInstaller builds the exe, so as to inject date/other infos into it. | ||||
| *.manifest | ||||
| *.spec | ||||
|  | ||||
| # Installer logs | ||||
| pip-log.txt | ||||
| pip-delete-this-directory.txt | ||||
|  | ||||
| # Unit test / coverage reports | ||||
| htmlcov/ | ||||
| .tox/ | ||||
| .nox/ | ||||
| .coverage | ||||
| .coverage.* | ||||
| .cache | ||||
| nosetests.xml | ||||
| coverage.xml | ||||
| *.cover | ||||
| *.py,cover | ||||
| .hypothesis/ | ||||
| .pytest_cache/ | ||||
| cover/ | ||||
|  | ||||
| # Translations | ||||
| *.mo | ||||
| *.pot | ||||
|  | ||||
| # Django stuff: | ||||
| *.log | ||||
| local_settings.py | ||||
| db.sqlite3 | ||||
| db.sqlite3-journal | ||||
|  | ||||
| # Flask stuff: | ||||
| instance/ | ||||
| .webassets-cache | ||||
|  | ||||
| # Scrapy stuff: | ||||
| .scrapy | ||||
|  | ||||
| # Sphinx documentation | ||||
| docs/_build/ | ||||
|  | ||||
| # PyBuilder | ||||
| .pybuilder/ | ||||
| target/ | ||||
|  | ||||
| # Jupyter Notebook | ||||
| .ipynb_checkpoints | ||||
|  | ||||
| # IPython | ||||
| profile_default/ | ||||
| ipython_config.py | ||||
|  | ||||
| # pyenv | ||||
| #   For a library or package, you might want to ignore these files since the code is | ||||
| #   intended to run in multiple environments; otherwise, check them in: | ||||
| # .python-version | ||||
|  | ||||
| # pipenv | ||||
| #   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. | ||||
| #   However, in case of collaboration, if having platform-specific dependencies or dependencies | ||||
| #   having no cross-platform support, pipenv may install dependencies that don't work, or not | ||||
| #   install all needed dependencies. | ||||
| #Pipfile.lock | ||||
|  | ||||
| # poetry | ||||
| #   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. | ||||
| #   This is especially recommended for binary packages to ensure reproducibility, and is more | ||||
| #   commonly ignored for libraries. | ||||
| #   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control | ||||
| #poetry.lock | ||||
|  | ||||
| # pdm | ||||
| #   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. | ||||
| #pdm.lock | ||||
| #   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it | ||||
| #   in version control. | ||||
| #   https://pdm.fming.dev/latest/usage/project/#working-with-version-control | ||||
| .pdm.toml | ||||
| .pdm-python | ||||
| .pdm-build/ | ||||
|  | ||||
| # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm | ||||
| __pypackages__/ | ||||
|  | ||||
| # Celery stuff | ||||
| celerybeat-schedule | ||||
| celerybeat.pid | ||||
|  | ||||
| # SageMath parsed files | ||||
| *.sage.py | ||||
|  | ||||
| # Environments | ||||
| .env | ||||
| .venv | ||||
| env/ | ||||
| venv/ | ||||
| ENV/ | ||||
| env.bak/ | ||||
| venv.bak/ | ||||
|  | ||||
| # Spyder project settings | ||||
| .spyderproject | ||||
| .spyproject | ||||
|  | ||||
| # Rope project settings | ||||
| .ropeproject | ||||
|  | ||||
| # mkdocs documentation | ||||
| /site | ||||
|  | ||||
| # mypy | ||||
| .mypy_cache/ | ||||
| .dmypy.json | ||||
| dmypy.json | ||||
|  | ||||
| # Pyre type checker | ||||
| .pyre/ | ||||
|  | ||||
| # pytype static type analyzer | ||||
| .pytype/ | ||||
|  | ||||
| # Cython debug symbols | ||||
| cython_debug/ | ||||
|  | ||||
| # PyCharm | ||||
| #  JetBrains specific template is maintained in a separate JetBrains.gitignore that can | ||||
| #  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore | ||||
| #  and can be added to the global gitignore or merged into this file.  For a more nuclear | ||||
| #  option (not recommended) you can uncomment the following to ignore the entire idea folder. | ||||
| #.idea/ | ||||
backend/Pipfile
| @@ -1,14 +1,12 @@ | ||||
| [[source]] | ||||
| url = "https://pypi.org/simple" | ||||
| verify_ssl = true | ||||
| name = "pypi" | ||||
|  | ||||
| [packages] | ||||
| numpy = "*" | ||||
| scipy = "*" | ||||
| fastapi = "*" | ||||
|  | ||||
| [dev-packages] | ||||
|  | ||||
| [requires] | ||||
| python_version = "3.12" | ||||
| [[source]] | ||||
| url = "https://pypi.org/simple" | ||||
| verify_ssl = true | ||||
| name = "pypi" | ||||
|  | ||||
| [packages] | ||||
| numpy = "*" | ||||
| scipy = "*" | ||||
| fastapi = "*" | ||||
| osmpythontools = "*" | ||||
|  | ||||
| [dev-packages] | ||||
|   | ||||
							
								
								
									
2369 backend/Pipfile.lock generated
File diff suppressed because it is too large
							
							
								
								
									
0 backend/app/__init__.py Normal file
								
								
									
										
BIN backend/app/__pycache__/__init__.cpython-310.pyc Normal file
Binary file not shown.
								
								
									
										
BIN backend/app/__pycache__/main.cpython-310.pyc Normal file
Binary file not shown.
							
								
								
									
1 backend/app/dependencies.py Normal file
							| @@ -0,0 +1 @@ | ||||
| import app.src | ||||
							
								
								
									
34 backend/app/main.py Normal file
							| @@ -0,0 +1,34 @@ | ||||
| from .src.optimizer import solve_optimization | ||||
| from .src.landmarks_manager import Landmark | ||||
| from fastapi import FastAPI | ||||
|  | ||||
| app = FastAPI() | ||||
|  | ||||
|  | ||||
| @app.get("/optimize/{max_steps}/{print_details}") | ||||
| def main(max_steps: int, print_details: bool): | ||||
|      | ||||
|     # CONSTRAINT TO RESPECT MAX NUMBER OF STEPS | ||||
|     #max_steps = 16 | ||||
|  | ||||
|  | ||||
|     # Initialize all landmarks (+ start and goal). Order matters here | ||||
|     landmarks = [] | ||||
|     landmarks.append(Landmark("départ", -1, (0, 0))) | ||||
|     landmarks.append(Landmark("tour eiffel", 99, (0,2)))                           # PUT IN JSON | ||||
|     landmarks.append(Landmark("arc de triomphe", 99, (0,4))) | ||||
|     landmarks.append(Landmark("louvre", 99, (0,6))) | ||||
|     landmarks.append(Landmark("montmartre", 99, (0,10))) | ||||
|     landmarks.append(Landmark("concorde", 99, (0,8))) | ||||
|     landmarks.append(Landmark("arrivée", -1, (0, 0))) | ||||
|  | ||||
|  | ||||
|     visiting_order = solve_optimization(landmarks, max_steps, print_details) | ||||
|  | ||||
|     #return visiting_order | ||||
|  | ||||
|     return("max steps :", max_steps, "\n", visiting_order) | ||||
|  | ||||
|  | ||||
| """if __name__ == "__main__": | ||||
|     main()""" | ||||
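A quick way to exercise the new endpoint in-process (illustrative only, not part of this commit) is FastAPI's TestClient. This assumes the app above is importable as app.main from the backend directory and that httpx, which TestClient needs, is installed; the boolean path parameter is parsed from the literal "true"/"false", and the returned tuple comes back as a JSON array.

# Sketch only: calls GET /optimize/{max_steps}/{print_details} without starting a server.
from fastapi.testclient import TestClient   # needs the httpx package
from app.main import app                    # assumes backend/ is on sys.path

client = TestClient(app)
response = client.get("/optimize/16/false")
print(response.status_code)                 # 200 when the optimizer returns a result
print(response.json())                      # e.g. ["max steps :", 16, "\n", [0, 1, ...]]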
							
								
								
									
0 backend/app/src/__init__.py Normal file
							
								
								
									
										
BIN backend/app/src/__pycache__/__init__.cpython-310.pyc Normal file
Binary file not shown.
							
								
								
									
										
BIN backend/app/src/__pycache__/optimizer.cpython-310.pyc Normal file
Binary file not shown.
							
								
								
									
57 backend/app/src/landmarks_manager.py Normal file
							| @@ -0,0 +1,57 @@ | ||||
| from OSMPythonTools.api import Api | ||||
| from OSMPythonTools.overpass import Overpass | ||||
| from dataclasses import dataclass | ||||
|  | ||||
|  | ||||
| # Defines the landmark class (aka some place there is to visit) | ||||
| @dataclass | ||||
| class Landmarkkkk : | ||||
|     name : str | ||||
|     attractiveness : int | ||||
|     id : int | ||||
|      | ||||
| @dataclass | ||||
| class Landmark : | ||||
|     name : str | ||||
|     attractiveness : int | ||||
|     loc : tuple | ||||
|  | ||||
| # Converts an OSM id to a landmark | ||||
| def add_from_id(id: int, score: int) : | ||||
|  | ||||
|     try : | ||||
|         s = 'way/' + str(id)           # prepare string for query | ||||
|         obj =  api.query(s)                             # object to add | ||||
|     except : | ||||
|         s = 'relation/' + str(id)           # prepare string for query | ||||
|         obj =  api.query(s)                             # object to add | ||||
|  | ||||
|     return Landmarkkkk(obj.tag('name:fr'), score, id)      # create Landmark out of it | ||||
|  | ||||
|  | ||||
| # Takes a list of (id, score) tuples and generates a list of landmarks | ||||
| def generate_landmarks(ids_and_scores: list) : | ||||
|  | ||||
|     L = [] | ||||
|     for tup in ids_and_scores : | ||||
|         L.append(add_from_id(tup[0], tup[1])) | ||||
|  | ||||
|     return L | ||||
|  | ||||
| api = Api() | ||||
|  | ||||
|  | ||||
| l = (7515426, 70) | ||||
| t = (5013364, 100) | ||||
| n = (201611261, 99) | ||||
| a = (226413508, 50) | ||||
| m = (23762981, 30) | ||||
|  | ||||
|  | ||||
| ids_and_scores = [t, l, n, a, m] | ||||
|  | ||||
| landmarks = generate_landmarks(ids_and_scores) | ||||
|  | ||||
|  | ||||
| for obj in landmarks : | ||||
|     print(obj) | ||||
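One rough edge in landmarks_manager.py is the bare except used for the way-then-relation fallback, which also swallows unrelated failures such as network errors. Below is a possible variant, not part of the commit; catching Exception is itself an assumption, since OSMPythonTools does not document a narrower exception type for failed queries, and add_from_id_checked is a hypothetical name.

# Sketch only: same fallback as add_from_id(), but it reports an explicit error
# when neither the way nor the relation query succeeds.
def add_from_id_checked(id: int, score: int):
    last_error = None
    for prefix in ("way/", "relation/"):
        try:
            obj = api.query(prefix + str(id))            # same Api instance as above
            return Landmarkkkk(obj.tag('name:fr'), score, id)
        except Exception as err:                         # assumed: no narrower type is documented
            last_error = err
    raise ValueError(f"OSM element {id} not found as way or relation") from last_error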
| @@ -1,23 +1,23 @@ | ||||
| import fastapi | ||||
| from dataclasses import dataclass | ||||
|  | ||||
|  | ||||
| @dataclass | ||||
| class Destination: | ||||
|     name: str | ||||
|     location: tuple | ||||
|     attractiveness: int | ||||
|  | ||||
|  | ||||
|  | ||||
| d = Destination() | ||||
|  | ||||
|  | ||||
|  | ||||
| def get_route() -> list[Destination]: | ||||
|     return {"route": "Hello World"} | ||||
|  | ||||
| endpoint = ("/get_route", get_route) | ||||
| end | ||||
| if __name__ == "__main__": | ||||
|     fastapi.run() | ||||
| import fastapi | ||||
| from dataclasses import dataclass | ||||
|  | ||||
|  | ||||
| @dataclass | ||||
| class Destination: | ||||
|     name: str | ||||
|     location: tuple | ||||
|     attractiveness: int | ||||
|  | ||||
|  | ||||
|  | ||||
| d = Destination() | ||||
|  | ||||
|  | ||||
|  | ||||
| def get_route() -> list[Destination]: | ||||
|     return {"route": "Hello World"} | ||||
|  | ||||
| endpoint = ("/get_route", get_route) | ||||
| end | ||||
| if __name__ == "__main__": | ||||
|     fastapi.run() | ||||
							
								
								
									
323 backend/app/src/optimizer.py Normal file
							| @@ -0,0 +1,323 @@ | ||||
| from scipy.optimize import linprog | ||||
| import numpy as np | ||||
| from scipy.linalg import block_diag | ||||
|  | ||||
|  | ||||
| # landmarks = [Landmark_1, Landmark_2, ...] | ||||
|  | ||||
| # Convert the solution of the optimization into the list of edges to follow. Order is taken into account | ||||
| def untangle(resx: list) : | ||||
|     N = len(resx)                   # length of res | ||||
|     L = int(np.sqrt(N))             # number of landmarks. CAST INTO INT but should not be a problem because N = L**2 by def. | ||||
|     n_edges = resx.sum()      # number of edges | ||||
|  | ||||
|     order = [] | ||||
|     nonzeroind = np.nonzero(resx)[0] # the return is a little funny so I use the [0] | ||||
|  | ||||
|     nonzero_tup = np.unravel_index(nonzeroind, (L,L)) | ||||
|  | ||||
|     indx = nonzero_tup[0].tolist() | ||||
|     indy = nonzero_tup[1].tolist() | ||||
|  | ||||
|     vert = (indx[0], indy[0]) | ||||
|  | ||||
|     order.append(vert[0]) | ||||
|     order.append(vert[1]) | ||||
|      | ||||
|     while len(order) < n_edges + 1 : | ||||
|         ind = indx.index(vert[1]) | ||||
|  | ||||
|         vert = (indx[ind], indy[ind]) | ||||
|  | ||||
|         order.append(vert[1]) | ||||
|  | ||||
|     return order | ||||
|  | ||||
| # Just to print the result | ||||
| def print_res(res, landmarks: list, P) : | ||||
|     X = abs(res.x) | ||||
|     order = untangle(X) | ||||
|     things = [] | ||||
|  | ||||
|     """N = int(np.sqrt(len(X))) | ||||
|     for i in range(N): | ||||
|         print(X[i*N:i*N+N]) | ||||
|     print("Optimal value:", -res.fun)  # Minimization, so we negate to get the maximum | ||||
|     print("Optimal point:", res.x) | ||||
|     for i,x in enumerate(X) : X[i] = round(x,0) | ||||
|     print(order)""" | ||||
|  | ||||
|     if (X.sum()+1)**2 == len(X) :  | ||||
|         print('\nAll landmarks can be visited within max_steps, the following order is suggested : ') | ||||
|     else : | ||||
|         print('Could not visit all the landmarks, the following order is suggested : ') | ||||
|  | ||||
|     for idx in order :  | ||||
|         print('- ' + landmarks[idx].name) | ||||
|         things.append(landmarks[idx].name) | ||||
|  | ||||
|     steps = path_length(P, abs(res.x)) | ||||
|     print("\nSteps walked : " + str(steps)) | ||||
|  | ||||
|     return things | ||||
|  | ||||
| # Checks for cases of circular symmetry in the result | ||||
| def has_circle(resx: list) : | ||||
|     N = len(resx)                   # length of res | ||||
|     L = int(np.sqrt(N))             # number of landmarks. CAST INTO INT but should not be a problem because N = L**2 by def. | ||||
|     n_edges = resx.sum()      # number of edges | ||||
|  | ||||
|      | ||||
|     nonzeroind = np.nonzero(resx)[0] # the return is a little funny so I use the [0] | ||||
|  | ||||
|     nonzero_tup = np.unravel_index(nonzeroind, (L,L)) | ||||
|  | ||||
|     indx = nonzero_tup[0].tolist() | ||||
|     indy = nonzero_tup[1].tolist() | ||||
|      | ||||
|  | ||||
|     verts = [] | ||||
|  | ||||
|     for i, x in enumerate(indx) : | ||||
|         verts.append((x, indy[i])) | ||||
|  | ||||
|      | ||||
|     for vert in verts : | ||||
|         visited = [] | ||||
|         visited.append(vert) | ||||
|  | ||||
|         while len(visited) < n_edges + 1 : | ||||
|  | ||||
|             try :  | ||||
|                 ind = indx.index(vert[1]) | ||||
|  | ||||
|                 vert = (indx[ind], indy[ind]) | ||||
|  | ||||
|                 if vert in visited : | ||||
|                     return visited | ||||
|                 else : | ||||
|                     visited.append(vert) | ||||
|             except : | ||||
|                 break | ||||
|  | ||||
|     return [] | ||||
|  | ||||
| # Constraint to not have d14 and d41 simultaneously. Does not prevent circular symmetry with more elements | ||||
| def break_sym(landmarks, A_ub, b_ub): | ||||
|     L = len(landmarks) | ||||
|     upper_ind = np.triu_indices(L,0,L) | ||||
|  | ||||
|     up_ind_x = upper_ind[0] | ||||
|     up_ind_y = upper_ind[1] | ||||
|  | ||||
|     for i, _ in enumerate(up_ind_x) : | ||||
|         l = [0]*L*L | ||||
|         if up_ind_x[i] != up_ind_y[i] : | ||||
|             l[up_ind_x[i]*L + up_ind_y[i]] = 1 | ||||
|             l[up_ind_y[i]*L + up_ind_x[i]] = 1 | ||||
|  | ||||
|             A_ub = np.vstack((A_ub,l)) | ||||
|             b_ub.append(1) | ||||
|  | ||||
|             """for i in range(7): | ||||
|                 print(l[i*7:i*7+7]) | ||||
|             print("\n")""" | ||||
|  | ||||
|     return A_ub, b_ub | ||||
|  | ||||
| # Constraint to not have circular paths. Want to go from start -> finish without unconnected loops | ||||
| def break_circle(landmarks, A_ub, b_ub, circle) : | ||||
|     N = len(landmarks) | ||||
|     l = [0]*N*N | ||||
|  | ||||
|     for index in circle : | ||||
|         x = index[0] | ||||
|         y = index[1] | ||||
|         l[x*N+y] = 1 | ||||
|  | ||||
|     A_ub = np.vstack((A_ub,l)) | ||||
|     b_ub.append(len(circle)-1) | ||||
|  | ||||
|     """print("\n\nPREVENT CIRCLE") | ||||
|     for i in range(7): | ||||
|         print(l[i*7:i*7+7]) | ||||
|     print("\n")""" | ||||
|  | ||||
|     return A_ub, b_ub | ||||
|  | ||||
| # Constraint to respect max number of travels | ||||
| def respect_number(landmarks, A_ub, b_ub): | ||||
|     h = [] | ||||
|     for i in range(len(landmarks)) : h.append([1]*len(landmarks)) | ||||
|     T = block_diag(*h) | ||||
|     """for l in T : | ||||
|         for i in range(7): | ||||
|             print(l[i*7:i*7+7]) | ||||
|         print("\n")""" | ||||
|     return np.vstack((A_ub, T)), b_ub + [1]*len(landmarks) | ||||
|  | ||||
| # Constraint to tie the problem together. Necessary but not sufficient to avoid circles | ||||
| def respect_order(landmarks: list, A_eq, b_eq):  | ||||
|     N = len(landmarks) | ||||
|     for i in range(N-1) :     # Prevent stacked ones | ||||
|         if i == 0 : | ||||
|             continue | ||||
|         else :  | ||||
|             l = [0]*N | ||||
|             l[i] = -1 | ||||
|             l = l*N | ||||
|             for j in range(N) : | ||||
|                 l[i*N + j] = 1 | ||||
|  | ||||
|             A_eq = np.vstack((A_eq,l)) | ||||
|             b_eq.append(0) | ||||
|  | ||||
|             """for i in range(7): | ||||
|                 print(l[i*7:i*7+7]) | ||||
|             print("\n")""" | ||||
|  | ||||
|     return A_eq, b_eq | ||||
|  | ||||
| # Compute manhattan distance between 2 locations | ||||
| def manhattan_distance(loc1: tuple, loc2: tuple): | ||||
|     x1, y1 = loc1 | ||||
|     x2, y2 = loc2 | ||||
|     return abs(x1 - x2) + abs(y1 - y2) | ||||
|  | ||||
| # Constraint to not stay in position | ||||
| def init_eq_not_stay(landmarks):  | ||||
|     L = len(landmarks) | ||||
|     l = [0]*L*L | ||||
|  | ||||
|  | ||||
|     for i in range(L) : | ||||
|         for j in range(L) : | ||||
|             if j == i : | ||||
|                 l[j + i*L] = 1 | ||||
|     l[L-1] = 1      # cannot skip from start to finish | ||||
|     #A_eq = np.array([np.array(xi) for xi in A_eq])                  # Must convert A_eq into an np array | ||||
|     l = np.array(np.array(l)) | ||||
|  | ||||
|     """for i in range(7): | ||||
|         print(l[i*7:i*7+7])""" | ||||
|  | ||||
|     return [l], [0] | ||||
|  | ||||
| # Initialize A and c. Compute the distances from all landmarks to each other and store attractiveness | ||||
| # We want to maximize the sightseeing :  max(c) st. A*x < b   and   A_eq*x = b_eq | ||||
| def init_ub_dist(landmarks: list, max_steps: int): | ||||
|     # Objective function coefficients. a*x1 + b*x2 + c*x3 + ... | ||||
|     c = [] | ||||
|     # Coefficients of inequality constraints (left-hand side) | ||||
|     A = [] | ||||
|     for i, spot1 in enumerate(landmarks) : | ||||
|         dist_table = [0]*len(landmarks) | ||||
|         c.append(-spot1.attractiveness) | ||||
|         for j, spot2 in enumerate(landmarks) : | ||||
|             dist_table[j] = manhattan_distance(spot1.loc, spot2.loc) | ||||
|         A.append(dist_table) | ||||
|     c = c*len(landmarks) | ||||
|     A_ub = [] | ||||
|     for line in A : | ||||
|         #print(line) | ||||
|         A_ub += line | ||||
|     return c, A_ub, [max_steps] | ||||
|  | ||||
| # Go through the landmarks and force the optimizer to use landmarks where attractiveness is set to -1 | ||||
| def respect_user_mustsee(landmarks: list, A_eq: list, b_eq: list) : | ||||
|     L = len(landmarks) | ||||
|     H = 0       # sort of heuristic to get an idea of the number of steps needed | ||||
|     for i in landmarks :  | ||||
|         if i.name == "départ" : elem_prev = i              # remember the start landmark for the distance heuristic | ||||
|     for i, elem in enumerate(landmarks) : | ||||
|         if elem.attractiveness == -1 : | ||||
|             l = [0]*L*L | ||||
|             if elem.name != "arrivée" : | ||||
|                 for j in range(L) : | ||||
|                     l[j +i*L] = 1 | ||||
|                      | ||||
|             else :                          # This ensures we go to goal | ||||
|                 for k in range(L-1) : | ||||
|                         l[k*L+L-1] = 1   | ||||
|  | ||||
|             H += manhattan_distance(elem.loc, elem_prev.loc) | ||||
|             elem_prev = elem | ||||
|  | ||||
|             """for i in range(7): | ||||
|                 print(l[i*7:i*7+7]) | ||||
|             print("\n")""" | ||||
|  | ||||
|             A_eq = np.vstack((A_eq,l)) | ||||
|             b_eq.append(1) | ||||
|     return A_eq, b_eq, H | ||||
|  | ||||
| # Computes the path length given path matrix (dist_table) and a result | ||||
| def path_length(P: list, resx: list) : | ||||
|     return np.dot(P, resx) | ||||
|  | ||||
| # Main optimization pipeline | ||||
| def solve_optimization (landmarks, max_steps, printing_details) : | ||||
|  | ||||
|     # SET CONSTRAINTS FOR INEQUALITY | ||||
|     c, A_ub, b_ub = init_ub_dist(landmarks, max_steps)              # Add the distances from each landmark to the other | ||||
|     P = A_ub                                                        # store the paths for later. Needed to compute path length | ||||
|     A_ub, b_ub = respect_number(landmarks, A_ub, b_ub)              # Respect max number of visits.  | ||||
|  | ||||
|     # TODO : Problems with circular symmetry | ||||
|     A_ub, b_ub = break_sym(landmarks, A_ub, b_ub)                  # break the symmetry. Only use the upper diagonal values | ||||
|  | ||||
|     # SET CONSTRAINTS FOR EQUALITY | ||||
|     A_eq, b_eq = init_eq_not_stay(landmarks)                       # Force solution not to stay in same place | ||||
|     A_eq, b_eq, H = respect_user_mustsee(landmarks, A_eq, b_eq)       # Check if there are user_defined must_see. Also takes care of start/goal | ||||
|  | ||||
|     A_eq, b_eq = respect_order(landmarks, A_eq, b_eq)              # Respect order of visit (only works when max_steps is limiting factor) | ||||
|  | ||||
|     # Bounds for variables (x can only be 0 or 1) | ||||
|     x_bounds = [(0, 1)] * len(c) | ||||
|  | ||||
|     # Solve linear programming problem | ||||
|  | ||||
|     res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3) | ||||
|      | ||||
|  | ||||
|  | ||||
|     # Raise error if no solution is found | ||||
|     if not res.success : | ||||
|  | ||||
|         # Override the max_steps using the heuristic | ||||
|         for i, val in enumerate(b_ub) : | ||||
|             if val == max_steps : b_ub[i] = H | ||||
|  | ||||
|         # Solve problem again : | ||||
|         res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3) | ||||
|  | ||||
|         if not res.success : | ||||
|             s = "No solution could be found, even when increasing max_steps using the heuristic" | ||||
|             return s | ||||
|             #raise ValueError("No solution could be found, even when increasing max_steps using the heuristic") | ||||
|      | ||||
|     # If there is a solution, we're good to go, just check for unconnected loops (circular symmetry) | ||||
|     else : | ||||
|         circle = has_circle(res.x) | ||||
|         i = 0 | ||||
|  | ||||
|         # Break the circular symmetry if needed | ||||
|         while len(circle) != 0 : | ||||
|             A_ub, b_ub = break_circle(landmarks, A_ub, b_ub, circle) | ||||
|             res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3) | ||||
|             circle = has_circle(res.x) | ||||
|             i += 1 | ||||
|  | ||||
|         if printing_details is True : | ||||
|             if i != 0 : | ||||
|             print(f"Needed to recompute paths {i} times because of unconnected loops...") | ||||
|             X = print_res(res, landmarks, P) | ||||
|             return X | ||||
|         else : | ||||
|             return untangle(res.x) | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
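To see how a solver result maps back onto a visiting order, here is a small hand-worked check of untangle() (illustrative only, not part of the commit; it assumes the module is importable as app.src.optimizer). The decision vector is the row-major flattening of an L x L edge matrix, so with three landmarks and edges 0 -> 1 and 1 -> 2 selected, the recovered order should be [0, 1, 2].

# Sketch only: a tiny solution vector fed straight into untangle().
import numpy as np
from app.src.optimizer import untangle

L = 3                          # three landmarks -> 3 x 3 edge matrix, flattened row-major
x = np.zeros(L * L)
x[0 * L + 1] = 1               # edge from landmark 0 to landmark 1
x[1 * L + 2] = 1               # edge from landmark 1 to landmark 2
print(untangle(x))             # expected: [0, 1, 2]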
| @@ -1,206 +0,0 @@ | ||||
| from scipy.optimize import linprog | ||||
| import numpy as np | ||||
| from scipy.linalg import block_diag | ||||
|  | ||||
|  | ||||
| # Defines the landmark class (aka some place there is to visit) | ||||
| class landmark : | ||||
|     def __init__(self, name: str, attractiveness: int, loc: tuple): | ||||
|         self.name = name | ||||
|         self.attractiveness = attractiveness | ||||
|         self.loc = loc | ||||
|  | ||||
| # Convert the result (edges from j to k like d_25 = edge between vertex 2 and vertex 5) into the list of indices corresponding to the landmarks | ||||
| def untangle(res: list) : | ||||
|     N = len(res)                # length of res | ||||
|     L = int(np.sqrt(N))         # number of landmarks. CAST INTO INT but should not be a problem because N = L**2 by def. | ||||
|     n_landmarks = res.sum()     # number of visited landmarks | ||||
|     visit_order = [] | ||||
|     cnt = 0 | ||||
|  | ||||
|     if n_landmarks % 2 == 1 :                                     # if odd number of visited checkpoints | ||||
|         for i in range(L) : | ||||
|             for j in range(L) : | ||||
|                 if res[i*L + j] == 1 :              # if index is 1 | ||||
|                     cnt += 1                        # increment counter | ||||
|                     if cnt % 2 == 1 :               # if counter odd | ||||
|                         visit_order.append(i) | ||||
|                         visit_order.append(j) | ||||
|     else :                                   # if even number of ones | ||||
|         for i in range(L) : | ||||
|             for j in range(L) : | ||||
|                 if res[i*L + j] == 1 :              # if index is one | ||||
|                     cnt += 1                        # increment counter | ||||
|                     if j % (L-1) == 0 :             # if last node | ||||
|                         visit_order.append(j)       # append only the last index | ||||
|                         return visit_order          # return | ||||
|                     if cnt % 2 == 1 :  | ||||
|                         visit_order.append(i) | ||||
|                         visit_order.append(j) | ||||
|     return visit_order | ||||
|  | ||||
| # Just to print the result | ||||
| def print_res(res: list, P) : | ||||
|     X = abs(res.x) | ||||
|     order = untangle(X) | ||||
|  | ||||
|     # print("Optimal value:", -res.fun)  # Minimization, so we negate to get the maximum | ||||
|     # print("Optimal point:", res.x) | ||||
|     # N = int(np.sqrt(len(X))) | ||||
|     # for i in range(N): | ||||
|     #     print(X[i*N:i*N+N]) | ||||
|     # print(order) | ||||
|  | ||||
|     if (X.sum()+1)**2 == len(X) :  | ||||
|         print('\nAll landmarks can be visited within max_steps, the following order is most likely not the fastest') | ||||
|     else : | ||||
|         print('Could not visit all the landmarks, the following order could be the fastest but not sure') | ||||
|     print("Order of visit :") | ||||
|     for i, elem in enumerate(landmarks) :  | ||||
|         if i in order : print('- ' + elem.name) | ||||
|  | ||||
|     steps = path_length(P, abs(res.x)) | ||||
|     print("\nSteps walked : " + str(steps)) | ||||
|  | ||||
| # Constraint to use only the upper triangular indices for travel | ||||
| def break_sym(landmarks, A_eq, b_eq): | ||||
|     L = len(landmarks) | ||||
|     l = [0]*L*L | ||||
|     for i in range(L) : | ||||
|         for j in range(L) : | ||||
|             if i >= j : | ||||
|                 l[j+i*L] = 1 | ||||
|  | ||||
|     A_eq = np.vstack((A_eq,l)) | ||||
|     b_eq.append(0) | ||||
|  | ||||
|     return A_eq, b_eq | ||||
|  | ||||
| # Constraint to respect max number of travels | ||||
| def respect_number(landmarks, A_ub, b_ub): | ||||
|     h = [] | ||||
|     for i in range(len(landmarks)) : h.append([1]*len(landmarks)) | ||||
|     T = block_diag(*h) | ||||
|     return np.vstack((A_ub, T)), b_ub + [1]*len(landmarks) | ||||
|  | ||||
| # Constraint to tie the problem together and have a connected path | ||||
| def respect_order(landmarks: list, A_eq, b_eq):  | ||||
|     N = len(landmarks) | ||||
|     for i in range(N-1) :     # Prevent stacked ones | ||||
|         if i == 0 : | ||||
|             continue | ||||
|         else :  | ||||
|             l = [0]*N | ||||
|             l[i] = -1 | ||||
|             l = l*N | ||||
|             for j in range(N) : | ||||
|                 l[i*N + j] = 1 | ||||
|  | ||||
|             A_eq = np.vstack((A_eq,l)) | ||||
|             b_eq.append(0) | ||||
|  | ||||
|     return A_eq, b_eq | ||||
|  | ||||
| # Compute manhattan distance between 2 locations | ||||
| def manhattan_distance(loc1: tuple, loc2: tuple): | ||||
|     x1, y1 = loc1 | ||||
|     x2, y2 = loc2 | ||||
|     return abs(x1 - x2) + abs(y1 - y2) | ||||
|  | ||||
| # Constraint to not stay in position | ||||
| def init_eq_not_stay(landmarks):  | ||||
|     L = len(landmarks) | ||||
|     l = [0]*L*L | ||||
|     for i in range(L) : | ||||
|         for j in range(L) : | ||||
|             if j == i : | ||||
|                 l[j + i*L] = 1 | ||||
|     #A_eq = np.array([np.array(xi) for xi in A_eq])                  # Must convert A_eq into an np array | ||||
|     l = np.array(np.array(l)) | ||||
|     return [l], [0] | ||||
|  | ||||
| # Initialize A and c. Compute the distances from all landmarks to each other and store attractiveness | ||||
| # We want to maximize the sightseeing :  max(c) st. A*x < b   and   A_eq*x = b_eq | ||||
| def init_ub_dist(landmarks: list, max_steps: int): | ||||
|     # Objective function coefficients. a*x1 + b*x2 + c*x3 + ... | ||||
|     c = [] | ||||
|     # Coefficients of inequality constraints (left-hand side) | ||||
|     A = [] | ||||
|     for i, spot1 in enumerate(landmarks) : | ||||
|         dist_table = [0]*len(landmarks) | ||||
|         c.append(-spot1.attractiveness) | ||||
|         for j, spot2 in enumerate(landmarks) : | ||||
|             dist_table[j] = manhattan_distance(spot1.loc, spot2.loc) | ||||
|         A.append(dist_table) | ||||
|     c = c*len(landmarks) | ||||
|     A_ub = [] | ||||
|     for line in A : | ||||
|         A_ub += line | ||||
|     return c, A_ub, [max_steps] | ||||
|  | ||||
| # Go through the landmarks and force the optimizer to use landmarks where attractiveness is set to -1 | ||||
| def respect_user_mustsee(landmarks: list, A_eq: list, b_eq: list) : | ||||
|     L = len(landmarks) | ||||
|     for i, elem in enumerate(landmarks) : | ||||
|         if elem.attractiveness == -1 : | ||||
|             l = [0]*L*L | ||||
|             if elem.name != "arrivée" : | ||||
|                 for j in range(L) : | ||||
|                     l[j +i*L] = 1 | ||||
|             else :                          # This ensures we go to goal | ||||
|                 for k in range(L-1) : | ||||
|                         l[k*L+L-1] = 1 | ||||
|             A_eq = np.vstack((A_eq,l)) | ||||
|             b_eq.append(1) | ||||
|     return A_eq, b_eq | ||||
|  | ||||
| # Computes the path length given path matrix (dist_table) and a result | ||||
| def path_length(P: list, resx: list) : | ||||
|     return np.dot(P, resx) | ||||
|  | ||||
| # Initialize all landmarks (+ start and goal). Order matters here | ||||
| landmarks = [] | ||||
| landmarks.append(landmark("départ", -1, (0, 0))) | ||||
| landmarks.append(landmark("concorde", -1, (5,5))) | ||||
| landmarks.append(landmark("tour eiffel", 99, (1,1)))                           # PUT IN JSON | ||||
| landmarks.append(landmark("arc de triomphe", 99, (2,3))) | ||||
| landmarks.append(landmark("louvre", 70, (4,2))) | ||||
| landmarks.append(landmark("montmartre", 20, (0,2))) | ||||
| landmarks.append(landmark("arrivée", -1, (0, 0))) | ||||
|  | ||||
|  | ||||
|  | ||||
| # CONSTRAINT TO RESPECT MAX NUMBER OF STEPS | ||||
| max_steps = 25 | ||||
|  | ||||
| # SET CONSTRAINTS FOR INEQUALITY | ||||
| c, A_ub, b_ub = init_ub_dist(landmarks, max_steps)              # Add the distances from each landmark to the other | ||||
| P = A_ub                                                        # store the paths for later. Needed to compute path length | ||||
| A_ub, b_ub = respect_number(landmarks, A_ub, b_ub)              # Respect max number of visits.  | ||||
|  | ||||
| # SET CONSTRAINTS FOR EQUALITY | ||||
| A_eq, b_eq = init_eq_not_stay(landmarks)                       # Force solution not to stay in same place | ||||
| A_eq, b_eq = respect_user_mustsee(landmarks, A_eq, b_eq)       # Check if there are user_defined must_see. Also takes care of start/goal | ||||
| A_eq, b_eq = break_sym(landmarks, A_eq, b_eq)                  # break the symmetry. Only use the upper diagonal values | ||||
| A_eq, b_eq = respect_order(landmarks, A_eq, b_eq)              # Respect order of visit (only works when max_steps is limiting factor) | ||||
|  | ||||
| # Bounds for variables (x can only be 0 or 1) | ||||
| x_bounds = [(0, 1)] * len(c) | ||||
|  | ||||
| # Solve linear programming problem | ||||
| res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq = b_eq, bounds=x_bounds, method='highs', integrality=3) | ||||
|  | ||||
| # Raise error if no solution is found | ||||
| if not res.success : | ||||
|     raise ValueError("No solution has been found, please adapt your max steps") | ||||
|  | ||||
| # Print result | ||||
| print_res(res, P) | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||