mirror of
https://github.com/Telecominfraproject/oopt-gnpy.git
synced 2025-11-01 10:38:10 +00:00
Compare commits
420 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
06ff45d0c2 | ||
|
|
81474e252e | ||
|
|
0069265905 | ||
|
|
dec15f6797 | ||
|
|
f6041cd844 | ||
|
|
ec20d3981b | ||
|
|
08a867ef5a | ||
|
|
76c8296a5d | ||
|
|
f65059dd7f | ||
|
|
e23fef3f64 | ||
|
|
30234f913c | ||
|
|
2dd017bddc | ||
|
|
b25a298087 | ||
|
|
8635a7c182 | ||
|
|
46d25df241 | ||
|
|
0338ccb08f | ||
|
|
8899a575b8 | ||
|
|
b4407b1ff3 | ||
|
|
75f0aebe8f | ||
|
|
5a2dd53636 | ||
|
|
3555154c3e | ||
|
|
6c92c282e7 | ||
|
|
bd1847e5ba | ||
|
|
38727d6203 | ||
|
|
7e6d557d01 | ||
|
|
5d3ce91839 | ||
|
|
1f34e3005e | ||
|
|
8e27437086 | ||
|
|
b4cbe8029e | ||
|
|
92f3fd2063 | ||
|
|
1112b331ef | ||
|
|
ec34e84a3a | ||
|
|
bc9eee326a | ||
|
|
c9106c3a6f | ||
|
|
fa949f977a | ||
|
|
4c2d61bb9b | ||
|
|
ec7b14da8c | ||
|
|
771af4991c | ||
|
|
a08ce9ecb7 | ||
|
|
4d84a4f528 | ||
|
|
5d92baf35e | ||
|
|
ac5171e95e | ||
|
|
697ac311fe | ||
|
|
c22d1173af | ||
|
|
c0cc5fa9fd | ||
|
|
4bd9a9cdda | ||
|
|
63f8139dbc | ||
|
|
be731a5977 | ||
|
|
dd4ce4cea4 | ||
|
|
2548a2eee8 | ||
|
|
03948d6785 | ||
|
|
4c1c17eea6 | ||
|
|
b258d22d25 | ||
|
|
aef43e6bca | ||
|
|
3d7362743d | ||
|
|
96f3d5a805 | ||
|
|
2df500e027 | ||
|
|
346f24022a | ||
|
|
cb45c7ef16 | ||
|
|
9cfb57dc4b | ||
|
|
020d852758 | ||
|
|
8d97fcd735 | ||
|
|
097fe3114e | ||
|
|
5e0fd265ff | ||
|
|
6af137a085 | ||
|
|
3cdc8511a8 | ||
|
|
2d515eea4c | ||
|
|
61289119cb | ||
|
|
b6bc995e40 | ||
|
|
b0bb41bac6 | ||
|
|
0927a92652 | ||
|
|
f51061d650 | ||
|
|
74314f00ca | ||
|
|
81c5ef4a23 | ||
|
|
72e329b08e | ||
|
|
50f884663f | ||
|
|
0d81eb4b29 | ||
|
|
978a9407fa | ||
|
|
2c3b74cdc1 | ||
|
|
448e0f54be | ||
|
|
17f638e991 | ||
|
|
b78d3d8eda | ||
|
|
02a7e467e2 | ||
|
|
15304890f5 | ||
|
|
9ea96e431c | ||
|
|
7b3bfea614 | ||
|
|
d68637c2c8 | ||
|
|
f009306030 | ||
|
|
ca97cba18b | ||
|
|
a46c8c5398 | ||
|
|
88c2e2bd70 | ||
|
|
1bbcee8715 | ||
|
|
39a8fa3335 | ||
|
|
fe067e5367 | ||
|
|
5efbd17829 | ||
|
|
fa3e54a747 | ||
|
|
603beccb01 | ||
|
|
4b20afd599 | ||
|
|
adbe283c83 | ||
|
|
1908d7e29a | ||
|
|
7c6e16cfbc | ||
|
|
1480d23088 | ||
|
|
4be3522209 | ||
|
|
5381e0300f | ||
|
|
cde822ebf8 | ||
|
|
72d3525da1 | ||
|
|
dc867fa051 | ||
|
|
eaf3fcade8 | ||
|
|
3df270e4ac | ||
|
|
7937392dfc | ||
|
|
9c1c0f8d1f | ||
|
|
ad2ab0d164 | ||
|
|
c4bed94eb0 | ||
|
|
edc8eb55de | ||
|
|
f4f9868381 | ||
|
|
f103bebe05 | ||
|
|
c168af46bc | ||
|
|
7d82248903 | ||
|
|
e6cb269754 | ||
|
|
ac8a96398a | ||
|
|
86c79c7c60 | ||
|
|
0c47b3f3ea | ||
|
|
b2b500c5dc | ||
|
|
efc8468268 | ||
|
|
205baebd48 | ||
|
|
af9ba2750d | ||
|
|
e04afdbe4c | ||
|
|
e94fd9590e | ||
|
|
4f4f05abdf | ||
|
|
bcf93e1d9f | ||
|
|
48198bdd89 | ||
|
|
fbb4f3e5dd | ||
|
|
cefd1cf030 | ||
|
|
f8fa544e31 | ||
|
|
27885a4cbc | ||
|
|
185a62958f | ||
|
|
1ba748f2a4 | ||
|
|
90a75a9b3d | ||
|
|
215295efb1 | ||
|
|
2413bd9e0d | ||
|
|
356ae650fd | ||
|
|
2444c24545 | ||
|
|
7727708a3a | ||
|
|
0bfacd84f4 | ||
|
|
b271c1ca3c | ||
|
|
75660febc1 | ||
|
|
13aaa174e1 | ||
|
|
d112c728fc | ||
|
|
826af4a9fd | ||
|
|
c9693d355f | ||
|
|
9f37cb8ce6 | ||
|
|
d99e8ca565 | ||
|
|
44312125ab | ||
|
|
7558721642 | ||
|
|
fb49f7fb5d | ||
|
|
ee7f2c2f47 | ||
|
|
833fe006af | ||
|
|
9be0607b2e | ||
|
|
6dc3f2ffa6 | ||
|
|
df7cbf0b76 | ||
|
|
19b6378b1a | ||
|
|
29cb2b50a8 | ||
|
|
5932c014a0 | ||
|
|
a3f75e9af0 | ||
|
|
9ed31e2c4e | ||
|
|
8599f63fbf | ||
|
|
07de78cb05 | ||
|
|
f764bbe080 | ||
|
|
7aa343b767 | ||
|
|
69198779a7 | ||
|
|
3088032ad8 | ||
|
|
dfaa12598d | ||
|
|
1c724cdc6c | ||
|
|
b59423fb01 | ||
|
|
96bceed102 | ||
|
|
f8e146b9b9 | ||
|
|
c54ddb644a | ||
|
|
2be616411f | ||
|
|
7415744807 | ||
|
|
23905a90f4 | ||
|
|
83444b329e | ||
|
|
9898dc85a9 | ||
|
|
661287f600 | ||
|
|
353c6a77e9 | ||
|
|
8c006fec3f | ||
|
|
5896b7ce6a | ||
|
|
f831f6cb2c | ||
|
|
b505a1ae01 | ||
|
|
a0fac17c0e | ||
|
|
851f606fc0 | ||
|
|
8940430ee0 | ||
|
|
06d3927275 | ||
|
|
7cea13e90d | ||
|
|
de2a504078 | ||
|
|
df14b441a3 | ||
|
|
ab1391440f | ||
|
|
310d32dcea | ||
|
|
f8cd822c92 | ||
|
|
3ade885e41 | ||
|
|
008a88192c | ||
|
|
639e7f012c | ||
|
|
c8ecc16648 | ||
|
|
225fb1ec0c | ||
|
|
c943e0e9b4 | ||
|
|
3d8ac83fcc | ||
|
|
06399ca8af | ||
|
|
8f8fc13ded | ||
|
|
2b018cb9a5 | ||
|
|
9577f4c9a3 | ||
|
|
771d98cc10 | ||
|
|
91e875d54c | ||
|
|
da39f1489f | ||
|
|
2940576681 | ||
|
|
a49c137b78 | ||
|
|
87e748cd83 | ||
|
|
94c2e332bb | ||
|
|
1437b6010e | ||
|
|
5fc203482d | ||
|
|
ff6d81b749 | ||
|
|
167e644bd0 | ||
|
|
d1c7489768 | ||
|
|
a783e165dd | ||
|
|
45bdd82864 | ||
|
|
79c5cb6b78 | ||
|
|
63ade5fdef | ||
|
|
853b8c7aa3 | ||
|
|
0655fb60de | ||
|
|
69b28e3508 | ||
|
|
5c16d9539f | ||
|
|
e3acf02bde | ||
|
|
10268e82d4 | ||
|
|
bc44bf726a | ||
|
|
9839681bc0 | ||
|
|
beb292cb07 | ||
|
|
29c4134f60 | ||
|
|
a6157f328d | ||
|
|
c6432e2c28 | ||
|
|
4a756bf2a9 | ||
|
|
b77cc5cd40 | ||
|
|
9f94af6ff7 | ||
|
|
7aba40cd5b | ||
|
|
fb6c17c5ff | ||
|
|
4504d80c80 | ||
|
|
8a8c8989cb | ||
|
|
20731dbe4b | ||
|
|
55393ca9eb | ||
|
|
e9aa4d5601 | ||
|
|
6d49769df9 | ||
|
|
0333e9d094 | ||
|
|
53bedca50a | ||
|
|
b9518ca987 | ||
|
|
794e713d6d | ||
|
|
5e1fd7501e | ||
|
|
c86ea206d9 | ||
|
|
b810cf84c2 | ||
|
|
15bc5db2ed | ||
|
|
4845d9005e | ||
|
|
bed4e9f1e1 | ||
|
|
7b20db10cc | ||
|
|
d59b3e8c46 | ||
|
|
8fccbb0ac2 | ||
|
|
f36a610e52 | ||
|
|
479a2f358e | ||
|
|
8795c357ae | ||
|
|
d8cb7526bb | ||
|
|
6ead8e391b | ||
|
|
362f45083d | ||
|
|
36218037ec | ||
|
|
584b56bc83 | ||
|
|
d33602e131 | ||
|
|
4304b49bf0 | ||
|
|
4f5325acac | ||
|
|
f8a40bfaf0 | ||
|
|
52dfb20a2b | ||
|
|
34b20cdfe0 | ||
|
|
f462201499 | ||
|
|
5106bdf634 | ||
|
|
b93c6dbcbd | ||
|
|
2ca141baba | ||
|
|
01fe5d2147 | ||
|
|
74830cede4 | ||
|
|
2f21cc29f7 | ||
|
|
05f8d97d68 | ||
|
|
04c4795192 | ||
|
|
ff6e379b6f | ||
|
|
25cfc375bc | ||
|
|
627184ef2d | ||
|
|
180e1178ef | ||
|
|
1c33454ce3 | ||
|
|
c7abe11dd8 | ||
|
|
7a6feccc8b | ||
|
|
f05e578f51 | ||
|
|
277ebea2b5 | ||
|
|
4982c5e147 | ||
|
|
726e217205 | ||
|
|
695c538ee5 | ||
|
|
e9b287ceac | ||
|
|
ecdae68b62 | ||
|
|
e737564b32 | ||
|
|
75c4a8d96d | ||
|
|
28dc1b050f | ||
|
|
4a9a7e31ba | ||
|
|
92da31f905 | ||
|
|
afd264c816 | ||
|
|
d676e3e217 | ||
|
|
4af117aacf | ||
|
|
b27e567a5e | ||
|
|
8ec9a5bf99 | ||
|
|
f129f92cfd | ||
|
|
ef87373d4b | ||
|
|
ff938d4ced | ||
|
|
ae555e0ce1 | ||
|
|
44db6093f5 | ||
|
|
d4bf6ce201 | ||
|
|
0b1c7bfa30 | ||
|
|
80f8a345b5 | ||
|
|
c929ce0257 | ||
|
|
7d3ab357d0 | ||
|
|
84d87602b8 | ||
|
|
14938ea7dd | ||
|
|
be0c052e5e | ||
|
|
af90a6658e | ||
|
|
9720c979b3 | ||
|
|
7c86186416 | ||
|
|
404dbfe725 | ||
|
|
02eac208dd | ||
|
|
bc9a0432cd | ||
|
|
dba7846d6c | ||
|
|
238d7723bb | ||
|
|
d2724bb1a5 | ||
|
|
8e046a9b50 | ||
|
|
1b808afd5d | ||
|
|
8d6f69eb05 | ||
|
|
c4562df955 | ||
|
|
8e1e8a8be3 | ||
|
|
363e92e072 | ||
|
|
c5a52fdb6d | ||
|
|
2dd096cfab | ||
|
|
80e1ce12ce | ||
|
|
2777d957e4 | ||
|
|
898aa4f41a | ||
|
|
c9ece6ad7c | ||
|
|
a6265c1b8d | ||
|
|
5646714c13 | ||
|
|
8ab13537a9 | ||
|
|
25e14e4846 | ||
|
|
55d67f890d | ||
|
|
9b3d7614f5 | ||
|
|
a3273d24b5 | ||
|
|
ffd7bec485 | ||
|
|
0aef76407d | ||
|
|
f2ad236863 | ||
|
|
487237638e | ||
|
|
3a78ccafce | ||
|
|
e99b9c286c | ||
|
|
8c3ffdfd4e | ||
|
|
a4c7395cfe | ||
|
|
398124e841 | ||
|
|
07cc3fc079 | ||
|
|
27bae162d6 | ||
|
|
d4392e5a7a | ||
|
|
a0aab0918a | ||
|
|
2461385a94 | ||
|
|
ba06a0e104 | ||
|
|
48f9c448e4 | ||
|
|
2e0d01bc0b | ||
|
|
0d3a86f1d8 | ||
|
|
a30cd0f721 | ||
|
|
1491c2361f | ||
|
|
ca7b993d95 | ||
|
|
8c75c4f9d7 | ||
|
|
b730b1a4f4 | ||
|
|
a7d3c00e5b | ||
|
|
9146290ecd | ||
|
|
3fae7210d8 | ||
|
|
002f1dfa36 | ||
|
|
b3c1e6af95 | ||
|
|
58ac717f8d | ||
|
|
f193fb261a | ||
|
|
55d8d23b25 | ||
|
|
ca642e3dfc | ||
|
|
bfb7b466eb | ||
|
|
6467cb5819 | ||
|
|
ba215d8a07 | ||
|
|
4dc0913825 | ||
|
|
fedebc7038 | ||
|
|
b42715f003 | ||
|
|
48ae4252db | ||
|
|
0b4dd58c2a | ||
|
|
81b863122d | ||
|
|
732749d459 | ||
|
|
014e5fd966 | ||
|
|
dc7a459697 | ||
|
|
7f378e5479 | ||
|
|
d91e279294 | ||
|
|
c5abc4109f | ||
|
|
c8ce640a2a | ||
|
|
029877604e | ||
|
|
7ee1ad2a92 | ||
|
|
24f3e135ad | ||
|
|
a5718911c5 | ||
|
|
660d9c8c49 | ||
|
|
2ec50c4e07 | ||
|
|
32b4eda3f9 | ||
|
|
d5f5ee5595 | ||
|
|
f0545c57a8 | ||
|
|
815f4d2810 | ||
|
|
bd474151ab | ||
|
|
2c585faef6 | ||
|
|
b15be2cf0d | ||
|
|
3cc98ae388 | ||
|
|
09d1dbf927 | ||
|
|
cda7f5d50b | ||
|
|
1047bbc37c | ||
|
|
b1cb759164 | ||
|
|
2640912baa | ||
|
|
8676daed3a | ||
|
|
db31986d28 | ||
|
|
7823eca871 | ||
|
|
572d7d6999 |
@@ -1,21 +0,0 @@
|
||||
# http://editorconfig.org
|
||||
|
||||
root = true
|
||||
|
||||
[*]
|
||||
indent_style = space
|
||||
indent_size = 4
|
||||
trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
charset = utf-8
|
||||
end_of_line = lf
|
||||
|
||||
[*.bat]
|
||||
indent_style = tab
|
||||
end_of_line = crlf
|
||||
|
||||
[LICENSE]
|
||||
insert_final_newline = false
|
||||
|
||||
[Makefile]
|
||||
indent_style = tab
|
||||
15
.github/ISSUE_TEMPLATE.md
vendored
15
.github/ISSUE_TEMPLATE.md
vendored
@@ -1,15 +0,0 @@
|
||||
* gnpy version:
|
||||
* Python version:
|
||||
* Operating System:
|
||||
|
||||
### Description
|
||||
|
||||
Describe what you were trying to get done.
|
||||
Tell us what happened, what went wrong, and what you expected to happen.
|
||||
|
||||
### What I Did
|
||||
|
||||
```
|
||||
Paste the command(s) you ran and the output.
|
||||
If there was a crash, please include the traceback here.
|
||||
```
|
||||
29
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
29
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
|
||||
---
|
||||
|
||||
**Describe the bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**To Reproduce**
|
||||
Steps to reproduce the behavior:
|
||||
1. Go to '...'
|
||||
2. Click on '....'
|
||||
3. Scroll down to '....'
|
||||
4. See error
|
||||
|
||||
**Expected behavior**
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**Screenshots**
|
||||
If applicable, add screenshots to help explain your problem.
|
||||
|
||||
**Environment:**
|
||||
- OS: [e.g. Windows]
|
||||
- Python Version [e.g, 3.7]
|
||||
- Anaconda Version [e.g. 3.7]
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the problem here.
|
||||
17
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
17
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -2,6 +2,7 @@
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
.ipynb_checkpoints
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
@@ -60,3 +61,6 @@ target/
|
||||
|
||||
# pyenv python configuration file
|
||||
.python-version
|
||||
|
||||
# MacOS DS_store
|
||||
.DS_Store
|
||||
|
||||
4
.readthedocs.yml
Normal file
4
.readthedocs.yml
Normal file
@@ -0,0 +1,4 @@
|
||||
build:
|
||||
image: latest
|
||||
python:
|
||||
version: 3.6
|
||||
35
.travis.yml
35
.travis.yml
@@ -1,29 +1,10 @@
|
||||
# Config file for automatic testing at travis-ci.org
|
||||
# This file will be regenerated if you run travis_pypi_setup.py
|
||||
|
||||
language: python
|
||||
python:
|
||||
- 3.5
|
||||
- 3.4
|
||||
- 3.3
|
||||
- 2.7
|
||||
- 2.6
|
||||
|
||||
# command to install dependencies, e.g. pip install -r requirements.txt --use-mirrors
|
||||
install: pip install -U tox-travis
|
||||
|
||||
# command to run tests, e.g. python setup.py test
|
||||
script: tox
|
||||
|
||||
# After you create the Github repo and add it to Travis, run the
|
||||
# travis_pypi_setup.py script to finish PyPI deployment setup
|
||||
deploy:
|
||||
provider: pypi
|
||||
distributions: sdist bdist_wheel
|
||||
user: <TBD>
|
||||
password:
|
||||
secure: PLEASE_REPLACE_ME
|
||||
on:
|
||||
tags: true
|
||||
repo: <TBD>/gnpy
|
||||
python: 2.7
|
||||
- "3.6"
|
||||
# command to install dependencies
|
||||
install:
|
||||
- python setup.py install
|
||||
# command to run tests
|
||||
before_script:
|
||||
script:
|
||||
- pytest
|
||||
|
||||
28
AUTHORS.rst
28
AUTHORS.rst
@@ -1,13 +1,21 @@
|
||||
=======
|
||||
Credits
|
||||
=======
|
||||
gnpy is written and maintained by the Telecom Infra Project with work
|
||||
contributed by the following TIP members.
|
||||
|
||||
Development Lead
|
||||
----------------
|
||||
To learn how to contribute, please see CONTRIBUTING.md
|
||||
|
||||
* <TBD> <<TBD>@<TBD>.com>
|
||||
(*in alphabetical order*)
|
||||
|
||||
Contributors
|
||||
------------
|
||||
|
||||
None yet. Why not be the first?
|
||||
- Alessio Ferrari (Politecnico di Torino) <alessio.ferrari@polito.it>
|
||||
- Brian Taylor (Facebook) <briantaylor@fb.com>
|
||||
- David Boertjes (Ciena) <dboertje@ciena.com>
|
||||
- Esther Le Rouzic (Orange) <esther.lerouzic@orange.com>
|
||||
- Gabriele Galimberti (Cisco) <ggalimbe@cisco.com>
|
||||
- Gert Grammel (Juniper Networks) <ggrammel@juniper.net>
|
||||
- Gilad Goldfarb (Facebook) <giladg@fb.com>
|
||||
- James Powell (Telecom Infra Project) <james.powell@telecominfraproject.com>
|
||||
- Jeanluc Augé (Orange) <jeanluc.auge@orange.com>
|
||||
- Jonas Mårtensson (RISE) <jonas.martensson@ri.se>
|
||||
- Mattia Cantono (Politecnico di Torino) <mattia.cantono@polito.it>
|
||||
- Roberts Miculens (Lattelecom) <roberts.miculens@lattelecom.lv>
|
||||
- Vittorio Curri (Politecnico di Torino) <vittorio.curri@polito.it>
|
||||
- Xufeng Liu (Jabil) <xufeng_liu@jabil.com>
|
||||
|
||||
114
CONTRIBUTING.rst
114
CONTRIBUTING.rst
@@ -1,114 +0,0 @@
|
||||
.. highlight:: shell
|
||||
|
||||
============
|
||||
Contributing
|
||||
============
|
||||
|
||||
Contributions are welcome, and they are greatly appreciated! Every
|
||||
little bit helps, and credit will always be given.
|
||||
|
||||
You can contribute in many ways:
|
||||
|
||||
Types of Contributions
|
||||
----------------------
|
||||
|
||||
Report Bugs
|
||||
~~~~~~~~~~~
|
||||
|
||||
Report bugs at https://github.com/<TBD>/gnpy/issues.
|
||||
|
||||
If you are reporting a bug, please include:
|
||||
|
||||
* Your operating system name and version.
|
||||
* Any details about your local setup that might be helpful in troubleshooting.
|
||||
* Detailed steps to reproduce the bug.
|
||||
|
||||
Fix Bugs
|
||||
~~~~~~~~
|
||||
|
||||
Look through the GitHub issues for bugs. Anything tagged with "bug"
|
||||
and "help wanted" is open to whoever wants to implement it.
|
||||
|
||||
Implement Features
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Look through the GitHub issues for features. Anything tagged with "enhancement"
|
||||
and "help wanted" is open to whoever wants to implement it.
|
||||
|
||||
Write Documentation
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
gnpy could always use more documentation, whether as part of the
|
||||
official gnpy docs, in docstrings, or even on the web in blog posts,
|
||||
articles, and such.
|
||||
|
||||
Submit Feedback
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
The best way to send feedback is to file an issue at https://github.com/<TBD>/gnpy/issues.
|
||||
|
||||
If you are proposing a feature:
|
||||
|
||||
* Explain in detail how it would work.
|
||||
* Keep the scope as narrow as possible, to make it easier to implement.
|
||||
* Remember that this is a volunteer-driven project, and that contributions
|
||||
are welcome :)
|
||||
|
||||
Get Started!
|
||||
------------
|
||||
|
||||
Ready to contribute? Here's how to set up `gnpy` for local development.
|
||||
|
||||
1. Fork the `gnpy` repo on GitHub.
|
||||
2. Clone your fork locally::
|
||||
|
||||
$ git clone git@github.com:your_name_here/gnpy.git
|
||||
|
||||
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
|
||||
|
||||
$ mkvirtualenv gnpy
|
||||
$ cd gnpy/
|
||||
$ python setup.py develop
|
||||
|
||||
4. Create a branch for local development::
|
||||
|
||||
$ git checkout -b name-of-your-bugfix-or-feature
|
||||
|
||||
Now you can make your changes locally.
|
||||
|
||||
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
|
||||
|
||||
$ flake8 gnpy tests
|
||||
$ python setup.py test or py.test
|
||||
$ tox
|
||||
|
||||
To get flake8 and tox, just pip install them into your virtualenv.
|
||||
|
||||
6. Commit your changes and push your branch to GitHub::
|
||||
|
||||
$ git add .
|
||||
$ git commit -m "Your detailed description of your changes."
|
||||
$ git push origin name-of-your-bugfix-or-feature
|
||||
|
||||
7. Submit a pull request through the GitHub website.
|
||||
|
||||
Pull Request Guidelines
|
||||
-----------------------
|
||||
|
||||
Before you submit a pull request, check that it meets these guidelines:
|
||||
|
||||
1. The pull request should include tests.
|
||||
2. If the pull request adds functionality, the docs should be updated. Put
|
||||
your new functionality into a function with a docstring, and add the
|
||||
feature to the list in README.rst.
|
||||
3. The pull request should work for Python 2.6, 2.7, 3.3, 3.4 and 3.5, and for PyPy. Check
|
||||
https://travis-ci.org/<TBD>/gnpy/pull_requests
|
||||
and make sure that the tests pass for all supported Python versions.
|
||||
|
||||
Tips
|
||||
----
|
||||
|
||||
To run a subset of tests::
|
||||
|
||||
$ py.test tests.test_gnpy
|
||||
|
||||
262
Excel_userguide.rst
Normal file
262
Excel_userguide.rst
Normal file
@@ -0,0 +1,262 @@
|
||||
|
||||
How to prepare the Excel input file
|
||||
-----------------------------------
|
||||
|
||||
`examples/transmission_main_example.py <examples/transmission_main_example.py>`_ gives the possibility to use an excel input file instead of a json file. The program then will generate the corresponding json file for you.
|
||||
|
||||
The file named 'meshTopologyExampleV2.xls' is an example.
|
||||
|
||||
In order to work the excel file MUST contain at least 2 sheets:
|
||||
|
||||
- Nodes
|
||||
- Links
|
||||
|
||||
(In progress) The File MAY contain an additional sheet:
|
||||
|
||||
- Eqt
|
||||
- Service
|
||||
|
||||
Nodes sheet
|
||||
-----------
|
||||
|
||||
Nodes sheet contains seven columns.
|
||||
Each line represents a 'node' (ROADM site or an in line amplifier site ILA)::
|
||||
|
||||
City (Mandatory) ; State ; Country ; Region ; Latitude ; Longitude ; Type
|
||||
|
||||
- **City** is used for the name of a node of the graph. It accepts letters, numbers,underscore,dash, blank... (not exhaustive). The user may want to avoid commas for future CSV exports.
|
||||
|
||||
**City name MUST be unique**
|
||||
|
||||
- **Type** is not mandatory.
|
||||
|
||||
- If not filled, it will be interpreted as an 'ILA' site if node degree is 2 and as a ROADM otherwise.
|
||||
- If filled, it can take "ROADM", "FUSED" or "ILA" values. If another string is used, it will be considered as not filled. FUSED means that ingress and egress spans will be fused together.
|
||||
|
||||
- *State*, *Country*, *Region* are not mandatory.
|
||||
"Region" is a holdover from the CORONET topology reference file `CORONET_Global_Topology.xls <examples/CORONET_Global_Topology.xls>`_. CORONET separates its network into geographical regions (Europe, Asia, Continental US.) This information is not used by gnpy.
|
||||
|
||||
- *Longitude*, *Latitude* are not mandatory. If filled they should contain numbers.
|
||||
|
||||
**There MUST NOT be empty line(s) between two nodes lines**
|
||||
|
||||
|
||||
Links sheet
|
||||
-----------
|
||||
|
||||
Links sheet must contain sixteen columns::
|
||||
|
||||
<-- east cable from a to z --> <-- west from z to -->
|
||||
NodeA ; NodeZ ; Distance km ; Fiber type ; Lineic att ; Con_in ; Con_out ; PMD ; Cable Id ; Distance km ; Fiber type ; Lineic att ; Con_in ; Con_out ; PMD ; Cable Id
|
||||
|
||||
|
||||
Links sheets MUST contain all links between nodes defined in Nodes sheet.
|
||||
Each line represents a 'bidir link' between two nodes. The two directions are represented on a single line with "east cable from a to z" fields and "west from z to a" fields. Values for 'a to z' may be different from values from 'z to a'.
|
||||
Since both direction of a bidir 'a-z' link are described on the same line (east and west), 'z to a' direction MUST NOT be repeated in a different line. If repeated, it will generate another parrallel bidir link between the same end nodes.
|
||||
|
||||
|
||||
Parameters for "east cable from a to z" and "west from z to a" are detailed in 2x7 columns. If not filled, "west from z to a" is copied from "east cable from a to z".
|
||||
|
||||
For example, a line filled with::
|
||||
|
||||
node6 ; node3 ; 80 ; SSMF ; 0.2 ; 0.5 ; 0.5 ; 0.1 ; cableB ; ; ; 0.21 ; 0.2 ; ; ;
|
||||
|
||||
will generate a unidir fiber span from node6 to node3 with::
|
||||
|
||||
[node6 node3 80 SSMF 0.2 0.5 0.5 0.1 cableB]
|
||||
|
||||
and a fiber span from node3 to node6::
|
||||
|
||||
[node6 node3 80 SSMF 0.21 0.2 0.5 0.1 cableB] attributes.
|
||||
|
||||
- **NodeA** and **NodeZ** are Mandatory.
|
||||
They are the two endpoints of the link. They MUST contain a node name from the **City** names listed in Nodes sheet.
|
||||
|
||||
- **Distance km** is not mandatory.
|
||||
It is the link length.
|
||||
|
||||
- If filled it MUST contain numbers. If empty it is replaced by a default "80" km value.
|
||||
- If value is below 150 km, it is considered as a single (bidirectional) fiber span.
|
||||
- If value is over 150 km the `transmission_main_example.py <examples/transmission_main_example.py>`_ program will automatically suppose that intermediate span description are required and will generate fiber spans elements with "_1","_2", ... trailing strings which are not visible in the json output. The reason for the splitting is that current edfa usually do not support large span loss. The current assumption is that links larger than 150km will require intermediate amplification. This value will be revisited when Raman amplification is added”
|
||||
|
||||
- **Fiber type** is not mandatory.
|
||||
|
||||
If filled it must contain types listed in `eqpt_config.json <examples/eqpt_config.json>`_ in "Fiber" list "type_variety".
|
||||
If not filled it takes "SSMF" as default value.
|
||||
|
||||
- **Lineic att** is not mandatory.
|
||||
|
||||
It is the lineic attenuation expressed in dB/km.
|
||||
If filled it must contain positive numbers.
|
||||
If not filled it takes "0.2" dB/km value
|
||||
|
||||
- *Con_in*, *Con_out* are not mandatory.
|
||||
|
||||
They are the connector loss in dB at ingress and egress of the fiber spans.
|
||||
If filled they must contain positive numbers.
|
||||
If not filled they take "0.5" dB default value.
|
||||
|
||||
- *PMD* is not mandatory and and is not used yet.
|
||||
|
||||
It is the PMD value of the link in ps.
|
||||
If filled they must contain positive numbers.
|
||||
If not filled, it takes "0.1" ps value.
|
||||
|
||||
- *Cable Id* is not mandatory.
|
||||
If filled they must contain strings with the same constraint as "City" names. Its value is used to differenate links having the same end points. In this case different Id should be used. Cable Ids are not meant to be unique in general.
|
||||
|
||||
|
||||
|
||||
|
||||
(in progress)
|
||||
|
||||
Eqpt sheet
|
||||
----------
|
||||
|
||||
Eqt sheet is optional. It lists the amplifiers types and characteristics on each degree of the *Node A* line.
|
||||
Eqpt sheet must contain twelve columns::
|
||||
|
||||
<-- east cable from a to z --> <-- west from z to a -->
|
||||
Node A ; Node Z ; amp type ; att_in ; amp gain ; tilt ; att_out ; amp type ; att_in ; amp gain ; tilt ; att_out
|
||||
|
||||
If the sheet is present, it MUST have as many lines as egress directions of ROADMs defined in Links Sheet.
|
||||
|
||||
For example, consider the following list of links (A,B and C being a ROADM and amp# ILAs)
|
||||
|
||||
::
|
||||
|
||||
A - amp1
|
||||
amp1 - amp2
|
||||
Amp2 - B
|
||||
A - amp3
|
||||
amp3 - C
|
||||
|
||||
then Eqpt sheet should contain:
|
||||
- one line for each ILAs: amp1, amp2, amp3
|
||||
- one line for each degree 1 ROADMs B and C
|
||||
- two lines for ROADM A which is a degree 2 ROADM
|
||||
|
||||
::
|
||||
|
||||
A - amp1
|
||||
amp1 - amp2
|
||||
Amp2 - B
|
||||
A - amp3
|
||||
amp3 - C
|
||||
B - amp2
|
||||
C - amp3
|
||||
|
||||
|
||||
In case you already have filled Nodes and Links sheets `create_eqpt_sheet.py <examples/create_eqpt_sheet.py>`_ can be used to automatically create a template for the mandatory entries of the list.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ cd examples
|
||||
$ python create_eqpt_sheet.py meshTopologyExampleV2.xls
|
||||
|
||||
This generates a text file meshTopologyExampleV2_eqt_sheet.txt whose content can be directly copied into the Eqt sheet of the excel file. The user then can fill the values in the rest of the columns.
|
||||
|
||||
|
||||
- **Node A** is mandatory. It is the name of the node (as listed in Nodes sheet).
|
||||
If Node A is a 'ROADM' (Type attribute in sheet Node), its number of occurence must be equal to its degree.
|
||||
If Node A is an 'ILA' it should appear only once.
|
||||
|
||||
- **Node Z** is mandatory. It is the egress direction from the *Node A* site. Multiple Links between the same Node A and NodeZ is not supported.
|
||||
|
||||
- **amp type** is not mandatory.
|
||||
If filled it must contain types listed in `eqpt_config.json <examples/eqpt_config.json>`_ in "Edfa" list "type_variety".
|
||||
If not filled it takes "std_medium_gain" as default value.
|
||||
|
||||
- **amp_gain** is not mandatory. It is the value to be set on the amplifier (in dB).
|
||||
If not filled, it will be determined with design rules in the convert.py file.
|
||||
If filled, it must contain positive numbers.
|
||||
|
||||
- *att_in* and *att_out* are not mandatory and are not used yet. They are the value of the attenautor at input and output of amplifier (in dB).
|
||||
If filled they must contain positive numbers.
|
||||
|
||||
- *tilt* --TODO--
|
||||
|
||||
# to be completed #
|
||||
|
||||
(in progress)
|
||||
|
||||
Service sheet
|
||||
-------------
|
||||
|
||||
Service sheet is optional. It lists the services for which path and feasibility must be computed with path_requests_run.py.
|
||||
|
||||
Service sheet must contain 11 columns::
|
||||
|
||||
route id ; Source ; Destination ; TRX type ; Mode ; System: spacing ; System: input power (dBm) ; System: nb of channels ; routing: disjoint from ; routing: path ; routing: is loose?
|
||||
|
||||
- **route id** is mandatory. It must be unique. It is the identifier of the request. It can be an integer or a string (do not use blank or dash or coma)
|
||||
|
||||
- **Source** is mandatory. It is the name of the source node (as listed in Nodes sheet). Source MUST be a ROADM node. (TODO: relax this and accept trx entries)
|
||||
|
||||
- **Destination** is mandatory. It is the name of the destination node (as listed in Nodes sheet). Source MUST be a ROADM node. (TODO: relax this and accept trx entries)
|
||||
|
||||
- **TRX type ** is mandatory. They are the variety type and selected mode of the transceiver to be used for the propagation simulation. These modes MUST be defined in the equipment library. The format of the mode is used as the name of the mode. (TODO: maybe add another mode id on Transceiver library ?). In particular the mode selection defines the channel baudrate to be used for the propagation simulation.
|
||||
|
||||
- **mode** is optional. If not specified, the program will search for the mode of the defined transponder with the highest baudrate fitting within the spacing value.
|
||||
|
||||
- **System: spacing** is mandatory. Spacing is the channel spacing defined in GHz difined for the feasibility propagation simulation, assuming system full load.
|
||||
|
||||
- **System: input power (dBm) ; System: nb of channels** are optional input defining the system parameters for the propagation simulation.
|
||||
|
||||
- input power is the channel optical input power in dBm
|
||||
- nb of channels is the number of channels to be used for the simulation.
|
||||
|
||||
- **routing: disjoint from ; routing: path ; routing: is loose?** are optional.
|
||||
|
||||
- disjoint from: identifies the requests from which this request must be disjoint. If filled it must contain request ids separated by ' | '
|
||||
- path: is the set of ROADM nodes that must be used by the path. It must contain the list of ROADM names that the path must cross. TODO : only ROADM nodes are accepted in this release. Relax this with any type of nodes. If filled it must contain ROADM ids separated by ' | '. Exact names are required.
|
||||
- is loose? 'no' value means that the list of nodes should be strictly followed, while any other value means that the constraint may be relaxed if the node is not reachable.
|
||||
|
||||
- ** path bandwidth** is optional. It is the amount of capacity required between source and destination in Gbit/s. Default value is 0.0 Gbit/s.
|
||||
|
||||
convert_service_sheet.py
|
||||
------------------------
|
||||
|
||||
|
||||
`convert_service_sheet.py <examples/convert_service_sheet.py>`_ converts the service sheet to a json file following the Yang model for requesting Path Computation defined in `draft-ietf-teas-yang-path-computation-01.txt <https://www.ietf.org/id/draft-ietf-teas-yang-path-computation-01.pdf>`_. TODO: verify that this implementation is correct + give feedback to ietf on what is missing for our specific application.
|
||||
For PSE use, additional fields with trx type and mode have been added to the te-bandwidth field.
|
||||
|
||||
**Usage**: convert_service_sheet.py [-h] [-v] [-o OUTPUT] [workbook_name.xls]
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ cd examples
|
||||
$ python convert_service_sheet.py meshTopologyExampleV2.xls -o service_file.json
|
||||
|
||||
-o output_file.json is an optional parameter:
|
||||
|
||||
- if not used, the program output the json data on standard output and on a json file with name 'workbook_name_services.json'.
|
||||
|
||||
A template for the json file can be found here: `service_template.json <service_template.json>`_
|
||||
|
||||
path_requests_run.py
|
||||
------------------------
|
||||
|
||||
**Usage**: path_requests_run.py [-h] [-v] [-o OUTPUT]
|
||||
[network_filename xls or json] [service_filename xls or json] [eqpt_filename json]
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ cd examples
|
||||
$ python path_requests_run.py meshTopologyExampleV2.xls service_file.json eqpt_file -o output_file.json
|
||||
|
||||
A function that computes performances for a list of services provided in the service file (accepts json or excel format.
|
||||
|
||||
If no output file is given, the computation is shown on standard output for demo.
|
||||
If a file is specified with the optional -o argument, the result of the computation is converted into a json format following the Yang model for requesting Path Computation defined in `draft-ietf-teas-yang-path-computation-01.txt <https://www.ietf.org/id/draft-ietf-teas-yang-path-computation-01.pdf>`_. TODO: verify that this implementation is correct + give feedback to ietf on what is missing for our specific application.
|
||||
|
||||
A template for the result of computation json file can be found here: `path_result_template.json <path_result_template.json>`_
|
||||
|
||||
Important note: path_requests_run.py is not a network dimensionning tool : each service does not reserve spectrum, or occupy ressources such as transponders. It only computes path feasibility assuming the spectrum (between defined frequencies) is loaded with "nb of channels" spaced by "spacing" values as specified in the system parameters input in the service file, each cannel having the same characteristics in terms of baudrate, format, ... as the service transponder. The transceiver element acts as a "logical starting/stopping point" for the spectral information propagation. At that point it is not meant to represent the capacity of add drop ports
|
||||
As a result transponder type is not part of the network info. it is related to the list of services requests.
|
||||
|
||||
In a next step we plan to provide required features to enable dimensionning : alocation of ressources, counting channels, limitation of the number of channels, ...
|
||||
|
||||
(in progress)
|
||||
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
=======
|
||||
History
|
||||
=======
|
||||
|
||||
0.1.0 (2017-06-29)
|
||||
------------------
|
||||
|
||||
* First release on PyPI.
|
||||
40
LICENSE
40
LICENSE
@@ -1,31 +1,29 @@
|
||||
BSD 3-Clause License
|
||||
|
||||
BSD License
|
||||
|
||||
Copyright (c) 2017, <TBD>
|
||||
Copyright (c) 2018, Telecom Infra Project
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
|
||||
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
||||
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
|
||||
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
|
||||
OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
11
MANIFEST.in
11
MANIFEST.in
@@ -1,11 +0,0 @@
|
||||
include AUTHORS.rst
|
||||
include CONTRIBUTING.rst
|
||||
include HISTORY.rst
|
||||
include LICENSE
|
||||
include README.rst
|
||||
|
||||
recursive-include tests *
|
||||
recursive-exclude * __pycache__
|
||||
recursive-exclude * *.py[co]
|
||||
|
||||
recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif
|
||||
87
Makefile
87
Makefile
@@ -1,87 +0,0 @@
|
||||
.PHONY: clean clean-test clean-pyc clean-build docs help
|
||||
.DEFAULT_GOAL := help
|
||||
define BROWSER_PYSCRIPT
|
||||
import os, webbrowser, sys
|
||||
try:
|
||||
from urllib import pathname2url
|
||||
except:
|
||||
from urllib.request import pathname2url
|
||||
|
||||
webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1])))
|
||||
endef
|
||||
export BROWSER_PYSCRIPT
|
||||
|
||||
define PRINT_HELP_PYSCRIPT
|
||||
import re, sys
|
||||
|
||||
for line in sys.stdin:
|
||||
match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line)
|
||||
if match:
|
||||
target, help = match.groups()
|
||||
print("%-20s %s" % (target, help))
|
||||
endef
|
||||
export PRINT_HELP_PYSCRIPT
|
||||
BROWSER := python -c "$$BROWSER_PYSCRIPT"
|
||||
|
||||
help:
|
||||
@python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)
|
||||
|
||||
clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts
|
||||
|
||||
|
||||
clean-build: ## remove build artifacts
|
||||
rm -fr build/
|
||||
rm -fr dist/
|
||||
rm -fr .eggs/
|
||||
find . -name '*.egg-info' -exec rm -fr {} +
|
||||
find . -name '*.egg' -exec rm -f {} +
|
||||
|
||||
clean-pyc: ## remove Python file artifacts
|
||||
find . -name '*.pyc' -exec rm -f {} +
|
||||
find . -name '*.pyo' -exec rm -f {} +
|
||||
find . -name '*~' -exec rm -f {} +
|
||||
find . -name '__pycache__' -exec rm -fr {} +
|
||||
|
||||
clean-test: ## remove test and coverage artifacts
|
||||
rm -fr .tox/
|
||||
rm -f .coverage
|
||||
rm -fr htmlcov/
|
||||
|
||||
lint: ## check style with flake8
|
||||
flake8 gnpy tests
|
||||
|
||||
test: ## run tests quickly with the default Python
|
||||
py.test
|
||||
|
||||
|
||||
test-all: ## run tests on every Python version with tox
|
||||
tox
|
||||
|
||||
coverage: ## check code coverage quickly with the default Python
|
||||
coverage run --source gnpy -m pytest
|
||||
coverage report -m
|
||||
coverage html
|
||||
$(BROWSER) htmlcov/index.html
|
||||
|
||||
docs: ## generate Sphinx HTML documentation, including API docs
|
||||
rm -f docs/gnpy.rst
|
||||
rm -f docs/modules.rst
|
||||
sphinx-apidoc -o docs/ gnpy
|
||||
$(MAKE) -C docs clean
|
||||
$(MAKE) -C docs html
|
||||
$(BROWSER) docs/_build/html/index.html
|
||||
|
||||
servedocs: docs ## compile the docs watching for changes
|
||||
watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D .
|
||||
|
||||
release: clean ## package and upload a release
|
||||
python setup.py sdist upload
|
||||
python setup.py bdist_wheel upload
|
||||
|
||||
dist: clean ## builds source and wheel package
|
||||
python setup.py sdist
|
||||
python setup.py bdist_wheel
|
||||
ls -l dist
|
||||
|
||||
install: clean ## install the package to the active Python's site-packages
|
||||
python setup.py install
|
||||
601
README.rst
601
README.rst
@@ -1,32 +1,595 @@
|
||||
====
|
||||
gnpy
|
||||
====
|
||||
====================================================================
|
||||
`gnpy`: mesh optical network route planning and optimization library
|
||||
====================================================================
|
||||
|
||||
|docs| |build|
|
||||
|
||||
.. image:: https://img.shields.io/pypi/v/gnpy.svg
|
||||
:target: https://pypi.python.org/pypi/gnpy
|
||||
**`gnpy` is an open-source, community-developed library for building route
|
||||
planning and optimization tools in real-world mesh optical networks.**
|
||||
|
||||
.. image:: https://img.shields.io/travis/<TBD>/gnpy.svg
|
||||
:target: https://travis-ci.org/<TBD>/gnpy
|
||||
`gnpy <http://github.com/telecominfraproject/oopt-gnpy>`__ is:
|
||||
|
||||
.. image:: https://readthedocs.org/projects/gnpy/badge/?version=latest
|
||||
:target: https://gnpy.readthedocs.io/en/latest/?badge=latest
|
||||
- a sponsored project of the `OOPT/PSE <https://telecominfraproject.com/open-optical-packet-transport/>`_ working group of the `Telecom Infra Project <http://telecominfraproject.com>`_
|
||||
- fully community-driven, fully open source library
|
||||
- driven by a consortium of operators, vendors, and academic researchers
|
||||
- intended for rapid development of production-grade route planning tools
|
||||
- easily extensible to include custom network elements
|
||||
- performant to the scale of real-world mesh optical networks
|
||||
|
||||
Documentation: https://gnpy.readthedocs.io
|
||||
|
||||
Branches and Tagged Releases
|
||||
----------------------------
|
||||
|
||||
- the `master <https://github.com/Telecominfraproject/oopt-gnpy/tree/master>`_ branch contains stable, validated code. It is updated from develop on a release schedule determined by the OOPT-PSE Working Group. For more information about the validation process, see: https://github.com/Telecominfraproject/oopt-gnpy/wiki/Testing-for-Quality
|
||||
- the `develop <https://github.com/Telecominfraproject/oopt-gnpy/tree/develop>`_ branch contains the latest code under active development, which may not be fully validated and tested.
|
||||
- the `phase-1 <https://github.com/Telecominfraproject/oopt-gnpy/tree/phase-1>`_ branch contains code for Phase I of the OOPT-PSE efforts and is kept only for reference. This branch is unmaintained.
|
||||
|
||||
A brief outline of major (tagged) `gnpy` releases:
|
||||
|
||||
+---------------+-------------+-----------------------------------------------+
|
||||
| release date | version tag | notes |
|
||||
+===============+=============+===============================================+
|
||||
| Jan 30, 2019 | v1.1 | - XLS parser enhancements |
|
||||
| | | - carrier probe feature |
|
||||
| | | - bug fixes |
|
||||
+---------------+-------------+-----------------------------------------------+
|
||||
| Oct 16, 2018 | v1.0 | - first "production"-ready release |
|
||||
| | | - open network element model (EDFA, GN-model) |
|
||||
| | | - auto-design functionality |
|
||||
| | | - path request functionality |
|
||||
+---------------+-------------+-----------------------------------------------+
|
||||
|
||||
How to Install
|
||||
--------------
|
||||
|
||||
**Note**: `gnpy` supports Python 3 only. Python 2 is not supported.
|
||||
`gnpy` requires Python ≥3.6
|
||||
|
||||
**Note**: the `gnpy` maintainers strongly recommend the use of Anaconda for
|
||||
managing dependencies.
|
||||
|
||||
It is recommended that you use a "virtual environment" when installing `gnpy`.
|
||||
Do not install `gnpy` on your system Python.
|
||||
|
||||
We recommend the use of the Anaconda Python distribution
|
||||
(https://www.anaconda.com/download) which comes with many scientific computing
|
||||
dependencies pre-installed. Anaconda creates a base "virtual environment" for
|
||||
you automatically. You can also create and manage your conda "virtual
|
||||
environments" yourself (see:
|
||||
https://conda.io/docs/user-guide/tasks/manage-environments.html)
|
||||
|
||||
To activate your Anaconda virtual environment, you may need to do the
|
||||
following:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ source /path/to/anaconda/bin/activate # activate Anaconda base environment
|
||||
(base) $ # note the change to the prompt
|
||||
|
||||
You can check which Anaconda environment you are using with:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
(base) $ conda env list # list all environments
|
||||
# conda environments:
|
||||
#
|
||||
base * /src/install/anaconda3
|
||||
|
||||
(base) $ echo $CONDA_DEFAULT_ENV # show default environment
|
||||
base
|
||||
|
||||
You can check your version of Python with the following. If you are using
|
||||
Anaconda's Python 3, you should see similar output as below. Your results may
|
||||
be slightly different depending on your Anaconda installation path and the
|
||||
exact version of Python you are using.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ which python # check which Python executable is used
|
||||
/path/to/anaconda/bin/python
|
||||
$ python -V # check your Python version
|
||||
Python 3.6.5 :: Anaconda, Inc.
|
||||
|
||||
From within your Anaconda Python 3 environment, you can clone the master branch
|
||||
of the `gnpy` repo and install it with:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ git clone https://github.com/Telecominfraproject/oopt-gnpy # clone the repo
|
||||
$ cd oopt-gnpy
|
||||
$ python setup.py install # install
|
||||
|
||||
To test that `gnpy` was successfully installed, you can run this command. If it
|
||||
executes without a `ModuleNotFoundError`, you have successfully installed
|
||||
`gnpy`.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ python -c 'import gnpy' # attempt to import gnpy
|
||||
|
||||
$ cd oopt-gnpy
|
||||
$ pytest # run tests
|
||||
|
||||
Instructions for First Use
|
||||
--------------------------
|
||||
|
||||
``gnpy`` is a library for building route planning and optimization tools.
|
||||
|
||||
It ships with a number of example programs. Release versions will ship with
|
||||
fully-functional programs.
|
||||
|
||||
**Note**: *If you are a network operator or involved in route planning and
|
||||
optimization for your organization, please contact project maintainer James
|
||||
Powell <james.powell@telecominfraproject>. gnpy is looking for users with
|
||||
specific, delineated use cases to drive requirements for future
|
||||
development.*
|
||||
|
||||
**To get started, run the main transmission example:**
|
||||
|
||||
**Note**: *Examples should be run from the examples/ folder.*
|
||||
|
||||
.. code-block:: shell
|
||||
$ pwd
|
||||
/path/to/oopt-gnpy
|
||||
$ cd examples
|
||||
$ python transmission_main_example.py
|
||||
|
||||
By default, this script operates on a single span network defined in
|
||||
`examples/edfa_example_network.json <examples/edfa_example_network.json>`_
|
||||
|
||||
You can specify a different network at the command line as follows. For
|
||||
example, to use the CORONET Global network defined in
|
||||
`examples/CORONET_Global_Topology.json <examples/CORONET_Global_Topology.json>`_:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ cd examples
|
||||
$ python transmission_main_example.py CORONET_Global_Topology.json
|
||||
|
||||
It is also possible to use an Excel file input (for example
|
||||
`examples/CORONET_Global_Topology.xls <examples/CORONET_Global_Topology.xls>`_).
|
||||
The Excel file will be processed into a JSON file with the same prefix. For
|
||||
further instructions on how to prepare the Excel input file, see
|
||||
`Excel_userguide.rst <Excel_userguide.rst>`_.
|
||||
|
||||
The main transmission example will calculate the average signal OSNR and SNR
|
||||
across network elements (transceiver, ROADMs, fibers, and amplifiers)
|
||||
between two transceivers selected by the user. Additional details are provided by doing ``transmission_main_example.py -h``. (By default, for the CORONET Global
|
||||
network, it will show the transmission of spectral information between Abilene and Albany)
|
||||
|
||||
This script calculates the average signal OSNR = |OSNR| and SNR = |SNR|.
|
||||
|
||||
.. |OSNR| replace:: P\ :sub:`ch`\ /P\ :sub:`ase`
|
||||
.. |SNR| replace:: P\ :sub:`ch`\ /(P\ :sub:`nli`\ +\ P\ :sub:`ase`)
|
||||
|
||||
|Pase| is the amplified spontaneous emission noise, and |Pnli| the non-linear
|
||||
interference noise.
|
||||
|
||||
.. |Pase| replace:: P\ :sub:`ase`
|
||||
.. |Pnli| replace:: P\ :sub:`nli`
|
||||
|
||||
Further Instructions for Use (`transmission_main_example.py`, `path_requests_run.py`)
|
||||
-------------------------------------------------------------------------------------
|
||||
|
||||
Design and transmission parameters are defined in a dedicated json file. By
|
||||
default, this information is read from `examples/eqpt_config.json
|
||||
<examples/eqpt_config.json>`_. This file defines the equipment libraries that
|
||||
can be customized (EDFAs, fibers, and transceivers).
|
||||
|
||||
It also defines the simulation parameters (spans, ROADMs, and the spectral
|
||||
information to transmit.)
|
||||
|
||||
The EDFA equipment library is a list of supported amplifiers. New amplifiers
|
||||
can be added and existing ones removed. Three different noise models are available:
|
||||
|
||||
1. `'type_def': 'variable_gain'` is a simplified model simulating a 2-coil EDFA with internal, input and output VOAs. The NF vs gain response is calculated accordingly based on the input parameters: `nf_min`, `nf_max`, and `gain_flatmax`. It is not a simple interpolation but a 2-stage NF calculation.
|
||||
2. `'type_def': 'fixed_gain'` is a fixed gain model. `NF == Cte == nf0` if `gain_min < gain < gain_flatmax`
|
||||
3. `'type_def': None` is an advanced model. A detailed json configuration file is required (by default `examples/std_medium_gain_advanced_config.json <examples/std_medium_gain_advanced_config.json>`_.) It uses a 3rd order polynomial where NF = f(gain), NF_ripple = f(frequency), gain_ripple = f(frequency), N-array dgt = f(frequency). Compared to the previous models, NF ripple and gain ripple are modelled.
|
||||
|
||||
For all amplifier models:
|
||||
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| field | type | description |
|
||||
+======================+===========+=========================================+
|
||||
| `type_variety` | (string) | a unique name to ID the amplifier in the|
|
||||
| | | JSON/Excel template topology input file |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `out_voa_auto` | (boolean) | auto_design feature to optimize the |
|
||||
| | | amplifier output VOA. If true, output |
|
||||
| | | VOA is present and will be used to push |
|
||||
| | | amplifier gain to its maximum, within |
|
||||
| | | EOL power margins. |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `allowed_for_design` | (boolean) | If false, the amplifier will not be |
|
||||
| | | picked by auto-design but it can still |
|
||||
| | | be used as a manual input (from JSON or |
|
||||
| | | Excel template topology files.) |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
|
||||
The fiber library currently describes SSMF and NZDF but additional fiber types can be entered by the user following the same model:
|
||||
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| field | type | description |
|
||||
+======================+===========+=========================================+
|
||||
| `type_variety` | (string) | a unique name to ID the fiber in the |
|
||||
| | | JSON or Excel template topology input |
|
||||
| | | file |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `dispersion` | (number) | (s.m-1.m-1) |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `gamma` | (number) | 2pi.n2/(lambda*Aeff) (w-2.m-1) |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
|
||||
The transceiver equipment library is a list of supported transceivers. New
|
||||
transceivers can be added and existing ones removed at will by the user. It is
|
||||
used to determine the service list path feasibility when running the
|
||||
path_request_run.py routine.
|
||||
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| field | type | description |
|
||||
+======================+===========+=========================================+
|
||||
| `type_variety` | (string) | a unique name to ID the transceiver in |
|
||||
| | | the JSON or Excel template topology |
|
||||
| | | input file |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `frequency` | (number) | Min/max as below. |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `mode` | (number) | a list of modes supported by the |
|
||||
| | | transponder. New modes can be added at |
|
||||
| | | will by the user. The modes are specific|
|
||||
| | | to each transponder type_variety. |
|
||||
| | | Each mode is described as below. |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
|
||||
The modes are defined as follows:
|
||||
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| field | type | description |
|
||||
+======================+===========+=========================================+
|
||||
| `format` | (string) | a unique name to ID the mode. |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `baud_rate` | (number) | in Hz |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `OSNR` | (number) | min required OSNR in 0.1nm (dB) |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `bit_rate` | (number) | in bit/s |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `roll_off` | (number) | Not used. |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `tx_osnr` | (number) | In dB. OSNR out from transponder. |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
| `cost` | (number) | Arbitrary unit |
|
||||
+----------------------+-----------+-----------------------------------------+
|
||||
|
||||
Simulation parameters are defined as follows.
|
||||
|
||||
Auto-design automatically creates EDFA amplifier network elements when they are
|
||||
missing, after a fiber, or between a ROADM and a fiber. This auto-design
|
||||
functionality can be manually and locally deactivated by introducing a `Fused`
|
||||
network element after a `Fiber` or a `Roadm` that doesn't need amplification.
|
||||
The amplifier is chosen in the EDFA list of the equipment library based on
|
||||
gain, power, and NF criteria. Only the EDFA that are marked
|
||||
`'allowed_for_design': true` are considered.
|
||||
|
||||
For amplifiers defined in the topology JSON input but whose gain = 0
|
||||
(placeholder), auto-design will set its gain automatically: see `power_mode` in
|
||||
the `Spans` library to find out how the gain is calculated.
|
||||
|
||||
Span configuration is performed as follows. It is not a list (which may change
|
||||
in later releases) and the user can only modify the value of existing
|
||||
parameters:
|
||||
|
||||
+------------------------+-----------+---------------------------------------------+
| field                  | type      | description                                 |
+========================+===========+=============================================+
| `power_mode`           | (boolean) | If false, gain mode: auto-design sets the   |
|                        |           | amplifier gain equal to the preceding span  |
|                        |           | loss, unless the amplifier already exists   |
|                        |           | with gain > 0 in the topology JSON input.   |
|                        |           | If true, power mode (recommended for        |
|                        |           | auto-design and power sweeps): auto-design  |
|                        |           | sets the amplifier power according to       |
|                        |           | delta_power_range_db. If the amplifier      |
|                        |           | exists with gain > 0 in the topology JSON   |
|                        |           | input, its gain is translated into a power  |
|                        |           | target per channel. Moreover, when          |
|                        |           | performing a power sweep (see               |
|                        |           | power_range_db in the SI configuration      |
|                        |           | library), the sweep is performed w/r/t      |
|                        |           | this power target, regardless of the        |
|                        |           | preceding amplifiers' power saturation or   |
|                        |           | limitations.                                |
+------------------------+-----------+---------------------------------------------+
| `delta_power_range_db` | (number)  | Auto-design only, power mode only.          |
|                        |           | Specifies the [min, max, step] power        |
|                        |           | excursion per span. It is a relative power  |
|                        |           | excursion w/r/t power_dbm + power_range_db  |
|                        |           | (power sweep, if applicable) defined in the |
|                        |           | SI configuration library. This relative     |
|                        |           | power excursion is 1/3 of the span loss     |
|                        |           | difference with the reference 20 dB span;   |
|                        |           | the 1/3 slope is derived from the GN model  |
|                        |           | equations. For example, a 23 dB span loss   |
|                        |           | will be set to 1 dB more power than a       |
|                        |           | 20 dB span loss. The 20 dB reference spans  |
|                        |           | will *always* be set to                     |
|                        |           | power = power_dbm + power_range_db.         |
|                        |           | To configure the same power in all spans,   |
|                        |           | use `[0, 0, 0]`: all spans are set to       |
|                        |           | power = power_dbm + power_range_db.         |
|                        |           | To configure the same power in all spans    |
|                        |           | and 3 dB more power just for the longest    |
|                        |           | spans, use `[0, 3, 3]`: the longest spans   |
|                        |           | are set to                                  |
|                        |           | power = power_dbm + power_range_db + 3.     |
|                        |           | To configure a 4 dB power range across all  |
|                        |           | spans in 0.5 dB steps, use `[-2, 2, 0.5]`:  |
|                        |           | a 17 dB span is set to                      |
|                        |           | power = power_dbm + power_range_db - 1,     |
|                        |           | a 20 dB span to                             |
|                        |           | power = power_dbm + power_range_db, and     |
|                        |           | a 23 dB span to                             |
|                        |           | power = power_dbm + power_range_db + 1.     |
+------------------------+-----------+---------------------------------------------+
| `max_length`           | (number)  | Fiber spans longer than max_length are      |
|                        |           | split. Useful to support high-level         |
|                        |           | topologies that do not specify in-line      |
|                        |           | amplification sites. For example, the       |
|                        |           | CORONET_Global_Topology.xls file defines    |
|                        |           | links longer than 1000 km between two       |
|                        |           | sites: they could not be simulated if they  |
|                        |           | were not split into shorter spans.          |
+------------------------+-----------+---------------------------------------------+
| `length_unit`          | "m"/"km"  | Unit of max_length.                         |
+------------------------+-----------+---------------------------------------------+
| `max_loss`             | (number)  | Not used in the current code                |
|                        |           | implementation.                             |
+------------------------+-----------+---------------------------------------------+
| `padding`              | (number)  | In dB. Minimum span loss; if a span's loss  |
|                        |           | is lower, an attenuator is placed before    |
|                        |           | the fiber with attenuation value            |
|                        |           | Fiber.att_in = max(0, padding - span_loss). |
|                        |           | A higher attenuation can be set manually    |
|                        |           | for a given fiber by filling in the         |
|                        |           | Fiber/params/att_in field in the topology   |
|                        |           | JSON input [1], but if                      |
|                        |           | span_loss = length * loss_coef + att_in     |
|                        |           | + con_in + con_out < padding, the specified |
|                        |           | att_in value is increased so that           |
|                        |           | span_loss = padding. Therefore it is not    |
|                        |           | possible to set span_loss < padding.        |
+------------------------+-----------+---------------------------------------------+
| `EOL`                  | (number)  | In dB. Fiber span loss ageing, applied to   |
|                        |           | all spans. The value is added to con_out    |
|                        |           | (fiber output connector), so the design     |
|                        |           | and the path feasibility are performed      |
|                        |           | with span_loss + EOL. EOL cannot be set     |
|                        |           | manually for a given fiber span (a          |
|                        |           | workaround is to specify a higher con_out   |
|                        |           | loss for that fiber).                       |
+------------------------+-----------+---------------------------------------------+
| `con_in`, `con_out`    | (number)  | Default values used when                    |
|                        |           | Fiber/params/con_in/out is None in the      |
|                        |           | topology input description. This default    |
|                        |           | is ignored if a Fiber/params/con_in/out     |
|                        |           | value is given in the topology for a        |
|                        |           | given Fiber.                                |
+------------------------+-----------+---------------------------------------------+

**[1]**

.. code-block:: json

    {
        "uid": "fiber (A1->A2)",
        "type": "Fiber",
        "type_variety": "SSMF",
        "params":
        {
            "type_variety": "SSMF",
            "length": 120.0,
            "loss_coef": 0.2,
            "length_units": "km",
            "att_in": 0,
            "con_in": 0,
            "con_out": 0
        }
    }
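
Putting these fields together, a span configuration entry in the equipment library
could look like the following sketch (all values, and the exact way the entry is
wrapped inside the equipment library file, are illustrative only):

.. code-block:: json

    {
        "power_mode": true,
        "delta_power_range_db": [0, 0, 0.5],
        "max_length": 150,
        "length_unit": "km",
        "max_loss": 28,
        "padding": 10,
        "EOL": 0,
        "con_in": 0,
        "con_out": 0
    }
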
ROADMs can be configured as follows. The user can only modify the value of
existing parameters:

+-------------------------+-----------+---------------------------------------------+
| field                   | type      | description                                 |
+=========================+===========+=============================================+
| `gain_mode_default_loss`| (number)  | Default value used when Roadm/params/loss   |
|                         |           | is None in the topology input description.  |
|                         |           | This default is ignored if a params/loss    |
|                         |           | value is given in the topology for a given  |
|                         |           | ROADM.                                      |
+-------------------------+-----------+---------------------------------------------+
| `power_mode_pref`       | (number)  | Power mode only. Auto-design sets the       |
|                         |           | power of ROADM ingress amplifiers to        |
|                         |           | power_dbm + power_range_db, regardless of   |
|                         |           | existing gain settings from the topology    |
|                         |           | JSON input. Auto-design sets the ROADM      |
|                         |           | loss so that its egress channel power       |
|                         |           | equals power_mode_pref, regardless of       |
|                         |           | existing loss settings from the topology    |
|                         |           | JSON input. This means that the output      |
|                         |           | power from a ROADM (and therefore its       |
|                         |           | OSNR contribution) is constant and does     |
|                         |           | not depend on the power_dbm and             |
|                         |           | power_range_db sweep settings. This         |
|                         |           | choice is meant to reflect some typical     |
|                         |           | control loop algorithms.                    |
+-------------------------+-----------+---------------------------------------------+
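
For example, a ROADM entry in the equipment library could be sketched as follows
(both values are illustrative only):

.. code-block:: json

    {
        "gain_mode_default_loss": 20,
        "power_mode_pref": -20
    }
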
The `SpectralInformation` object can be configured as follows. The user can
only modify the value of existing parameters. It defines a spectrum of N
identical carriers. While the code libraries allow for different carriers and
power levels, the current user parametrization only allows one carrier type and
one power/channel definition.

+----------------------+-----------+-------------------------------------------+
| field                | type      | description                               |
+======================+===========+===========================================+
| `f_min/max`          | (number)  | In Hz. Carrier min/max excursion.         |
+----------------------+-----------+-------------------------------------------+
| `baud_rate`          | (number)  | In Hz. Simulated baud rate.               |
+----------------------+-----------+-------------------------------------------+
| `spacing`            | (number)  | In Hz. Carrier spacing.                   |
+----------------------+-----------+-------------------------------------------+
| `roll_off`           | (number)  | Not used.                                 |
+----------------------+-----------+-------------------------------------------+
| `OSNR`               | (number)  | Not used.                                 |
+----------------------+-----------+-------------------------------------------+
| `bit_rate`           | (number)  | Not used.                                 |
+----------------------+-----------+-------------------------------------------+
| `tx_osnr`            | (number)  | In dB. OSNR out from transponder.         |
+----------------------+-----------+-------------------------------------------+
| `power_dbm`          | (number)  | Reference channel power. In gain mode     |
|                      |           | (see Spans/power_mode = false), all gain  |
|                      |           | settings are offset w/r/t this reference  |
|                      |           | power. In power mode, it is the reference |
|                      |           | power for Spans/delta_power_range_db.     |
|                      |           | For example, if delta_power_range_db =    |
|                      |           | `[0, 0, 0]`, the same power = power_dbm   |
|                      |           | is launched in every span. The network    |
|                      |           | design is performed with the power_dbm    |
|                      |           | value: even if a power sweep is defined   |
|                      |           | (see below), the design is not repeated.  |
+----------------------+-----------+-------------------------------------------+
| `power_range_db`     | (number)  | Power sweep excursion around power_dbm.   |
|                      |           | It is not the min and max channel power   |
|                      |           | values! The reference power becomes       |
|                      |           | power_dbm + power_range_db.               |
+----------------------+-----------+-------------------------------------------+
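
For example, an SI entry reproducing a comb of 32 Gbaud carriers on a 50 GHz grid at
0 dBm per channel could be sketched as follows (all values are illustrative, and the
min/max carrier excursion is spelled here as two separate `f_min`/`f_max` keys):

.. code-block:: json

    {
        "f_min": 191.35e12,
        "f_max": 196.1e12,
        "baud_rate": 32e9,
        "spacing": 50e9,
        "roll_off": 0.15,
        "OSNR": 15,
        "bit_rate": 100e9,
        "tx_osnr": 45,
        "power_dbm": 0,
        "power_range_db": 0
    }
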
The `transmission_main_example.py <examples/transmission_main_example.py>`_
script propagates a spectrum of channels at 32 Gbaud with 50 GHz spacing and 0
dBm/channel. These values are not yet parametrized but can be modified directly
in the script (via the SpectralInformation structure) to accommodate any baud
rate, spacing, power or channel count demand.

Use `examples/path_requests_run.py <examples/path_requests_run.py>`_ to run
multiple optimizations as follows:

.. code-block:: shell

    $ python path_requests_run.py -h
    Usage: path_requests_run.py [-h] [-v] [-o OUTPUT] [network_filename] [service_filename] [eqpt_filename]

The `network_filename` and `service_filename` can be an XLS or JSON file. The
`eqpt_filename` must be a JSON file.

To see an example of it, run:

.. code-block:: shell

    $ cd examples
    $ python path_requests_run.py meshTopologyExampleV2.xls meshTopologyExampleV2_services.json eqpt_config.json -o output_file.json

This program requires a list of connections to be estimated and the equipment
library. The program computes performances for the list of services (accepted
in JSON or Excel format) using the same spectrum propagation modules as
`transmission_main_example.py`. An explanation of the Excel template is provided
in `Excel_userguide.rst <Excel_userguide.rst#service-sheet>`_. A template for
the JSON format can be found here: `service-template.json
<service-template.json>`_.

Contributing
------------

``gnpy`` is looking for additional contributors, especially those with experience
planning and maintaining large-scale, real-world mesh optical networks.

To get involved, please contact James Powell
<james.powell@telecominfraproject.com> or Gert Grammel <ggrammel@juniper.net>.

``gnpy`` contributions are currently limited to members of `TIP
<http://telecominfraproject.com>`_. Membership is free and open to all.

See the `Onboarding Guide
<https://github.com/Telecominfraproject/gnpy/wiki/Onboarding-Guide>`_ for
specific details on code contributions.

See `AUTHORS.rst <AUTHORS.rst>`_ for past and present contributors.

Project Background
------------------

Data Centers are built upon interchangeable, highly standardized node and
network architectures rather than a sum of isolated solutions. This also
translates to optical networking. It leads to a push for multi-vendor optical
networks by disaggregating HW and SW functions and focusing on
interoperability. In this paradigm, the burden of responsibility for ensuring
the performance of such disaggregated open optical systems falls on the
operators. Consequently, operators and vendors are collaborating in defining
control models that can be readily used by off-the-shelf controllers. However,
node and network models are only part of the answer. To take reasonable
decisions, controllers need to incorporate logic to simulate and assess optical
performance. Hence, a vendor-independent optical quality estimator is required.
Given its vendor-agnostic nature, such an estimator needs to be driven by a
consortium of operators, system and component suppliers.

Founded in February 2016, the Telecom Infra Project (TIP) is an
engineering-focused initiative which is operator driven, but features
collaboration across operators, suppliers, developers, integrators, and
startups with the goal of disaggregating the traditional network deployment
approach. The group's ultimate goal is to help provide better connectivity for
communities all over the world as more people come online and demand more
bandwidth-intensive experiences like video, virtual reality and augmented
reality.

Within TIP, the Open Optical Packet Transport (OOPT) project group is chartered
with unbundling monolithic packet-optical network technologies in order to
unlock innovation and support new, more flexible connectivity paradigms.

The key to unbundling is the ability to accurately plan and predict the
performance of optical line systems based on an accurate simulation of optical
parameters. Under the OOPT umbrella, the Physical Simulation Environment (PSE)
working group set out to disrupt the planning landscape by providing an open
source simulation model which can be used freely across multiple vendor
implementations.

.. |docs| image:: https://readthedocs.org/projects/gnpy/badge/?version=develop
    :target: http://gnpy.readthedocs.io/en/develop/?badge=develop
    :alt: Documentation Status
    :scale: 100%

.. image:: https://pyup.io/repos/github/<TBD>/gnpy/shield.svg
    :target: https://pyup.io/repos/github/<TBD>/gnpy/
    :alt: Updates

.. |build| image:: https://travis-ci.com/Telecominfraproject/oopt-gnpy.svg?branch=develop
    :target: https://travis-ci.com/Telecominfraproject/oopt-gnpy
    :alt: Build Status
    :scale: 100%

TIP OOPT/PSE & PSE WG Charter
-----------------------------

Gaussian Noise (GN) modeling library.

* Free software: BSD license
* Documentation: https://gnpy.readthedocs.io

We believe that openly sharing ideas, specifications, and other intellectual
property is the key to maximizing innovation and reducing complexity.

TIP OOPT/PSE's goal is to build an end-to-end simulation environment which
defines the network models of the optical device transfer functions and their
parameters. This environment will provide validation of the optical
performance requirements for the TIP OLS building blocks.

- The model may be approximate or complete depending on the network complexity.
  Each model shall be validated against the proposed network scenario.
- The environment must be able to process network models from multiple vendors,
  and also allow users to pick any implementation in an open source framework.
- The PSE will influence and benefit from the innovation of the DTC, API, and
  OLS working groups.
- The PSE represents a step along the journey towards multi-layer optimization.

License
-------

``gnpy`` is distributed under a standard BSD 3-Clause License.

See `LICENSE <LICENSE>`__ for more details.

3
docs/.gitignore
vendored
3
docs/.gitignore
vendored
@@ -1,3 +0,0 @@
|
||||
/gnpy.rst
|
||||
/gnpy.*.rst
|
||||
/modules.rst
|
||||
179
docs/Makefile
179
docs/Makefile
@@ -1,177 +1,20 @@
|
||||
# Makefile for Sphinx documentation
|
||||
# Minimal makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
PAPER =
|
||||
SPHINXBUILD = python -msphinx
|
||||
SPHINXPROJ = GNpy
|
||||
SOURCEDIR = .
|
||||
BUILDDIR = _build
|
||||
|
||||
# User-friendly check for sphinx-build
|
||||
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
|
||||
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
|
||||
endif
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
PAPEROPT_letter = -D latex_paper_size=letter
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
# the i18n builder cannot share the environment and doctrees with the others
|
||||
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
|
||||
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
|
||||
|
||||
# Put it first so that "make" without argument is like "make help".
|
||||
help:
|
||||
@echo "Please use \`make <target>' where <target> is one of"
|
||||
@echo " html to make standalone HTML files"
|
||||
@echo " dirhtml to make HTML files named index.html in directories"
|
||||
@echo " singlehtml to make a single large HTML file"
|
||||
@echo " pickle to make pickle files"
|
||||
@echo " json to make JSON files"
|
||||
@echo " htmlhelp to make HTML files and a HTML help project"
|
||||
@echo " qthelp to make HTML files and a qthelp project"
|
||||
@echo " devhelp to make HTML files and a Devhelp project"
|
||||
@echo " epub to make an epub"
|
||||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
||||
@echo " latexpdf to make LaTeX files and run them through pdflatex"
|
||||
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
|
||||
@echo " text to make text files"
|
||||
@echo " man to make manual pages"
|
||||
@echo " texinfo to make Texinfo files"
|
||||
@echo " info to make Texinfo files and run them through makeinfo"
|
||||
@echo " gettext to make PO message catalogs"
|
||||
@echo " changes to make an overview of all changed/added/deprecated items"
|
||||
@echo " xml to make Docutils-native XML files"
|
||||
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
|
||||
@echo " linkcheck to check all external links for integrity"
|
||||
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
||||
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
clean:
|
||||
rm -rf $(BUILDDIR)/*
|
||||
.PHONY: help Makefile
|
||||
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
dirhtml:
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||
|
||||
singlehtml:
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
pickle:
|
||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||
@echo
|
||||
@echo "Build finished; now you can process the pickle files."
|
||||
|
||||
json:
|
||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||
@echo
|
||||
@echo "Build finished; now you can process the JSON files."
|
||||
|
||||
htmlhelp:
|
||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||
|
||||
qthelp:
|
||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
|
||||
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/gnpy.qhcp"
|
||||
@echo "To view the help file:"
|
||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/gnpy.qhc"
|
||||
|
||||
devhelp:
|
||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
||||
@echo
|
||||
@echo "Build finished."
|
||||
@echo "To view the help file:"
|
||||
@echo "# mkdir -p $$HOME/.local/share/devhelp/gnpy"
|
||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/gnpy"
|
||||
@echo "# devhelp"
|
||||
|
||||
epub:
|
||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
||||
@echo
|
||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
||||
|
||||
latex:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
latexpdf:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
latexpdfja:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through platex and dvipdfmx..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
text:
|
||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
||||
@echo
|
||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
||||
|
||||
man:
|
||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
||||
@echo
|
||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
||||
|
||||
texinfo:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo
|
||||
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
|
||||
@echo "Run \`make' in that directory to run these through makeinfo" \
|
||||
"(use \`make info' here to do that automatically)."
|
||||
|
||||
info:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo "Running Texinfo files through makeinfo..."
|
||||
make -C $(BUILDDIR)/texinfo info
|
||||
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
|
||||
|
||||
gettext:
|
||||
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
|
||||
@echo
|
||||
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
|
||||
|
||||
changes:
|
||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||
@echo
|
||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||
|
||||
linkcheck:
|
||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||
@echo
|
||||
@echo "Link check complete; look for any errors in the above output " \
|
||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||
|
||||
doctest:
|
||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
||||
|
||||
xml:
|
||||
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
|
||||
@echo
|
||||
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
|
||||
|
||||
pseudoxml:
|
||||
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
|
||||
@echo
|
||||
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
|
||||
# Catch-all target: route all unknown targets to Sphinx using the new
|
||||
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
|
||||
%: Makefile
|
||||
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
@@ -1 +0,0 @@
|
||||
.. include:: ../AUTHORS.rst
|
||||
1850
docs/biblio.bib
Normal file
1850
docs/biblio.bib
Normal file
File diff suppressed because it is too large
Load Diff
256
docs/conf.py
Executable file → Normal file
256
docs/conf.py
Executable file → Normal file
@@ -1,8 +1,8 @@
|
||||
#!/usr/bin/env python
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# gnpy documentation build configuration file, created by
|
||||
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
|
||||
# sphinx-quickstart on Mon Dec 18 14:41:01 2017.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
@@ -13,263 +13,165 @@
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
import os
|
||||
import sys
|
||||
sys.path.insert(0, os.path.abspath('../'))
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another
|
||||
# directory, add these directories to sys.path here. If the directory is
|
||||
# relative to the documentation root, use os.path.abspath to make it
|
||||
# absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# Get the project root dir, which is the parent dir of this
|
||||
cwd = os.getcwd()
|
||||
project_root = os.path.dirname(cwd)
|
||||
|
||||
# Insert the project root dir as the first element in the PYTHONPATH.
|
||||
# This lets us ensure that the source package is imported, and that its
|
||||
# version is used.
|
||||
sys.path.insert(0, project_root)
|
||||
|
||||
import gnpy
|
||||
|
||||
# -- General configuration ---------------------------------------------
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = ['sphinx.ext.autodoc',
|
||||
'sphinx.ext.mathjax',
|
||||
'sphinx.ext.githubpages','sphinxcontrib.bibtex']
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
#
|
||||
source_suffix = ['.rst', '.md']
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'gnpy'
|
||||
copyright = u"2017, <TBD>"
|
||||
project = 'gnpy'
|
||||
copyright = '2018, Telecom InfraProject - OOPT PSE Group'
|
||||
author = 'Telecom InfraProject - OOPT PSE Group'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement
|
||||
# for |version| and |release|, also used in various other places throughout
|
||||
# the built documents.
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = gnpy.__version__
|
||||
version = '0.1'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = gnpy.__version__
|
||||
release = '0.1'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to
|
||||
# some non-false value, then it is used:
|
||||
#today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
# This patterns also effect to html_static_path and html_extra_path
|
||||
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built
|
||||
# documents.
|
||||
#keep_warnings = False
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
|
||||
|
||||
# -- Options for HTML output -------------------------------------------
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
on_rtd = os.environ.get('READTHEDOCS') == 'True'
|
||||
if on_rtd:
|
||||
html_theme = 'default'
|
||||
else:
|
||||
html_theme = 'alabaster'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a
|
||||
# theme further. For a list of options available for each theme, see the
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#
|
||||
# html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as
|
||||
# html_title.
|
||||
#html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the
|
||||
# top of the sidebar.
|
||||
#html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon
|
||||
# of the docs. This file should be a Windows icon file (.ico) being
|
||||
# 16x16 or 32x32 pixels large.
|
||||
#html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets)
|
||||
# here, relative to this directory. They are copied after the builtin
|
||||
# static files, so a file named "default.css" will overwrite the builtin
|
||||
# "default.css".
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page
|
||||
# bottom, using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names
|
||||
# Custom sidebar templates, must be a dictionary that maps document names
|
||||
# to template names.
|
||||
#html_additional_pages = {}
|
||||
#
|
||||
# This is required for the alabaster theme
|
||||
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
|
||||
html_sidebars = {
|
||||
'**': [
|
||||
'about.html',
|
||||
'navigation.html',
|
||||
'relations.html', # needs 'show_related': True theme option to display
|
||||
'searchbox.html',
|
||||
'donate.html',
|
||||
]
|
||||
}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer.
|
||||
# Default is True.
|
||||
#html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer.
|
||||
# Default is True.
|
||||
#html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages
|
||||
# will contain a <link> tag referring to it. The value of this option
|
||||
# must be the base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
# -- Options for HTMLHelp output ------------------------------------------
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'gnpydoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output ------------------------------------------
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#
|
||||
# 'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#
|
||||
# 'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#
|
||||
# 'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#
|
||||
# 'figure_align': 'htbp',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass
|
||||
# [howto/manual]).
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
('index', 'gnpy.tex',
|
||||
u'gnpy Documentation',
|
||||
u'<TBD>', 'manual'),
|
||||
(master_doc, 'gnpy.tex', 'gnpy Documentation',
|
||||
'Telecom InfraProject - OOPT PSE Group', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at
|
||||
# the top of the title page.
|
||||
#latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings
|
||||
# are parts, not chapters.
|
||||
#latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output ------------------------------------
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('index', 'gnpy',
|
||||
u'gnpy Documentation',
|
||||
[u'<TBD>'], 1)
|
||||
(master_doc, 'gnpy', 'gnpy Documentation',
|
||||
[author], 1)
|
||||
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output ----------------------------------------
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
('index', 'gnpy',
|
||||
u'gnpy Documentation',
|
||||
u'<TBD>',
|
||||
'gnpy',
|
||||
'One line description of project.',
|
||||
(master_doc, 'gnpy', 'gnpy Documentation',
|
||||
author, 'gnpy', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#texinfo_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#texinfo_domain_indices = True
|
||||
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
#texinfo_show_urls = 'footnote'
|
||||
|
||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||
#texinfo_no_detailmenu = False
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
.. include:: ../CONTRIBUTING.rst
|
||||
@@ -1 +0,0 @@
|
||||
.. include:: ../HISTORY.rst
|
||||
@@ -1,18 +1,33 @@
|
||||
Welcome to gnpy's documentation!
|
||||
======================================
|
||||
.. gnpy documentation master file, created by
|
||||
sphinx-quickstart on Mon Dec 18 14:41:01 2017.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Contents:
|
||||
Welcome to gnpy's documentation!
|
||||
================================
|
||||
|
||||
**gnpy is an open-source, community-developed library for building route planning
|
||||
and optimization tools in real-world mesh optical networks.**
|
||||
|
||||
`gnpy <http://github.com/telecominfraproject/gnpy>`_ is:
|
||||
|
||||
- a sponsored project of the `OOPT/PSE <http://telecominfraproject.com/project-groups-2/backhaul-projects/open-optical-packet-transport/>`_ working group of the `Telecom Infra Project <http://telecominfraproject.com>`_.
|
||||
- fully community-driven, fully open source library
|
||||
- driven by a consortium of operators, vendors, and academic researchers
|
||||
- intended for rapid development of production-grade route planning tools
|
||||
- easily extensible to include custom network elements
|
||||
- performant to the scale of real-world mesh optical networks
|
||||
|
||||
Documentation
|
||||
=============
|
||||
|
||||
The following pages are meant to describe specific implementation details and
|
||||
modeling assumptions behind gnpy.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
readme
|
||||
installation
|
||||
usage
|
||||
modules
|
||||
contributing
|
||||
authors
|
||||
history
|
||||
model
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
@@ -20,3 +35,58 @@ Indices and tables
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
|
||||
Contributors in alphabetical order
|
||||
==================================
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Name | Surname | Affiliation | Contact |
|
||||
+==========+============+=======================+======================================+
|
||||
| Alessio | Ferrari | Politecnico di Torino | alessio.ferrari@polito.it |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Anders | Lindgren | Telia Company | Anders.X.Lindgren@teliacompany.com |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Andrea | d'Amico | Politecnico di Torino | andrea.damico@polito.it |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Brian | Taylor | Facebook | briantaylor@fb.com |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| David | Boertjes | Ciena | dboertje@ciena.com |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Esther | Le Rouzic | Orange | esther.lerouzic@orange.com |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Gabriele | Galimberti | Cisco | ggalimbe@cisco.com |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Gert | Grammel | Juniper Networks | ggrammel@juniper.net |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Gilad | Goldfarb | Facebook | giladg@fb.com |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| James | Powell | Telecom Infra Project | james.powell@telecominfraproject.com |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Jeanluc | Auge | Orange | jeanluc.auge@orange.com |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Jonas | Martensson | RISE Research Sweden | jonas.martensson@ri.se |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Mattia | Cantono | Politecnico di Torino | mattia.cantono@polito.it |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Miguel | Garrich | University Catalunya | miquel.garrich@upct.es |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Stefan | Melin | Telia Company | Stefan.Melin@teliacompany.com |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Raj | Nagarajan | Lumentum | raj.nagarajan@lumentum.com |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
| Vittorio | Curri | Politecnico di Torino | vittorio.curri@polito.it |
|
||||
+----------+------------+-----------------------+--------------------------------------+
|
||||
|
||||
--------------
|
||||
|
||||
- Goal is to build an end-to-end simulation environment which defines the
|
||||
network models of the optical device transfer functions and their parameters.
|
||||
This environment will provide validation of the optical performance
|
||||
requirements for the TIP OLS building blocks.
|
||||
- The model may be approximate or complete depending on the network complexity.
|
||||
Each model shall be validated against the proposed network scenario.
|
||||
- The environment must be able to process network models from multiple vendors,
|
||||
and also allow users to pick any implementation in an open source framework.
|
||||
- The PSE will influence and benefit from the innovation of the DTC, API, and
|
||||
OLS working groups.
|
||||
- The PSE represents a step along the journey towards multi-layer optimization.
|
||||
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
.. highlight:: shell
|
||||
|
||||
============
|
||||
Installation
|
||||
============
|
||||
|
||||
|
||||
Stable release
|
||||
--------------
|
||||
|
||||
To install gnpy, run this command in your terminal:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ pip install gnpy
|
||||
|
||||
This is the preferred method to install gnpy, as it will always install the most recent stable release.
|
||||
|
||||
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
|
||||
you through the process.
|
||||
|
||||
.. _pip: https://pip.pypa.io
|
||||
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
|
||||
|
||||
|
||||
From sources
|
||||
------------
|
||||
|
||||
The sources for gnpy can be downloaded from the `Github repo`_.
|
||||
|
||||
You can either clone the public repository:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ git clone git://github.com/<TBD>/gnpy
|
||||
|
||||
Or download the `tarball`_:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ curl -OL https://github.com/<TBD>/gnpy/tarball/master
|
||||
|
||||
Once you have a copy of the source, you can install it with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ python setup.py install
|
||||
|
||||
|
||||
.. _Github repo: https://github.com/<TBD>/gnpy
|
||||
.. _tarball: https://github.com/<TBD>/gnpy/tarball/master
|
||||
234
docs/make.bat
234
docs/make.bat
@@ -1,242 +1,36 @@
|
||||
@ECHO OFF
|
||||
|
||||
pushd %~dp0
|
||||
|
||||
REM Command file for Sphinx documentation
|
||||
|
||||
if "%SPHINXBUILD%" == "" (
|
||||
set SPHINXBUILD=sphinx-build
|
||||
set SPHINXBUILD=python -msphinx
|
||||
)
|
||||
set SOURCEDIR=.
|
||||
set BUILDDIR=_build
|
||||
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
|
||||
set I18NSPHINXOPTS=%SPHINXOPTS% .
|
||||
if NOT "%PAPER%" == "" (
|
||||
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
|
||||
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
|
||||
)
|
||||
set SPHINXPROJ=GNpy
|
||||
|
||||
if "%1" == "" goto help
|
||||
|
||||
if "%1" == "help" (
|
||||
:help
|
||||
echo.Please use `make ^<target^>` where ^<target^> is one of
|
||||
echo. html to make standalone HTML files
|
||||
echo. dirhtml to make HTML files named index.html in directories
|
||||
echo. singlehtml to make a single large HTML file
|
||||
echo. pickle to make pickle files
|
||||
echo. json to make JSON files
|
||||
echo. htmlhelp to make HTML files and a HTML help project
|
||||
echo. qthelp to make HTML files and a qthelp project
|
||||
echo. devhelp to make HTML files and a Devhelp project
|
||||
echo. epub to make an epub
|
||||
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
|
||||
echo. text to make text files
|
||||
echo. man to make manual pages
|
||||
echo. texinfo to make Texinfo files
|
||||
echo. gettext to make PO message catalogs
|
||||
echo. changes to make an overview over all changed/added/deprecated items
|
||||
echo. xml to make Docutils-native XML files
|
||||
echo. pseudoxml to make pseudoxml-XML files for display purposes
|
||||
echo. linkcheck to check all external links for integrity
|
||||
echo. doctest to run all doctests embedded in the documentation if enabled
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "clean" (
|
||||
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
|
||||
del /q /s %BUILDDIR%\*
|
||||
goto end
|
||||
)
|
||||
|
||||
|
||||
%SPHINXBUILD% 2> nul
|
||||
%SPHINXBUILD% >NUL 2>NUL
|
||||
if errorlevel 9009 (
|
||||
echo.
|
||||
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
|
||||
echo.installed, then set the SPHINXBUILD environment variable to point
|
||||
echo.to the full path of the 'sphinx-build' executable. Alternatively you
|
||||
echo.may add the Sphinx directory to PATH.
|
||||
echo.The Sphinx module was not found. Make sure you have Sphinx installed,
|
||||
echo.then set the SPHINXBUILD environment variable to point to the full
|
||||
echo.path of the 'sphinx-build' executable. Alternatively you may add the
|
||||
echo.Sphinx directory to PATH.
|
||||
echo.
|
||||
echo.If you don't have Sphinx installed, grab it from
|
||||
echo.http://sphinx-doc.org/
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
if "%1" == "html" (
|
||||
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
|
||||
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "dirhtml" (
|
||||
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "singlehtml" (
|
||||
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pickle" (
|
||||
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the pickle files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "json" (
|
||||
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the JSON files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "htmlhelp" (
|
||||
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run HTML Help Workshop with the ^
|
||||
.hhp project file in %BUILDDIR%/htmlhelp.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "qthelp" (
|
||||
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run "qcollectiongenerator" with the ^
|
||||
.qhcp project file in %BUILDDIR%/qthelp, like this:
|
||||
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\gnpy.qhcp
|
||||
echo.To view the help file:
|
||||
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\gnpy.ghc
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "devhelp" (
|
||||
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "epub" (
|
||||
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The epub file is in %BUILDDIR%/epub.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latex" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdf" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf
|
||||
cd %BUILDDIR%/..
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdfja" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf-ja
|
||||
cd %BUILDDIR%/..
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "text" (
|
||||
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The text files are in %BUILDDIR%/text.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "man" (
|
||||
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The manual pages are in %BUILDDIR%/man.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "texinfo" (
|
||||
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "gettext" (
|
||||
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "changes" (
|
||||
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.The overview file is in %BUILDDIR%/changes.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "linkcheck" (
|
||||
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Link check complete; look for any errors in the above output ^
|
||||
or in %BUILDDIR%/linkcheck/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "doctest" (
|
||||
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of doctests in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/doctest/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "xml" (
|
||||
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The XML files are in %BUILDDIR%/xml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pseudoxml" (
|
||||
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
|
||||
goto end
|
||||
)
|
||||
:help
|
||||
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
|
||||
|
||||
:end
|
||||
popd
|
||||
|
||||
146
docs/model.rst
Normal file
146
docs/model.rst
Normal file
@@ -0,0 +1,146 @@
|
||||
The QoT estimation in the PSE framework of TIP-OOPT
=======================================================

QoT-E including ASE noise and NLI accumulation
----------------------------------------------

The operations of the PSE simulative framework are based on the capability to
estimate the QoT of one or more channels operating lightpaths over a given
network route. For backbone transport networks, we can suppose that
transceivers operate polarization-division-multiplexed multilevel
modulation formats with DSP-based coherent receivers, including equalization.
For the optical links, we focus on state-of-the-art amplified and uncompensated
fiber links connecting network nodes, including ROADMs where add and drop
operations on data traffic are performed. In such a transmission scenario, it
is well accepted
:cite:`vacondio_nonlinear_2012,bononi_modeling_2012,carena_modeling_2012,mecozzi_nonlinear_2012,secondini_analytical_2012,johannisson_perturbation_2013,dar_properties_2013,serena_alternative_2013,secondini_achievable_2013,poggiolini_gn-model_2014,dar_accumulation_2014,poggiolini_analytical_2011,savory_approximations_2013,bononi_single-_2013,johannisson_modeling_2014`
to assume that transmission performance is limited by the amplified
spontaneous emission (ASE) noise generated by optical amplifiers and
by nonlinear propagation effects: the accumulation of a Gaussian disturbance
defined as nonlinear interference (NLI) and the generation of phase noise.
State-of-the-art DSP in commercial transceivers is typically able to
compensate for most of the phase noise through carrier-phase estimator
(CPE) algorithms, for modulation formats with cardinality up to 16 per
polarization state
:cite:`poggiolini_recent_2017,schmidt_experimental_2015,fehenberger_experimental_2016`.
So, for backbone networks covering medium-to-wide geographical areas, we
can suppose that propagation is limited by the accumulation of two
Gaussian disturbances: the ASE noise and the NLI. Additional impairments
such as filtering effects introduced by ROADMs can be considered as
additional equivalent power penalties depending on the ratio between the
channel bandwidth and the ROADM filters, and on the number of traversed
ROADMs (hops) of the route under analysis. Modeling the two major
sources of impairment as Gaussian disturbances, and the receivers being
*coherent*, the unique QoT parameter determining the bit error rate
(BER) for the considered transmission scenario is the generalized
signal-to-noise ratio (SNR), defined as

.. math::

   {\text{SNR}}= L_F \frac{P_{\text{ch}}}{P_{\text{ASE}}+P_{\text{NLI}}} = L_F \left(\frac{1}{{\text{SNR}}_{\text{LIN}}}+\frac{1}{{\text{SNR}}_{\text{NL}}}\right)^{-1}

where :math:`P_{\text{ch}}` is the channel power,
:math:`P_{\text{ASE}}` and :math:`P_{\text{NLI}}` are the power levels of the disturbances
in the channel bandwidth for ASE noise and NLI, respectively, and
:math:`L_F` is a parameter assuming values smaller than or equal to one
that summarizes equivalent power penalties such as
filtering effects. Note that for state-of-the-art equipment, filtering
effects can typically be neglected over routes with few hops
:cite:`rahman_mitigation_2014,foggi_overcoming_2015`.
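For illustration only, the relation above can be sketched in a few lines of
Python once the three powers are known in the same bandwidth (this is a
minimal sketch, not gnpy code; the function name and the example power levels
are made up):

.. code-block:: python

    import math

    def db(x):
        """Linear ratio to dB."""
        return 10 * math.log10(x)

    def generalized_snr(p_ch, p_ase, p_nli, l_f=1.0):
        """Generalized SNR; powers in linear units over the channel bandwidth."""
        snr_lin = p_ch / p_ase   # ASE-limited term
        snr_nl = p_ch / p_nli    # NLI-limited term
        # the two Gaussian disturbances add in power, so the inverse SNRs add
        return l_f / (1 / snr_lin + 1 / snr_nl)

    # example: 0 dBm channel, -20 dBm of ASE and roughly -23 dBm of NLI
    print(f'{db(generalized_snr(1.0, 0.01, 0.005)):.2f} dB')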
To properly estimate :math:`P_{\text{ch}}` and :math:`P_{\text{ASE}}`,
the transmitted power at the beginning of the considered route must be
known, and losses, amplifier gains and noise figures, including their
variation with frequency, must be characterized. So, the evaluation of
:math:`{\text{SNR}}_{\text{LIN}}` *just* requires an accurate
knowledge of the equipment, which is not a trivial aspect, but is not
related to physical-model issues. For the evaluation of the NLI, several
models have been proposed and validated in the technical literature
:cite:`vacondio_nonlinear_2012,bononi_modeling_2012,carena_modeling_2012,mecozzi_nonlinear_2012,secondini_analytical_2012,johannisson_perturbation_2013,dar_properties_2013,serena_alternative_2013,secondini_achievable_2013,poggiolini_gn-model_2014,dar_accumulation_2014,poggiolini_analytical_2011,savory_approximations_2013,bononi_single-_2013,johannisson_modeling_2014`.
The decision about which model to test within the PSE activities was
driven by the requirements of the entire PSE framework:

i.  the model must be *local*, i.e., related individually to each network
    element (i.e. fiber span) generating NLI, independently of preceding and
    subsequent elements; and
ii. the related computational time must be compatible with interactive
    operations.

So, the choice fell on the Gaussian Noise
(GN) model with incoherent accumulation of NLI over fiber spans
:cite:`poggiolini_gn-model_2014`. We implemented both the
exact GN-model evaluation of the NLI based on a double integral (Eq. (11) of
:cite:`poggiolini_gn-model_2014`) and its analytical
approximation (Eqs. (120)-(121) of
:cite:`poggiolini_analytical_2011`). We performed several
validation analyses comparing the results of the two implementations with
split-step simulations over wide bandwidths
:cite:`pilori_ffss_2017`, and the results clearly showed that,
for fiber types with chromatic dispersion roughly larger than 4
ps/nm/km, the analytical approximation ensures excellent accuracy
with a computational time compatible with real-time operations.
The Gaussian Noise Model to evaluate the NLI
--------------------------------------------

As previously stated, fiber propagation of multilevel modulation formats
relying on polarization-division multiplexing generates impairments that
can be summarized as a disturbance called nonlinear interference (NLI), when
exploiting a DSP-based coherent receiver, as in all state-of-the-art equipment.
From a practical point of view, the NLI can be modeled as an additive Gaussian
random process added by each fiber span, whose strength depends on the cube
of the input power spectral density and on the fiber-span parameters.

Since the market introduction in 2007 of the first transponder based on
such a transmission technique, the scientific community has worked intensively
to characterize its propagation behavior. First,
the role of in-line chromatic dispersion compensation was investigated,
with the conclusion that, besides being unnecessary, it is indeed detrimental to
performance :cite:`curri_dispersion_2008`. Then, it was observed that
the fiber propagation impairments are practically summarized by the NLI alone,
since all the other phenomena are compensated for by the blind equalizer
implemented in the receiver DSP :cite:`carena_statistical_2010`. Once these
assessments had been accepted by the community, several prestigious research
groups started working on analytical models able to estimate
the NLI accumulation, and consequently the generalized SNR that sets the
BER, according to the transponder BER vs. SNR performance. Many models
delivering different levels of accuracy have been developed and validated. As
previously clarified, for the purposes of the PSE framework, the GN-model with
incoherent accumulation of NLI over fiber spans has been selected as adequate.
The first reason for this choice is that it is a *local* model,
related to each fiber span independently of the preceding and succeeding
network elements. The other model characteristic driving the choice is the
availability of a closed form, which permits the real-time
evaluation required by the PSE framework. For a detailed derivation of the
model, please refer to :cite:`poggiolini_analytical_2011`; a qualitative
description can be summarized as follows. The GN-model assumes that
the channel comb propagating in the fiber is well approximated by unpolarized,
spectrally shaped Gaussian noise. In such a scenario, supposing to rely - as in
state-of-the-art equipment - on a receiver entirely compensating for linear
propagation effects, propagation in the fiber only excites the four-wave mixing
(FWM) process among the continuum of tones occupying the bandwidth. This
FWM generates an unpolarized complex Gaussian disturbance in each spectral slot
that can be easily evaluated by extending the FWM theory from a set of discrete
tones - the standard FWM theory introduced back in the 90s by Inoue
:cite:`Innoue-FWM` - to a continuum of tones, possibly spectrally shaped.
Signals propagating in the fiber are not equivalent to Gaussian noise, but,
thanks to the absence of in-line compensation for chromatic dispersion, they
become so over short distances. So, the Gaussian noise model with incoherent
accumulation of NLI has extensively proved to be a quick yet accurate and
conservative tool to estimate the propagation impairments of fiber propagation.
Note that the GN-model has not been derived with the aim of an *exact*
performance estimation, but to pursue a conservative performance prediction.
So, considering these characteristics, the fact that the NLI is always a
secondary effect with respect to the ASE noise accumulation, and - most
importantly - that the linear propagation parameters (losses, gains and
noise figures) are typically known only within a variation range, a QoT estimator based on the
GN model is adequate to deliver performance predictions in terms of a
reasonable SNR range, rather than an exact value. As a final remark, it must be
clarified that the GN-model is adequate when relying on a relatively
narrow bandwidth, up to a few THz. When exceeding such a bandwidth occupation, the
GN-model must be generalized by introducing the interaction with stimulated
Raman scattering in order to give a proper estimation for all channels
:cite:`cantono2018modeling`. This will be the main upgrade required within the
PSE framework.
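As a rough illustration of the incoherent-accumulation assumption (a minimal
sketch with made-up per-span noise powers, not the gnpy implementation), the
ASE and NLI contributions of each span are simply summed in power at the
receiver, and the generalized SNR follows from the accumulated totals:

.. code-block:: python

    def route_snr(spans, p_ch, l_f=1.0):
        """spans: iterable of (p_ase, p_nli) added by each span, linear units."""
        p_ase = sum(ase for ase, _ in spans)   # ASE accumulates span after span
        p_nli = sum(nli for _, nli in spans)   # incoherent GN model: NLI adds in power
        return l_f * p_ch / (p_ase + p_nli)

    route = [(2e-3, 1e-3)] * 10   # ten identical spans, hypothetical noise powers
    print(route_snr(route, p_ch=1.0))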
.. bibliography:: biblio.bib
@@ -1 +0,0 @@
.. include:: ../README.rst
70 docs/source/gnpy.core.rst Normal file
@@ -0,0 +1,70 @@
gnpy\.core package
==================

Submodules
----------

gnpy\.core\.elements module
---------------------------

.. automodule:: gnpy.core.elements
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.execute module
--------------------------

.. automodule:: gnpy.core.execute
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.info module
-----------------------

.. automodule:: gnpy.core.info
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.network module
--------------------------

.. automodule:: gnpy.core.network
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.node module
-----------------------

.. automodule:: gnpy.core.node
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.units module
------------------------

.. automodule:: gnpy.core.units
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.utils module
------------------------

.. automodule:: gnpy.core.utils
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: gnpy.core
    :members:
    :undoc-members:
    :show-inheritance:
17 docs/source/gnpy.rst Normal file
@@ -0,0 +1,17 @@
gnpy package
============

Subpackages
-----------

.. toctree::

    gnpy.core

Module contents
---------------

.. automodule:: gnpy
    :members:
    :undoc-members:
    :show-inheritance:
7 docs/source/modules.rst Normal file
@@ -0,0 +1,7 @@
gnpy
====

.. toctree::
    :maxdepth: 4

    gnpy
@@ -1,7 +0,0 @@
=====
Usage
=====

To use gnpy in a project::

    import gnpy
10278 examples/CORONET_Global_Topology.json Normal file
File diff suppressed because it is too large.
BIN examples/CORONET_Global_Topology.xls Normal file
Binary file not shown.
0 examples/__init__.py Normal file
103 examples/create_eqpt_sheet.py Normal file
@@ -0,0 +1,103 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
create_eqpt_sheet.py
====================

XLS parser that can be called to create a "City" column in the "Eqpt" sheet.

If not present in the "Nodes" sheet, the "Type" column will be implicitly
determined based on the topology.
"""

from sys import exit
try:
    from xlrd import open_workbook
except ModuleNotFoundError:
    exit('Required: `pip install xlrd`')
from argparse import ArgumentParser
from collections import namedtuple, defaultdict


Shortlink = namedtuple('Link', 'src dest')

Shortnode = namedtuple('Node', 'nodename eqt')

parser = ArgumentParser()
parser.add_argument('workbook', nargs='?', default='meshTopologyExampleV2.xls',
                    help='create the mandatory columns in Eqpt sheet')
all_rows = lambda sh, start=0: (sh.row(x) for x in range(start, sh.nrows))


def read_excel(input_filename):
    with open_workbook(input_filename) as wb:
        # reading Links sheet
        links_sheet = wb.sheet_by_name('Links')
        links = []
        nodeoccuranceinlinks = []
        links_by_src = defaultdict(list)
        links_by_dest = defaultdict(list)
        for row in all_rows(links_sheet, start=5):
            links.append(Shortlink(row[0].value, row[1].value))
            links_by_src[row[0].value].append(Shortnode(row[1].value, ''))
            links_by_dest[row[1].value].append(Shortnode(row[0].value, ''))
            nodeoccuranceinlinks.append(row[0].value)
            nodeoccuranceinlinks.append(row[1].value)

        # reading Nodes sheet
        nodes_sheet = wb.sheet_by_name('Nodes')
        nodes = []
        node_degree = []
        for row in all_rows(nodes_sheet, start=5):
            temp_eqt = row[6].value
            # verify node degree to confirm eqt type
            node_degree.append(nodeoccuranceinlinks.count(row[0].value))
            if temp_eqt.lower() == 'ila' and nodeoccuranceinlinks.count(row[0].value) != 2:
                # a node with degree other than 2 cannot be an in-line amplifier site
                print(f'Inconsistency: node {row[0].value} has degree '
                      f'{node_degree[-1]} and can not be an ILA ... replaced by ROADM')
                temp_eqt = 'ROADM'
            if temp_eqt == '' and nodeoccuranceinlinks.count(row[0].value) == 2:
                temp_eqt = 'ILA'
            if temp_eqt == '' and nodeoccuranceinlinks.count(row[0].value) != 2:
                temp_eqt = 'ROADM'
            nodes.append(Shortnode(row[0].value, temp_eqt))
            print(f'reading: node {nodes[-1].nodename} eqpt {temp_eqt}')
    return links, nodes, links_by_src, links_by_dest


def create_eqt_template(links, nodes, links_by_src, links_by_dest, input_filename):
    output_filename = f'{input_filename[:-4]}_eqpt_sheet.txt'
    with open(output_filename, 'w', encoding='utf-8') as my_file:
        # print header similar to excel
        my_file.write('OPTIONAL\n\n\n\
\t\tNode a egress amp (from a to z)\t\t\t\t\tNode a ingress amp (from z to a) \
\nNode A \tNode Z \tamp type \tatt_in \tamp gain \ttilt \tatt_out\
amp type \tatt_in \tamp gain \ttilt \tatt_out\n')

        tab = []
        temp = []
        i = 0
        for lk in links:
            if [e for n, e in nodes if n == lk.src][0] != 'FUSED':
                temp = [lk.src, lk.dest]
                tab.append(temp)
                my_file.write(f'{temp[0]}\t{temp[1]}\n')
        for n in nodes:
            if n.eqt.lower() == 'roadm':
                for src in links_by_dest[n.nodename]:
                    temp = [n.nodename, src.nodename]
                    tab.append(temp)
                    my_file.write(f'{temp[0]}\t{temp[1]}\n')
        i = i + 1
    print(f'File {output_filename} successfully created with Node A - Node Z '
          'entries for Eqpt sheet in excel file.')


if __name__ == '__main__':
    args = parser.parse_args()
    input_filename = args.workbook
    links, nodes, links_by_src, links_by_dest = read_excel(input_filename)
    create_eqt_template(links, nodes, links_by_src, links_by_dest, input_filename)
296 examples/default_edfa_config.json Normal file
@@ -0,0 +1,296 @@
{
|
||||
"nf_ripple": [
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0
|
||||
],
|
||||
"gain_ripple": [
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0
|
||||
],
|
||||
"dgt": [
|
||||
2.714526681131686,
|
||||
2.705443819238505,
|
||||
2.6947834587664494,
|
||||
2.6841217449620203,
|
||||
2.6681935771243177,
|
||||
2.6521732021128046,
|
||||
2.630396440815385,
|
||||
2.602860350286428,
|
||||
2.5696460593920065,
|
||||
2.5364027376452056,
|
||||
2.499446286796604,
|
||||
2.4587748041127506,
|
||||
2.414398437185221,
|
||||
2.3699990328716107,
|
||||
2.322373696229342,
|
||||
2.271520771371253,
|
||||
2.2174389328192197,
|
||||
2.16337565384239,
|
||||
2.1183028432496016,
|
||||
2.082225099873648,
|
||||
2.055100772005235,
|
||||
2.0279625371819305,
|
||||
2.0008103857988204,
|
||||
1.9736443063300082,
|
||||
1.9482128147680253,
|
||||
1.9245345552113182,
|
||||
1.9026104247588487,
|
||||
1.8806927939516411,
|
||||
1.862235672444246,
|
||||
1.847275503201129,
|
||||
1.835814081380705,
|
||||
1.824381436842932,
|
||||
1.8139629377087627,
|
||||
1.8045606557581335,
|
||||
1.7961751115773796,
|
||||
1.7877868031023945,
|
||||
1.7793941781790852,
|
||||
1.7709972329654864,
|
||||
1.7625959636196327,
|
||||
1.7541903672600494,
|
||||
1.7459181197626403,
|
||||
1.737780757913635,
|
||||
1.7297783508684146,
|
||||
1.7217732861435076,
|
||||
1.7137640932265894,
|
||||
1.7057507692361864,
|
||||
1.6918150918099673,
|
||||
1.6719047669939942,
|
||||
1.6460167077689267,
|
||||
1.6201194134191075,
|
||||
1.5986915141218316,
|
||||
1.5817353179379183,
|
||||
1.569199764184379,
|
||||
1.5566577309558969,
|
||||
1.545374152761467,
|
||||
1.5353620432989845,
|
||||
1.5266220576235803,
|
||||
1.5178910621476225,
|
||||
1.5097346239790443,
|
||||
1.502153039909686,
|
||||
1.495145456062699,
|
||||
1.488134243479226,
|
||||
1.48111939735681,
|
||||
1.474100442252211,
|
||||
1.4670307626366115,
|
||||
1.4599103316162523,
|
||||
1.45273959485914,
|
||||
1.445565137158368,
|
||||
1.4340878115214444,
|
||||
1.418273806730323,
|
||||
1.3981208704326855,
|
||||
1.3779439775587023,
|
||||
1.3598972673004606,
|
||||
1.3439818461440451,
|
||||
1.3301807335621048,
|
||||
1.316383926863083,
|
||||
1.3040618749785347,
|
||||
1.2932153453410835,
|
||||
1.2838336236692311,
|
||||
1.2744470198196236,
|
||||
1.2650555289898042,
|
||||
1.2556591482982988,
|
||||
1.2428104897182262,
|
||||
1.2264996957264114,
|
||||
1.2067249615595257,
|
||||
1.1869318618366975,
|
||||
1.1672278304018044,
|
||||
1.1476135933863398,
|
||||
1.1280891949729075,
|
||||
1.108555289615659,
|
||||
1.0895983485572227,
|
||||
1.0712204022764056,
|
||||
1.0534217504465226,
|
||||
1.0356155337864215,
|
||||
1.017807767853702,
|
||||
1.0
|
||||
]
|
||||
}
|
||||
80 examples/edfa_example_network.json Normal file
@@ -0,0 +1,80 @@
{
    "network_name": "EDFA Example Network - P2P",
    "elements": [{
            "uid": "Site_A",
            "type": "Transceiver",
            "metadata": {
                "location": {
                    "city": "Site A",
                    "region": "",
                    "latitude": 0,
                    "longitude": 0
                }
            }
        },
        {
            "uid": "Span1",
            "type": "Fiber",
            "type_variety": "SSMF",
            "params": {
                "length": 80,
                "loss_coef": 0.2,
                "length_units": "km",
                "att_in": 0,
                "con_in": 0.5,
                "con_out": 0.5
            },
            "metadata": {
                "location": {
                    "region": "",
                    "latitude": 1,
                    "longitude": 0
                }
            }
        },
        {
            "uid": "Edfa1",
            "type": "Edfa",
            "type_variety": "std_low_gain",
            "operational": {
                "gain_target": 17,
                "tilt_target": 0,
                "out_voa": 0
            },
            "metadata": {
                "location": {
                    "region": "",
                    "latitude": 2,
                    "longitude": 0
                }
            }
        },
        {
            "uid": "Site_B",
            "type": "Transceiver",
            "metadata": {
                "location": {
                    "city": "Site B",
                    "region": "",
                    "latitude": 2,
                    "longitude": 0
                }
            }
        }
    ],
    "connections": [{
            "from_node": "Site_A",
            "to_node": "Span1"
        },
        {
            "from_node": "Span1",
            "to_node": "Edfa1"
        },
        {
            "from_node": "Edfa1",
            "to_node": "Site_B"
        }
    ]
}
1 examples/edfa_model/DFG_96.txt Normal file
@@ -0,0 +1 @@
2.5135969849999999e+01 2.5118228139999999e+01 2.5095421330000001e+01 2.5062457710000000e+01 2.5026027650000000e+01 2.4996379529999999e+01 2.4981672549999999e+01 2.4975306679999999e+01 2.4983207260000000e+01 2.4997185649999999e+01 2.5017572470000001e+01 2.5038327809999998e+01 2.5054955849999999e+01 2.5067071899999998e+01 2.5070914110000000e+01 2.5070943650000000e+01 2.5071143240000001e+01 2.5075336270000001e+01 2.5087310179999999e+01 2.5103139360000000e+01 2.5122762040000001e+01 2.5142394790000001e+01 2.5159456330000001e+01 2.5173927039999999e+01 2.5176737670000001e+01 2.5170371410000001e+01 2.5152162539999999e+01 2.5131143099999999e+01 2.5108023350000000e+01 2.5085487770000000e+01 2.5069166750000001e+01 2.5058481759999999e+01 2.5054473130000002e+01 2.5051544410000002e+01 2.5049460589999999e+01 2.5047178490000000e+01 2.5045516559999999e+01 2.5044676490000001e+01 2.5040729200000001e+01 2.5032854080000000e+01 2.5023488300000000e+01 2.5016592339999999e+01 2.5013321359999999e+01 2.5011234340000001e+01 2.5010300149999999e+01 2.5009365480000000e+01 2.5008739640000002e+01 2.5008425350000000e+01 2.5006964660000001e+01 2.5004043100000001e+01 2.5000709980000000e+01 2.4998423200000001e+01 2.4993063320000001e+01 2.4983524209999999e+01 2.4971251030000001e+01 2.4960381080000001e+01 2.4948887209999999e+01 2.4935314890000001e+01 2.4921319270000001e+01 2.4908986970000001e+01 2.4898965140000001e+01 2.4889584630000002e+01 2.4880838700000002e+01 2.4872100920000001e+01 2.4864620259999999e+01 2.4858397730000000e+01 2.4854458380000001e+01 2.4851554430000000e+01 2.4851766009999999e+01 2.4854080140000001e+01 2.4859096240000000e+01 2.4864744580000000e+01 2.4872034859999999e+01 2.4880365200000000e+01 2.4889106689999998e+01 2.4897213130000001e+01 2.4902826040000001e+01 2.4906566900000001e+01 2.4908650800000000e+01 2.4910939440000000e+01 2.4913430790000000e+01 2.4915923440000000e+01 2.4921553509999999e+01 2.4930318610000000e+01 2.4940528120000000e+01 2.4949046689999999e+01 2.4957571229999999e+01 2.4967818449999999e+01 2.4981800929999999e+01 2.4997826860000000e+01 2.5013931830000001e+01 2.5028098459999999e+01 2.5040325750000001e+01 2.5052569810000001e+01 2.5064797009999999e+01 2.5077046970000001e+01
1 examples/edfa_model/DGT_96.txt Normal file
@@ -0,0 +1 @@
2.7145266811316859e+00 2.7054438192385049e+00 2.6947834587664494e+00 2.6841217449620203e+00 2.6681935771243177e+00 2.6521732021128046e+00 2.6303964408153848e+00 2.6028603502864280e+00 2.5696460593920065e+00 2.5364027376452056e+00 2.4994462867966041e+00 2.4587748041127506e+00 2.4143984371852212e+00 2.3699990328716107e+00 2.3223736962293420e+00 2.2715207713712529e+00 2.2174389328192197e+00 2.1633756538423898e+00 2.1183028432496016e+00 2.0822250998736478e+00 2.0551007720052352e+00 2.0279625371819305e+00 2.0008103857988204e+00 1.9736443063300082e+00 1.9482128147680253e+00 1.9245345552113182e+00 1.9026104247588487e+00 1.8806927939516411e+00 1.8622356724442459e+00 1.8472755032011290e+00 1.8358140813807049e+00 1.8243814368429321e+00 1.8139629377087627e+00 1.8045606557581335e+00 1.7961751115773796e+00 1.7877868031023945e+00 1.7793941781790852e+00 1.7709972329654864e+00 1.7625959636196327e+00 1.7541903672600494e+00 1.7459181197626403e+00 1.7377807579136351e+00 1.7297783508684146e+00 1.7217732861435076e+00 1.7137640932265894e+00 1.7057507692361864e+00 1.6918150918099673e+00 1.6719047669939942e+00 1.6460167077689267e+00 1.6201194134191075e+00 1.5986915141218316e+00 1.5817353179379183e+00 1.5691997641843789e+00 1.5566577309558969e+00 1.5453741527614671e+00 1.5353620432989845e+00 1.5266220576235803e+00 1.5178910621476225e+00 1.5097346239790443e+00 1.5021530399096861e+00 1.4951454560626991e+00 1.4881342434792260e+00 1.4811193973568100e+00 1.4741004422522110e+00 1.4670307626366115e+00 1.4599103316162523e+00 1.4527395948591399e+00 1.4455651371583680e+00 1.4340878115214444e+00 1.4182738067303231e+00 1.3981208704326855e+00 1.3779439775587023e+00 1.3598972673004606e+00 1.3439818461440451e+00 1.3301807335621048e+00 1.3163839268630830e+00 1.3040618749785347e+00 1.2932153453410835e+00 1.2838336236692311e+00 1.2744470198196236e+00 1.2650555289898042e+00 1.2556591482982988e+00 1.2428104897182262e+00 1.2264996957264114e+00 1.2067249615595257e+00 1.1869318618366975e+00 1.1672278304018044e+00 1.1476135933863398e+00 1.1280891949729075e+00 1.1085552896156590e+00 1.0895983485572227e+00 1.0712204022764056e+00 1.0534217504465226e+00 1.0356155337864215e+00 1.0178077678537021e+00 1.0000000000000000e+00
1 examples/edfa_model/NFR_96.txt Normal file
@@ -0,0 +1 @@
-3.1537433199999998e-01 -3.1537433199999998e-01 -3.1540091571002721e-01 -3.1849146117510951e-01 -3.2158358425400546e-01 -3.2467728615499991e-01 -3.2762368641496226e-01 -3.2054138461232762e-01 -3.1345546385118733e-01 -3.0636592135697482e-01 -2.9920267890990127e-01 -2.7061972852631744e-01 -2.4202215770774693e-01 -2.1340995523361256e-01 -1.8478227130158695e-01 -1.4809761118389625e-01 -1.1139416731807622e-01 -7.4671925273579881e-02 -3.8026748965679924e-02 -1.9958469399422092e-02 -1.8809287980157928e-03 1.6205879960573561e-02 3.4301964005709673e-02 5.2407330474054062e-02 7.0521986509597359e-02 7.9578036683472006e-02 8.8546647361909522e-02 9.7519863231965306e-02 1.0649768784154924e-01 9.7741380449907406e-02 8.8803437172660038e-02 7.9860899732845866e-02 7.0913764587403796e-02 6.3335892740565308e-02 5.5756212252058776e-02 4.8172631747863209e-02 4.0585148217162359e-02 3.3381591675710129e-02 2.6178308595650738e-02 1.8971315351761126e-02 1.1760609076833628e-02 1.6950294922759991e-02 2.2274991357701439e-02 2.7602433189104329e-02 3.2932622540790261e-02 3.8265561538776145e-02 4.3601252311271169e-02 3.4856990743481552e-02 2.5991055149117932e-02 1.7120541224980364e-02 8.2757587359203223e-03 1.9423214065246042e-03 -4.3943890171043590e-03 -1.0734375072893196e-02 -1.7077639301414434e-02 -2.4679702899572852e-02 -3.2297970403821680e-02 -3.9920180090477250e-02 -4.7534566327530239e-02 -4.9234003141433724e-02 -5.0934320036547187e-02 -5.2635517696692252e-02 -5.4337596806402461e-02 -5.6040558050919301e-02 -5.7718452237076875e-02 -5.6840590379175944e-02 -5.5962273198734966e-02 -5.5083500341416583e-02 -5.4204271452516814e-02 -5.8396088726955113e-02 -6.2627330169715334e-02 -6.6860769089203700e-02 -7.0901736256069450e-02 -5.2096097309052243e-02 -3.3280684121412940e-02 -1.4455489070928059e-02 4.3150387579057158e-03 1.4839202394482527e-02 2.5368841662503576e-02 3.5903960836465652e-02 4.6444564195321399e-02 5.6990656022467459e-02 6.7542240605774059e-02 1.0002709623672751e-01 1.3258013095133617e-01 1.6515013362773309e-01 1.9773711753599391e-01 2.3194802687829724e-01 2.6618779883837107e-01 3.0044543658085349e-01 3.3472095409250663e-01 3.5929034770587287e-01 3.8384389188855605e-01 4.0841026111391787e-01 4.3298946543290784e-01 4.3298946543290784e-01
5 examples/edfa_model/OA.json Normal file
@@ -0,0 +1,5 @@
{
    "nf_ripple": "NFR0_96.txt",
    "gain_ripple": "DFG0_96.txt",
    "dgt": "DGT_96.txt"
}
8 examples/edfa_model/Pchan2D.txt Normal file
@@ -0,0 +1,8 @@
-1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01
|
||||
-2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02
|
||||
-1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01 -2.0000000000000000e+01
|
||||
-2.0500000000000000e+01 -2.0489473680000000e+01 -2.0478947370000000e+01 -2.0468421050000000e+01 -2.0457894740000000e+01 -2.0447368420000000e+01 -2.0436842110000001e+01 -2.0426315790000000e+01 -2.0415789470000000e+01 -2.0405263160000001e+01 -2.0394736840000000e+01 -2.0384210530000001e+01 -2.0373684210000000e+01 -2.0363157890000000e+01 -2.0352631580000001e+01 -2.0342105260000000e+01 -2.0331578950000001e+01 -2.0321052630000001e+01 -2.0310526320000001e+01 -2.0300000000000001e+01 -2.0289473680000000e+01 -2.0278947370000001e+01 -2.0268421050000001e+01 -2.0257894740000001e+01 -2.0247368420000001e+01 -2.0236842110000001e+01 -2.0226315790000001e+01 -2.0215789470000001e+01 -2.0205263160000001e+01 -2.0194736840000001e+01 -2.0184210530000001e+01 -2.0173684210000001e+01 -2.0163157890000001e+01 -2.0152631580000001e+01 -2.0142105260000001e+01 -2.0131578950000002e+01 -2.0121052630000001e+01 -2.0110526320000002e+01 -2.0100000000000001e+01 -2.0089473680000001e+01 -2.0078947370000002e+01 -2.0068421050000001e+01 -2.0057894739999998e+01 -2.0047368420000002e+01 -2.0036842109999998e+01 -2.0026315790000002e+01 -2.0015789470000001e+01 -2.0005263159999998e+01 -1.9994736840000002e+01 -1.9984210529999999e+01 -1.9973684209999998e+01 -1.9963157890000002e+01 -1.9952631579999998e+01 -1.9942105260000002e+01 -1.9931578949999999e+01 -1.9921052629999998e+01 -1.9910526319999999e+01 -1.9899999999999999e+01 -1.9889473679999998e+01 -1.9878947369999999e+01 -1.9868421049999998e+01 -1.9857894739999999e+01 -1.9847368419999999e+01 -1.9836842109999999e+01 -1.9826315789999999e+01 -1.9815789469999999e+01 -1.9805263159999999e+01 -1.9794736839999999e+01 -1.9784210529999999e+01 -1.9773684209999999e+01 -1.9763157889999999e+01 -1.9752631579999999e+01 -1.9742105259999999e+01 -1.9731578949999999e+01 -1.9721052629999999e+01 -1.9710526320000000e+01 -1.9699999999999999e+01 -1.9689473679999999e+01 -1.9678947369999999e+01 -1.9668421049999999e+01 -1.9657894740000000e+01 -1.9647368419999999e+01 -1.9636842110000000e+01 -1.9626315790000000e+01 -1.9615789469999999e+01 -1.9605263160000000e+01 -1.9594736839999999e+01 -1.9584210530000000e+01 -1.9573684210000000e+01 -1.9563157889999999e+01 -1.9552631580000000e+01 -1.9542105260000000e+01 -1.9531578950000000e+01 -1.9521052630000000e+01 -1.9510526320000000e+01 -1.9500000000000000e+01
|
||||
-2.0500000000000000e+01 -2.0489473680000000e+01 -2.0478947370000000e+01 -2.0468421050000000e+01 -2.0457894740000000e+01 -2.0447368420000000e+01 -2.0436842110000001e+01 -2.0426315790000000e+01 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.9573684210000000e+01 -1.9563157889999999e+01 -1.9552631580000000e+01 -1.9542105260000000e+01 -1.9531578950000000e+01 -1.9521052630000000e+01 -1.9510526320000000e+01 -1.9500000000000000e+01
|
||||
-1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.4460000000000001e+01
|
||||
-1.4460000000000001e+01 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01 -1.4460000000000001e+01
|
||||
-1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.4460000000000001e+01 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02 -1.0000000000000000e+02
8 examples/edfa_model/Pchan2DLegend.txt Normal file
@@ -0,0 +1,8 @@
7.0000000000000000e+01 1.1700000000000000e+02 1.0800000000000000e+02 1.0800000000000000e+02 3.2000000000000000e+01 7.0000000000000000e+01 1.0800000000000000e+02 9.7000000000000000e+01 1.1600000000000000e+02 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01
|
||||
7.9000000000000000e+01 1.1000000000000000e+02 1.0100000000000000e+02 3.2000000000000000e+01 7.1000000000000000e+01 1.1400000000000000e+02 1.1100000000000000e+02 1.1700000000000000e+02 1.1200000000000000e+02 3.2000000000000000e+01 6.6000000000000000e+01 1.0800000000000000e+02 1.1700000000000000e+02 1.0100000000000000e+02 3.2000000000000000e+01
|
||||
7.9000000000000000e+01 1.1000000000000000e+02 1.0100000000000000e+02 3.2000000000000000e+01 7.1000000000000000e+01 1.1400000000000000e+02 1.1100000000000000e+02 1.1700000000000000e+02 1.1200000000000000e+02 3.2000000000000000e+01 8.2000000000000000e+01 1.0100000000000000e+02 1.0000000000000000e+02 3.2000000000000000e+01 3.2000000000000000e+01
|
||||
7.0000000000000000e+01 1.1700000000000000e+02 1.0800000000000000e+02 1.0800000000000000e+02 3.2000000000000000e+01 1.1900000000000000e+02 3.2000000000000000e+01 8.3000000000000000e+01 8.2000000000000000e+01 8.3000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01
|
||||
6.6000000000000000e+01 1.1100000000000000e+02 1.1600000000000000e+02 1.0400000000000000e+02 3.2000000000000000e+01 6.9000000000000000e+01 1.1000000000000000e+02 1.0000000000000000e+02 1.1500000000000000e+02 3.2000000000000000e+01 1.1900000000000000e+02 3.2000000000000000e+01 8.3000000000000000e+01 8.2000000000000000e+01 8.3000000000000000e+01
|
||||
1.0400000000000000e+02 1.0100000000000000e+02 9.7000000000000000e+01 1.1800000000000000e+02 1.2100000000000000e+02 3.2000000000000000e+01 9.8000000000000000e+01 1.0800000000000000e+02 1.1700000000000000e+02 1.0100000000000000e+02 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01
|
||||
1.0400000000000000e+02 1.0100000000000000e+02 9.7000000000000000e+01 1.1800000000000000e+02 1.2100000000000000e+02 3.2000000000000000e+01 1.1400000000000000e+02 1.0100000000000000e+02 1.0000000000000000e+02 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01
|
||||
1.1900000000000000e+02 1.1100000000000000e+02 1.1400000000000000e+02 1.1500000000000000e+02 1.1600000000000000e+02 3.2000000000000000e+01 9.9000000000000000e+01 9.7000000000000000e+01 1.1500000000000000e+02 1.0100000000000000e+02 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01 3.2000000000000000e+01
301 examples/edfa_model/amplifier.py Normal file
@@ -0,0 +1,301 @@
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Mon Nov 27 12:32:04 2017
|
||||
|
||||
@author: briantaylor
|
||||
"""
|
||||
import numpy as np
|
||||
from numpy import polyfit, polyval, mean
|
||||
from utilities import lin2db, db2lin, itufs, freq2wavelength
|
||||
import matplotlib.pyplot as plt
|
||||
from scipy.constants import h
|
||||
|
||||
|
||||
def noise_profile(nf, gain, ffs, df):
|
||||
""" noise_profile(nf, gain, ffs, df) computes amplifier ase
|
||||
|
||||
:param nf: Noise figure in dB
|
||||
:param gain: Actual gain calculated for the EDFA in dB units
|
||||
:param ffs: A numpy array of frequencies
|
||||
:param df: the reference bw in THz
|
||||
:type nf: numpy.ndarray
|
||||
:type gain: numpy.ndarray
|
||||
:type ffs: numpy.ndarray
|
||||
:type df: float
|
||||
:return: the asepower in dBm
|
||||
:rtype: numpy.ndarray
|
||||
|
||||
ASE POWER USING PER CHANNEL GAIN PROFILE
|
||||
INPUTS:
|
||||
NF_dB - Noise figure in dB, vector of length number of channels or
|
||||
spectral slices
|
||||
G_dB - Actual gain calculated for the EDFA, vector of length number of
|
||||
channels or spectral slices
|
||||
ffs - Center frequency grid of the channels or spectral slices in THz,
|
||||
vector of length number of channels or spectral slices
|
||||
dF - width of each channel or spectral slice in THz,
|
||||
vector of length number of channels or spectral slices
|
||||
OUTPUT:
|
||||
ase_dBm - ase in dBm per channel or spectral slice
|
||||
NOTE: the output is the total ASE in the channel or spectral slice. For
|
||||
50GHz channels the ASE BW is effectively 0.4nm. To get to noise power in
|
||||
0.1nm, subtract 6dB.
|
||||
|
||||
ONSR is usually quoted as channel power divided by
|
||||
the ASE power in 0.1nm RBW, regardless of the width of the actual
|
||||
channel. This is a historical convention from the days when optical
|
||||
signals were much smaller (155Mbps, 2.5Gbps, ... 10Gbps) than the
|
||||
resolution of the OSAs that were used to measure spectral power which
|
||||
were set to 0.1nm resolution for convenience. Moving forward into
|
||||
flexible grid and high baud rate signals, it may be convenient to begin
|
||||
quoting power spectral density in the same BW for both signal and ASE,
|
||||
e.g. 12.5GHz."""
|
||||
|
||||
h_mWThz = 1e-3 * h * (1e14)**2
|
||||
nf_lin = db2lin(nf)
|
||||
g_lin = db2lin(gain)
|
||||
ase = h_mWThz * df * ffs * (nf_lin * g_lin - 1)
|
||||
asedb = lin2db(ase)
|
||||
|
||||
return asedb
|
||||
|
||||
|
||||
def gain_profile(dfg, dgt, Pin, gp, gtp):
|
||||
"""
|
||||
:param dfg: design flat gain
|
||||
:param dgt: design gain tilt
|
||||
:param Pin: channel input power profile
|
||||
:param gp: Average gain setpoint in dB units
|
||||
:param gtp: gain tilt setting
|
||||
:type dfg: numpy.ndarray
|
||||
:type dgt: numpy.ndarray
|
||||
:type Pin: numpy.ndarray
|
||||
:type gp: float
|
||||
:type gtp: float
|
||||
:return: gain profile in dBm
|
||||
:rtype: numpy.ndarray
|
||||
|
||||
AMPLIFICATION USING INPUT PROFILE
|
||||
INPUTS:
|
||||
DFG - vector of length number of channels or spectral slices
|
||||
DGT - vector of length number of channels or spectral slices
|
||||
Pin - input powers vector of length number of channels or
|
||||
spectral slices
|
||||
Gp - provisioned gain length 1
|
||||
GTp - provisioned tilt length 1
|
||||
|
||||
OUTPUT:
|
||||
amp gain per channel or spectral slice
|
||||
NOTE: there is no checking done for violations of the total output power
|
||||
capability of the amp.
|
||||
Ported from Matlab version written by David Boerges at Ciena.
|
||||
Based on:
|
||||
R. di Muro, "The Er3+ fiber gain coefficient derived from a dynamic
|
||||
gain
|
||||
tilt technique", Journal of Lightwave Technology, Vol. 18, Iss. 3,
|
||||
Pp. 343-347, 2000.
|
||||
"""
|
||||
err_tolerance = 1.0e-11
|
||||
simple_opt = True
|
||||
|
||||
# TODO make all values linear unit and convert to dB units as needed within
|
||||
# this function.
|
||||
nchan = list(range(len(Pin)))
|
||||
|
||||
# TODO find a way to use these or lose them. Primarily we should have a
|
||||
# way to determine if exceeding the gain or output power of the amp
|
||||
tot_in_power_db = lin2db(np.sum(db2lin(Pin)))
|
||||
avg_gain_db = lin2db(mean(db2lin(dfg)))
|
||||
|
||||
# Linear fit to the DGT to get its slope across the channel index
|
||||
p = polyfit(nchan, dgt, 1)
|
||||
dgt_slope = p[0]
|
||||
|
||||
# Calculate the target slope- Currently assumes equal spaced channels
|
||||
# TODO make it so that supports arbitrary channel spacing.
|
||||
targ_slope = gtp / (len(nchan) - 1)
|
||||
|
||||
# 1st estimate of DGT scaling
|
||||
dgts1 = targ_slope / dgt_slope
|
||||
|
||||
# when simple_opt is true code makes 2 attempts to compute gain and
|
||||
# the internal voa value. This is currently here to provide direct
|
||||
# comparison with original Matlab code. Will be removed.
|
||||
# TODO replace with loop
|
||||
|
||||
if simple_opt:
|
||||
|
||||
# 1st estimate of Er gain & voa loss
|
||||
g1st = dfg + dgt * dgts1
|
||||
voa = lin2db(mean(db2lin(g1st))) - gp
|
||||
|
||||
# 2nd estimate of Amp ch gain using the channel input profile
|
||||
g2nd = g1st - voa
|
||||
pout_db = lin2db(np.sum(db2lin(Pin + g2nd)))
|
||||
dgts2 = gp - (pout_db - tot_in_power_db)
|
||||
|
||||
# Center estimate of amp ch gain
|
||||
xcent = dgts2
|
||||
gcent = g1st - voa + dgt * xcent
|
||||
pout_db = lin2db(np.sum(db2lin(Pin + gcent)))
|
||||
gavg_cent = pout_db - tot_in_power_db
|
||||
|
||||
# Lower estimate of Amp ch gain
|
||||
deltax = np.max(g1st) - np.min(g1st)
|
||||
xlow = dgts2 - deltax
|
||||
glow = g1st - voa + xlow * dgt
|
||||
pout_db = lin2db(np.sum(db2lin(Pin + glow)))
|
||||
gavg_low = pout_db - tot_in_power_db
|
||||
|
||||
# Upper gain estimate
|
||||
xhigh = dgts2 + deltax
|
||||
ghigh = g1st - voa + xhigh * dgt
|
||||
pout_db = lin2db(np.sum(db2lin(Pin + ghigh)))
|
||||
gavg_high = pout_db - tot_in_power_db
|
||||
|
||||
# compute slope
|
||||
slope1 = (gavg_low - gavg_cent) / (xlow - xcent)
|
||||
slope2 = (gavg_cent - gavg_high) / (xcent - xhigh)
|
||||
|
||||
if np.abs(gp - gavg_cent) <= err_tolerance:
|
||||
dgts3 = xcent
|
||||
elif gp < gavg_cent:
|
||||
dgts3 = xcent - (gavg_cent - gp) / slope1
|
||||
else:
|
||||
dgts3 = xcent + (-gavg_cent + gp) / slope2
|
||||
|
||||
gprofile = g1st - voa + dgt * dgts3
|
||||
else:
|
||||
gprofile = None
|
||||
|
||||
return gprofile
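# Illustrative usage sketch (not part of the original Matlab port; array sizes and
# values below are hypothetical, chosen only as a quick sanity check): with a flat
# design gain, a linear DGT shape and a flat input load, the average of the returned
# per-channel gain should land close to the provisioned gain 'gp'.
#
#   dfg_demo = np.full(8, 25.0)              # flat 25 dB design gain
#   dgt_demo = np.linspace(2.0, 1.0, 8)      # arbitrary dynamic gain tilt shape
#   pin_demo = np.full(8, -20.0)             # -20 dBm per channel at the input
#   g_demo = gain_profile(dfg_demo, dgt_demo, pin_demo, gp=20.0, gtp=-0.5)
#   # lin2db(mean(db2lin(g_demo))) should then be close to 20.0 dB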
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
plt.close('all')
|
||||
fc = itufs(0.05)
|
||||
lc = freq2wavelength(fc) / 1000
|
||||
nchan = list(range(len(lc)))
|
||||
df = np.array([0.05] * (nchan[-1] + 1))
|
||||
# TODO remove path dependence
|
||||
path = ''
|
||||
|
||||
"""
|
||||
DFG_96: Design flat gain at each wavelength in the 96 channel 50GHz ITU
|
||||
grid in dB. This can be experimentally determined by measuring the gain
|
||||
at each wavelength using a full, flat channel (or ASE) load at the input.
|
||||
The amplifier should be set to its maximum flat gain (tilt = 0dB). This
|
||||
measurement captures the ripple of the amplifier. If the amplifier was
|
||||
designed to be mimimum ripple at some other tilt value, then the ripple
|
||||
reflected in this measurement will not be that minimum. However, when
|
||||
the DGT gets applied through the provisioning of tilt, the model should
|
||||
accurately reproduce the expected ripple at that tilt value. One could
|
||||
also do the measurement at some expected tilt value and back-calculate
|
||||
this vector using the DGT method. Alternatively, one could re-write the
|
||||
algorithm to accept a nominal tilt and a tiled version of this vector.
|
||||
"""
|
||||
|
||||
dfg_96 = np.loadtxt(path + 'DFG_96.txt')
|
||||
|
||||
"""maximum gain for flat operation - the amp in the data file was designed
|
||||
for 25dB gain and has an internal VOA for setting the external gain
|
||||
"""
|
||||
|
||||
avg_dfg = dfg_96.mean()
|
||||
|
||||
"""
|
||||
DGT_96: This is the so-called Dynamic Gain Tilt of the EDFA in dB/dB. It
|
||||
is the change in gain at each wavelength corresponding to a 1dB change at
|
||||
the longest wavelength supported. The value can be obtained
|
||||
experimentally or through analysis of the cross sections or Giles
|
||||
parameters of the Er fibre. This is experimentally measured by changing
|
||||
the gain of the amplifier above the maximum flat gain while not changing
|
||||
the internal VOA (i.e. the mid-stage VOA is set to minimum and does not
|
||||
change during the measurement). Note that the measurement can change the
|
||||
gain by an arbitrary amount and divide by the gain change (in dB) which
|
||||
is measured at the reference wavelength (the red end of the band).
|
||||
"""
|
||||
|
||||
dgt_96 = np.loadtxt(path + 'DGT_96.txt')
|
||||
|
||||
"""
|
||||
pNFfit3: Cubic polynomial fit coefficients to noise figure in dB
|
||||
averaged across wavelength as a function of gain change from design flat:
|
||||
|
||||
NFavg = pNFfit3(1)*dG^3 + pNFfit3(2)*dG^2 pNFfit3(3)*dG + pNFfit3(4)
|
||||
where
|
||||
dG = GainTarget - average(DFG_96)
|
||||
note that dG will normally be a negative value.
|
||||
"""
|
||||
|
||||
nf_fitco = np.loadtxt(path + 'pNFfit3.txt')
|
||||
|
||||
"""NFR_96: Noise figure ripple in dB away from the average noise figure
|
||||
across the band. This captures the wavelength dependence of the NF. To
|
||||
calculate the NF across channels, one uses the cubic fit coefficients
|
||||
with the external gain target to get the average nosie figure, NFavg and
|
||||
then adds this to NFR_96:
|
||||
NF_96 = NFR_96 + NFavg
|
||||
"""
|
||||
|
||||
nf_ripple = np.loadtxt(path + 'NFR_96.txt')
|
||||
|
||||
# This is an example to set the provisionable gain and gain-tilt values
|
||||
# Tilt is in units of dB/THz
|
||||
gain_target = 20.0
|
||||
tilt_target = -0.7
|
||||
|
||||
# calculate the NF for the EDFA at this gain setting
|
||||
dg = gain_target - avg_dfg
|
||||
nf_avg = polyval(nf_fitco, dg)
|
||||
nf_96 = nf_ripple + nf_avg
|
||||
|
||||
# get the input power profiles to show
|
||||
pch2d = np.loadtxt(path + 'Pchan2D.txt')
|
||||
|
||||
# Load legend and assemble legend text
|
||||
pch2d_legend_data = np.loadtxt(path + 'Pchan2DLegend.txt')
|
||||
pch2d_legend = []
|
||||
for ea in pch2d_legend_data:
|
||||
s = ''.join([chr(xx) for xx in ea.astype(dtype=int)]).strip()
|
||||
pch2d_legend.append(s)
|
||||
|
||||
# assemble plot
|
||||
axis_font = {'fontname': 'Arial', 'size': '16', 'fontweight': 'bold'}
|
||||
title_font = {'fontname': 'Arial', 'size': '17', 'fontweight': 'bold'}
|
||||
tic_font = {'fontname': 'Arial', 'size': '12'}
|
||||
|
||||
plt.rcParams["font.family"] = "Arial"
|
||||
plt.figure()
|
||||
plt.plot(nchan, pch2d.T, '.-', lw=2)
|
||||
plt.xlabel('Channel Number', **axis_font)
|
||||
plt.ylabel('Channel Power [dBm]', **axis_font)
|
||||
plt.title('Input Power Profiles for Different Channel Loading',
|
||||
**title_font)
|
||||
plt.legend(pch2d_legend, loc=5)
|
||||
plt.grid()
|
||||
plt.ylim((-100, -10))
|
||||
plt.xlim((0, 110))
|
||||
plt.xticks(np.arange(0, 100, 10), **tic_font)
|
||||
plt.yticks(np.arange(-110, -10, 10), **tic_font)
|
||||
|
||||
plt.figure()
|
||||
ea = pch2d[1, :]
|
||||
for ea in pch2d:
|
||||
chgain = gain_profile(dfg_96, dgt_96, ea, gain_target, tilt_target)
|
||||
pase = noise_profile(nf_96, chgain, fc, df)
|
||||
pout = lin2db(db2lin(ea + chgain) + db2lin(pase))
|
||||
plt.plot(nchan, pout, '.-', lw=2)
|
||||
plt.title('Output Power with ASE for Different Channel Loading',
|
||||
**title_font)
|
||||
plt.xlabel('Channel Number', **axis_font)
|
||||
plt.ylabel('Channel Power [dBm]', **axis_font)
|
||||
plt.grid()
|
||||
plt.ylim((-50, 10))
|
||||
plt.xlim((0, 100))
|
||||
plt.xticks(np.arange(0, 100, 10), **tic_font)
|
||||
plt.yticks(np.arange(-50, 10, 10), **tic_font)
|
||||
plt.legend(pch2d_legend, loc=5)
|
||||
plt.show()
|
||||
104
examples/edfa_model/build_oa_json.py
Normal file
@@ -0,0 +1,104 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Tue Jan 30 12:32:00 2018
|
||||
|
||||
@author: jeanluc-auge
|
||||
@comments about amplifier input files from Brian Taylor & Dave Boertjes
|
||||
|
||||
update an existing json file with all the 96ch txt files for a given amplifier type
|
||||
amplifier type 'OA_type1' is hard coded but can be modified and other types added
|
||||
returns an updated amplifier json file: output_json_file_name = 'edfa_config.json'
|
||||
"""
|
||||
import re
|
||||
import sys
|
||||
import json
|
||||
import numpy as np
|
||||
from gnpy.core.utils import lin2db, db2lin
|
||||
|
||||
"""amplifier file names
|
||||
convert a set of amplifier files + input json definition file into a valid edfa_json_file:
|
||||
nf_fit_coeff: NF polynomial coefficients txt file (optional)
|
||||
nf_ripple: NF ripple excursion txt file
|
||||
dfg: gain txt file
|
||||
dgt: dynamic gain txt file
|
||||
input json file in argument (default = 'OA.json')
|
||||
the json input file should have the following fields:
|
||||
{
|
||||
"gain_flatmax": 25,
|
||||
"gain_min": 15,
|
||||
"p_max": 21,
|
||||
"nf_fit_coeff": "pNFfit3.txt",
|
||||
"nf_ripple": "NFR_96.txt",
|
||||
"dfg": "DFG_96.txt",
|
||||
"dgt": "DGT_96.txt",
|
||||
"nf_model":
|
||||
{
|
||||
"enabled": true,
|
||||
"nf_min": 5.8,
|
||||
"nf_max": 10
|
||||
}
|
||||
}
|
||||
gain_flat = max flat gain (dB)
|
||||
gain_min = min gain (dB) : will consider an input VOA if below (TBD vs throwing an exception)
|
||||
p_max = max power (dBm)
|
||||
nf_fit = boolean (True, False) :
|
||||
if False nf_fit_coeff are ignored and nf_model fields are used
|
||||
"""
|
||||
|
||||
input_json_file_name = "OA.json" #default path
|
||||
output_json_file_name = "default_edfa_config.json"
|
||||
param_field ="params"
|
||||
gain_min_field = "gain_min"
|
||||
gain_max_field = "gain_flatmax"
|
||||
gain_ripple_field = "dfg"
|
||||
nf_ripple_field = "nf_ripple"
|
||||
nf_fit_coeff = "nf_fit_coeff"
|
||||
nf_model_field = "nf_model"
|
||||
nf_model_enabled_field = "enabled"
|
||||
nf_min_field ="nf_min"
|
||||
nf_max_field = "nf_max"
|
||||
|
||||
def read_file(field, file_name):
|
||||
"""read and format the 96 channels txt files describing the amplifier NF and ripple
|
||||
convert dfg into gain ripple by removing the mean component
|
||||
"""
|
||||
|
||||
#with open(path + file_name,'r') as this_file:
|
||||
# data = this_file.read()
|
||||
#data.strip()
|
||||
#data = re.sub(r"([0-9])([ ]{1,3})([0-9-+])",r"\1,\3",data)
|
||||
#data = list(data.split(","))
|
||||
#data = [float(x) for x in data]
|
||||
data = np.loadtxt(file_name)
|
||||
print(len(data), file_name)
|
||||
if field == gain_ripple_field or field == nf_ripple_field:
|
||||
#consider ripple excursion only to avoid redundant information
|
||||
#because the max flat_gain is already given by the 'gain_flat' field in json
|
||||
#remove the mean component
|
||||
data = data - data.mean()
|
||||
data = data.tolist()
|
||||
return data
|
||||
|
||||
def input_json(path):
|
||||
"""read the json input file and add all the 96 channels txt files
|
||||
create the output json file with output_json_file_name"""
|
||||
with open(path,'r') as edfa_json_file:
|
||||
amp_text = edfa_json_file.read()
|
||||
amp_dict = json.loads(amp_text)
|
||||
|
||||
for k, v in amp_dict.items():
|
||||
if re.search(r'.txt$',str(v)) :
|
||||
amp_dict[k] = read_file(k, v)
|
||||
|
||||
amp_text = json.dumps(amp_dict, indent=4)
|
||||
#print(amp_text)
|
||||
with open(output_json_file_name,'w') as edfa_json_file:
|
||||
edfa_json_file.write(amp_text)
|
||||
|
||||
if __name__ == '__main__':
|
||||
if len(sys.argv) == 2:
|
||||
path = sys.argv[1]
|
||||
else:
|
||||
path = input_json_file_name
|
||||
input_json(path)
|
||||
313
examples/edfa_model/edfa_config.json
Normal file
@@ -0,0 +1,313 @@
|
||||
{
|
||||
"params": {
|
||||
"gain_flatmax": 25,
|
||||
"gain_min": 15,
|
||||
"p_max": 21,
|
||||
"nf_fit_coeff": [
|
||||
0.000168241,
|
||||
0.0469961,
|
||||
0.0359549,
|
||||
5.82851
|
||||
],
|
||||
"nf_ripple": [
|
||||
-0.3110761646066259,
|
||||
-0.3110761646066259,
|
||||
-0.31110274831665313,
|
||||
-0.31419329378173544,
|
||||
-0.3172854168606314,
|
||||
-0.32037911876162584,
|
||||
-0.3233255190215882,
|
||||
-0.31624321721895354,
|
||||
-0.30915729645781326,
|
||||
-0.30206775396360075,
|
||||
-0.2949045115165272,
|
||||
-0.26632156113294336,
|
||||
-0.23772399031437283,
|
||||
-0.20911178784023846,
|
||||
-0.18048410390821285,
|
||||
-0.14379944379052215,
|
||||
-0.10709599992470213,
|
||||
-0.07037375788020579,
|
||||
-0.03372858157230583,
|
||||
-0.015660302006048,
|
||||
0.0024172385953583004,
|
||||
0.020504047353947653,
|
||||
0.03860013139908377,
|
||||
0.05670549786742816,
|
||||
0.07482015390297145,
|
||||
0.0838762040768461,
|
||||
0.09284481475528361,
|
||||
0.1018180306253394,
|
||||
0.11079585523492333,
|
||||
0.1020395478432815,
|
||||
0.09310160456603413,
|
||||
0.08415906712621996,
|
||||
0.07521193198077789,
|
||||
0.0676340601339394,
|
||||
0.06005437964543287,
|
||||
0.052470799141237305,
|
||||
0.044883315610536455,
|
||||
0.037679759069084225,
|
||||
0.03047647598902483,
|
||||
0.02326948274513522,
|
||||
0.01605877647020772,
|
||||
0.021248462316134083,
|
||||
0.02657315875107553,
|
||||
0.03190060058247842,
|
||||
0.03723078993416436,
|
||||
0.04256372893215024,
|
||||
0.047899419704645264,
|
||||
0.03915515813685565,
|
||||
0.030289222542492025,
|
||||
0.021418708618354456,
|
||||
0.012573926129294415,
|
||||
0.006240488799898697,
|
||||
-9.622162373026585e-05,
|
||||
-0.006436207679519103,
|
||||
-0.012779471908040341,
|
||||
-0.02038153550619876,
|
||||
-0.027999803010447587,
|
||||
-0.035622012697103154,
|
||||
-0.043236398934156144,
|
||||
-0.04493583574805963,
|
||||
-0.04663615264317309,
|
||||
-0.048337350303318156,
|
||||
-0.050039429413028365,
|
||||
-0.051742390657545205,
|
||||
-0.05342028484370278,
|
||||
-0.05254242298580185,
|
||||
-0.05166410580536087,
|
||||
-0.05078533294804249,
|
||||
-0.04990610405914272,
|
||||
-0.05409792133358102,
|
||||
-0.05832916277634124,
|
||||
-0.06256260169582961,
|
||||
-0.06660356886269536,
|
||||
-0.04779792991567815,
|
||||
-0.028982516728038848,
|
||||
-0.010157321677553965,
|
||||
0.00861320615127981,
|
||||
0.01913736978785662,
|
||||
0.029667009055877668,
|
||||
0.04020212822983975,
|
||||
0.050742731588695494,
|
||||
0.061288823415841555,
|
||||
0.07184040799914815,
|
||||
0.1043252636301016,
|
||||
0.13687829834471027,
|
||||
0.1694483010211072,
|
||||
0.202035284929368,
|
||||
0.23624619427167134,
|
||||
0.27048596623174515,
|
||||
0.30474360397422756,
|
||||
0.3390191214858807,
|
||||
0.36358851509924695,
|
||||
0.38814205928193013,
|
||||
0.41270842850729195,
|
||||
0.4372876328262819,
|
||||
0.4372876328262819
|
||||
],
|
||||
"dgt": [
|
||||
2.714526681131686,
|
||||
2.705443819238505,
|
||||
2.6947834587664494,
|
||||
2.6841217449620203,
|
||||
2.6681935771243177,
|
||||
2.6521732021128046,
|
||||
2.630396440815385,
|
||||
2.602860350286428,
|
||||
2.5696460593920065,
|
||||
2.5364027376452056,
|
||||
2.499446286796604,
|
||||
2.4587748041127506,
|
||||
2.414398437185221,
|
||||
2.3699990328716107,
|
||||
2.322373696229342,
|
||||
2.271520771371253,
|
||||
2.2174389328192197,
|
||||
2.16337565384239,
|
||||
2.1183028432496016,
|
||||
2.082225099873648,
|
||||
2.055100772005235,
|
||||
2.0279625371819305,
|
||||
2.0008103857988204,
|
||||
1.9736443063300082,
|
||||
1.9482128147680253,
|
||||
1.9245345552113182,
|
||||
1.9026104247588487,
|
||||
1.8806927939516411,
|
||||
1.862235672444246,
|
||||
1.847275503201129,
|
||||
1.835814081380705,
|
||||
1.824381436842932,
|
||||
1.8139629377087627,
|
||||
1.8045606557581335,
|
||||
1.7961751115773796,
|
||||
1.7877868031023945,
|
||||
1.7793941781790852,
|
||||
1.7709972329654864,
|
||||
1.7625959636196327,
|
||||
1.7541903672600494,
|
||||
1.7459181197626403,
|
||||
1.737780757913635,
|
||||
1.7297783508684146,
|
||||
1.7217732861435076,
|
||||
1.7137640932265894,
|
||||
1.7057507692361864,
|
||||
1.6918150918099673,
|
||||
1.6719047669939942,
|
||||
1.6460167077689267,
|
||||
1.6201194134191075,
|
||||
1.5986915141218316,
|
||||
1.5817353179379183,
|
||||
1.569199764184379,
|
||||
1.5566577309558969,
|
||||
1.545374152761467,
|
||||
1.5353620432989845,
|
||||
1.5266220576235803,
|
||||
1.5178910621476225,
|
||||
1.5097346239790443,
|
||||
1.502153039909686,
|
||||
1.495145456062699,
|
||||
1.488134243479226,
|
||||
1.48111939735681,
|
||||
1.474100442252211,
|
||||
1.4670307626366115,
|
||||
1.4599103316162523,
|
||||
1.45273959485914,
|
||||
1.445565137158368,
|
||||
1.4340878115214444,
|
||||
1.418273806730323,
|
||||
1.3981208704326855,
|
||||
1.3779439775587023,
|
||||
1.3598972673004606,
|
||||
1.3439818461440451,
|
||||
1.3301807335621048,
|
||||
1.316383926863083,
|
||||
1.3040618749785347,
|
||||
1.2932153453410835,
|
||||
1.2838336236692311,
|
||||
1.2744470198196236,
|
||||
1.2650555289898042,
|
||||
1.2556591482982988,
|
||||
1.2428104897182262,
|
||||
1.2264996957264114,
|
||||
1.2067249615595257,
|
||||
1.1869318618366975,
|
||||
1.1672278304018044,
|
||||
1.1476135933863398,
|
||||
1.1280891949729075,
|
||||
1.108555289615659,
|
||||
1.0895983485572227,
|
||||
1.0712204022764056,
|
||||
1.0534217504465226,
|
||||
1.0356155337864215,
|
||||
1.017807767853702,
|
||||
1.0
|
||||
],
|
||||
"nf_model": {
|
||||
"enabled": true,
|
||||
"nf1": 5.727887800964238,
|
||||
"nf2": 7.727887800964238,
|
||||
"delta_p": 5.238350271545567
|
||||
},
|
||||
"gain_ripple": [
|
||||
0.1359703369791596,
|
||||
0.11822862697916037,
|
||||
0.09542181697916163,
|
||||
0.06245819697916133,
|
||||
0.02602813697916062,
|
||||
-0.0036199830208403228,
|
||||
-0.018326963020840026,
|
||||
-0.0246928330208398,
|
||||
-0.016792253020838643,
|
||||
-0.0028138630208403015,
|
||||
0.017572956979162058,
|
||||
0.038328296979159404,
|
||||
0.054956336979159914,
|
||||
0.0670723869791594,
|
||||
0.07091459697916136,
|
||||
0.07094413697916124,
|
||||
0.07114372697916238,
|
||||
0.07533675697916209,
|
||||
0.08731066697916035,
|
||||
0.10313984697916112,
|
||||
0.12276252697916235,
|
||||
0.14239527697916188,
|
||||
0.15945681697916214,
|
||||
0.1739275269791598,
|
||||
0.1767381569791624,
|
||||
0.17037189697916233,
|
||||
0.15216302697916007,
|
||||
0.13114358697916018,
|
||||
0.10802383697916085,
|
||||
0.08548825697916129,
|
||||
0.06916723697916183,
|
||||
0.05848224697916038,
|
||||
0.05447361697916264,
|
||||
0.05154489697916276,
|
||||
0.04946107697915991,
|
||||
0.04717897697916129,
|
||||
0.04551704697916037,
|
||||
0.04467697697916151,
|
||||
0.04072968697916224,
|
||||
0.03285456697916089,
|
||||
0.023488786979161347,
|
||||
0.01659282697915998,
|
||||
0.013321846979160057,
|
||||
0.011234826979162449,
|
||||
0.01030063697916006,
|
||||
0.00936596697916059,
|
||||
0.00874012697916271,
|
||||
0.00842583697916055,
|
||||
0.006965146979162284,
|
||||
0.0040435869791615175,
|
||||
0.0007104669791608842,
|
||||
-0.0015763130208377163,
|
||||
-0.006936193020838033,
|
||||
-0.016475303020840215,
|
||||
-0.028748483020837767,
|
||||
-0.039618433020837784,
|
||||
-0.051112303020840244,
|
||||
-0.06468462302083822,
|
||||
-0.07868024302083754,
|
||||
-0.09101254302083817,
|
||||
-0.10103437302083762,
|
||||
-0.11041488302083735,
|
||||
-0.11916081302083725,
|
||||
-0.12789859302083784,
|
||||
-0.1353792530208402,
|
||||
-0.14160178302083892,
|
||||
-0.1455411330208385,
|
||||
-0.1484450830208388,
|
||||
-0.14823350302084037,
|
||||
-0.14591937302083835,
|
||||
-0.1409032730208395,
|
||||
-0.13525493302083902,
|
||||
-0.1279646530208396,
|
||||
-0.11963431302083904,
|
||||
-0.11089282302084058,
|
||||
-0.1027863830208382,
|
||||
-0.09717347302083823,
|
||||
-0.09343261302083761,
|
||||
-0.0913487130208388,
|
||||
-0.08906007302083907,
|
||||
-0.0865687230208394,
|
||||
-0.08407607302083875,
|
||||
-0.07844600302084004,
|
||||
-0.06968090302083851,
|
||||
-0.05947139302083926,
|
||||
-0.05095282302083959,
|
||||
-0.042428283020839785,
|
||||
-0.03218106302083967,
|
||||
-0.01819858302084043,
|
||||
-0.0021726530208390216,
|
||||
0.01393231697916164,
|
||||
0.028098946979159933,
|
||||
0.040326236979161934,
|
||||
0.05257029697916238,
|
||||
0.06479749697916048,
|
||||
0.07704745697916238
|
||||
]
|
||||
}
|
||||
}
|
||||
1
examples/edfa_model/pNFfit3.txt
Normal file
@@ -0,0 +1 @@
|
||||
1.6824099999999999e-04 4.6996099999999999e-02 3.5954899999999998e-02 5.8285099999999996e+00
|
||||
197
examples/eqpt_config.json
Normal file
@@ -0,0 +1,197 @@
|
||||
{ "Edfa":[{
|
||||
"type_variety": "high_detail_model_example",
|
||||
"gain_flatmax": 25,
|
||||
"gain_min": 15,
|
||||
"p_max": 21,
|
||||
"advanced_config_from_json": "std_medium_gain_advanced_config.json",
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": false
|
||||
},
|
||||
{
|
||||
"type_variety": "operator_model_example",
|
||||
"type_def": "variable_gain",
|
||||
"gain_flatmax": 26,
|
||||
"gain_min": 15,
|
||||
"p_max": 23,
|
||||
"nf_min": 6,
|
||||
"nf_max": 10,
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": false
|
||||
},
|
||||
{
|
||||
"type_variety": "low_noise",
|
||||
"type_def": "openroadm",
|
||||
"gain_flatmax": 27,
|
||||
"gain_min": 12,
|
||||
"p_max": 22,
|
||||
"nf_coef": [-8.104e-4,-6.221e-2,-5.889e-1,37.62],
|
||||
"allowed_for_design": false
|
||||
},
|
||||
{
|
||||
"type_variety": "standard",
|
||||
"type_def": "openroadm",
|
||||
"gain_flatmax": 27,
|
||||
"gain_min": 12,
|
||||
"p_max": 22,
|
||||
"nf_coef": [-5.952e-4,-6.250e-2,-1.071,28.99],
|
||||
"allowed_for_design": false
|
||||
},
|
||||
{
|
||||
"type_variety": "std_medium_gain",
|
||||
"type_def": "variable_gain",
|
||||
"gain_flatmax": 26,
|
||||
"gain_min": 15,
|
||||
"p_max": 23,
|
||||
"nf_min": 6,
|
||||
"nf_max": 10,
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": true
|
||||
},
|
||||
{
|
||||
"type_variety": "std_low_gain",
|
||||
"type_def": "variable_gain",
|
||||
"gain_flatmax": 16,
|
||||
"gain_min": 8,
|
||||
"p_max": 23,
|
||||
"nf_min": 6.5,
|
||||
"nf_max": 11,
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": true
|
||||
},
|
||||
{
|
||||
"type_variety": "std_fixed_gain",
|
||||
"type_def": "fixed_gain",
|
||||
"gain_flatmax": 21,
|
||||
"gain_min": 20,
|
||||
"p_max": 21,
|
||||
"nf0": 5.5,
|
||||
"allowed_for_design": false
|
||||
}
|
||||
],
|
||||
"Fiber":[{
|
||||
"type_variety": "SSMF",
|
||||
"dispersion": 1.67e-05,
|
||||
"gamma": 0.00127
|
||||
},
|
||||
{
|
||||
"type_variety": "NZDF",
|
||||
"dispersion": 0.5e-05,
|
||||
"gamma": 0.00146
|
||||
},
|
||||
{
|
||||
"type_variety": "LOF",
|
||||
"dispersion": 2.2e-05,
|
||||
"gamma": 0.000843
|
||||
}
|
||||
],
|
||||
"Spans":[{
|
||||
"power_mode":true,
|
||||
"delta_power_range_db": [0,0,0.5],
|
||||
"max_length": 150,
|
||||
"length_units": "km",
|
||||
"max_loss": 28,
|
||||
"padding": 10,
|
||||
"EOL": 0,
|
||||
"con_in": 0,
|
||||
"con_out": 0
|
||||
}
|
||||
],
|
||||
"Roadms":[{
|
||||
"gain_mode_default_loss": 20,
|
||||
"power_mode_pout_target": -20,
|
||||
"add_drop_osnr": 38
|
||||
}],
|
||||
"SI":[{
|
||||
"f_min": 191.3e12,
|
||||
"baud_rate": 32e9,
|
||||
"f_max":195.1e12,
|
||||
"spacing": 50e9,
|
||||
"power_dbm": 0,
|
||||
"power_range_db": [0,0,0.5],
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"sys_margins": 0
|
||||
}],
|
||||
"Transceiver":[
|
||||
{
|
||||
"type_variety": "vendorA_trx-type1",
|
||||
"frequency":{
|
||||
"min": 191.35e12,
|
||||
"max": 196.1e12
|
||||
},
|
||||
"mode":[
|
||||
{
|
||||
|
||||
"format": "mode 1",
|
||||
"baud_rate": 32e9,
|
||||
"OSNR": 11,
|
||||
"bit_rate": 100e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 37.5e9,
|
||||
"cost":1
|
||||
},
|
||||
{
|
||||
"format": "mode 2",
|
||||
"baud_rate": 66e9,
|
||||
"OSNR": 15,
|
||||
"bit_rate": 200e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 75e9,
|
||||
"cost":1
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type_variety": "Voyager",
|
||||
"frequency":{
|
||||
"min": 191.35e12,
|
||||
"max": 196.1e12
|
||||
},
|
||||
"mode":[
|
||||
{
|
||||
"format": "mode 1",
|
||||
"baud_rate": 32e9,
|
||||
"OSNR": 12,
|
||||
"bit_rate": 100e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 37.5e9,
|
||||
"cost":1
|
||||
},
|
||||
{
|
||||
"format": "mode 3",
|
||||
"baud_rate": 44e9,
|
||||
"OSNR": 18,
|
||||
"bit_rate": 300e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 62.5e9,
|
||||
"cost":1
|
||||
},
|
||||
{
|
||||
"format": "mode 2",
|
||||
"baud_rate": 66e9,
|
||||
"OSNR": 21,
|
||||
"bit_rate": 400e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 75e9,
|
||||
"cost":1
|
||||
},
|
||||
{
|
||||
"format": "mode 4",
|
||||
"baud_rate": 66e9,
|
||||
"OSNR": 16,
|
||||
"bit_rate": 200e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 75e9,
|
||||
"cost":1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
}
|
||||
196
examples/fused_roadm_example_network.json
Normal file
@@ -0,0 +1,196 @@
|
||||
{
|
||||
"elements": [
|
||||
{
|
||||
"uid": "trx Site_A",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_A",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Transceiver"
|
||||
},
|
||||
{
|
||||
"uid": "trx Site_C",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_C",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Transceiver"
|
||||
},
|
||||
{
|
||||
"uid": "roadm Site_A",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_A",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Roadm",
|
||||
"params": {
|
||||
"loss": 17
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "roadm Site_C",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_C",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Roadm"
|
||||
},
|
||||
{
|
||||
"uid": "ingress fused spans in Site_B",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_B",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Fused",
|
||||
"params": {
|
||||
"loss": 0.5
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "egress fused spans in Site_B",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_B",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Fused"
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Site_A \u2192 Site_B)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 40.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Site_B \u2192 Site_C)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 50.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Site_B \u2192 Site_A)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 40.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Site_C \u2192 Site_B)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 50.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2
|
||||
}
|
||||
}
|
||||
],
|
||||
"connections": [
|
||||
{
|
||||
"from_node": "roadm Site_A",
|
||||
"to_node": "fiber (Site_A \u2192 Site_B)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Site_B \u2192 Site_A)-",
|
||||
"to_node": "roadm Site_A"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Site_A \u2192 Site_B)-",
|
||||
"to_node": "ingress fused spans in Site_B"
|
||||
},
|
||||
{
|
||||
"from_node": "ingress fused spans in Site_B",
|
||||
"to_node": "fiber (Site_B \u2192 Site_C)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Site_C \u2192 Site_B)-",
|
||||
"to_node": "egress fused spans in Site_B"
|
||||
},
|
||||
{
|
||||
"from_node": "egress fused spans in Site_B",
|
||||
"to_node": "fiber (Site_B \u2192 Site_A)-"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Site_C",
|
||||
"to_node": "fiber (Site_C \u2192 Site_B)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Site_B \u2192 Site_C)-",
|
||||
"to_node": "roadm Site_C"
|
||||
},
|
||||
{
|
||||
"from_node": "trx Site_A",
|
||||
"to_node": "roadm Site_A"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Site_A",
|
||||
"to_node": "trx Site_A"
|
||||
},
|
||||
{
|
||||
"from_node": "trx Site_C",
|
||||
"to_node": "roadm Site_C"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Site_C",
|
||||
"to_node": "trx Site_C"
|
||||
}
|
||||
]
|
||||
}
|
||||
838
examples/meshTopologyExampleV2.json
Normal file
@@ -0,0 +1,838 @@
|
||||
{
|
||||
"elements": [
|
||||
{
|
||||
"uid": "trx Lannion_CAS",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Lannion_CAS",
|
||||
"region": "RLD",
|
||||
"latitude": 2.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Transceiver"
|
||||
},
|
||||
{
|
||||
"uid": "trx Lorient_KMA",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Lorient_KMA",
|
||||
"region": "RLD",
|
||||
"latitude": 2.0,
|
||||
"longitude": 3.0
|
||||
}
|
||||
},
|
||||
"type": "Transceiver"
|
||||
},
|
||||
{
|
||||
"uid": "trx Vannes_KBE",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Vannes_KBE",
|
||||
"region": "RLD",
|
||||
"latitude": 2.0,
|
||||
"longitude": 4.0
|
||||
}
|
||||
},
|
||||
"type": "Transceiver"
|
||||
},
|
||||
{
|
||||
"uid": "trx Rennes_STA",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Rennes_STA",
|
||||
"region": "RLD",
|
||||
"latitude": 0.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Transceiver"
|
||||
},
|
||||
{
|
||||
"uid": "trx Brest_KLA",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Brest_KLA",
|
||||
"region": "RLD",
|
||||
"latitude": 4.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Transceiver"
|
||||
},
|
||||
{
|
||||
"uid": "roadm Lannion_CAS",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Lannion_CAS",
|
||||
"region": "RLD",
|
||||
"latitude": 2.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Roadm"
|
||||
},
|
||||
{
|
||||
"uid": "roadm Lorient_KMA",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Lorient_KMA",
|
||||
"region": "RLD",
|
||||
"latitude": 2.0,
|
||||
"longitude": 3.0
|
||||
}
|
||||
},
|
||||
"type": "Roadm"
|
||||
},
|
||||
{
|
||||
"uid": "roadm Vannes_KBE",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Vannes_KBE",
|
||||
"region": "RLD",
|
||||
"latitude": 2.0,
|
||||
"longitude": 4.0
|
||||
}
|
||||
},
|
||||
"type": "Roadm"
|
||||
},
|
||||
{
|
||||
"uid": "roadm Rennes_STA",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Rennes_STA",
|
||||
"region": "RLD",
|
||||
"latitude": 0.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Roadm"
|
||||
},
|
||||
{
|
||||
"uid": "roadm Brest_KLA",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Brest_KLA",
|
||||
"region": "RLD",
|
||||
"latitude": 4.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Roadm"
|
||||
},
|
||||
{
|
||||
"uid": "west fused spans in Corlay",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Corlay",
|
||||
"region": "RLD",
|
||||
"latitude": 2.0,
|
||||
"longitude": 1.0
|
||||
}
|
||||
},
|
||||
"type": "Fused"
|
||||
},
|
||||
{
|
||||
"uid": "west fused spans in Loudeac",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Loudeac",
|
||||
"region": "RLD",
|
||||
"latitude": 2.0,
|
||||
"longitude": 2.0
|
||||
}
|
||||
},
|
||||
"type": "Fused"
|
||||
},
|
||||
{
|
||||
"uid": "west fused spans in Morlaix",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Morlaix",
|
||||
"region": "RLD",
|
||||
"latitude": 3.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fused"
|
||||
},
|
||||
{
|
||||
"uid": "east fused spans in Corlay",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Corlay",
|
||||
"region": "RLD",
|
||||
"latitude": 2.0,
|
||||
"longitude": 1.0
|
||||
}
|
||||
},
|
||||
"type": "Fused"
|
||||
},
|
||||
{
|
||||
"uid": "east fused spans in Loudeac",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Loudeac",
|
||||
"region": "RLD",
|
||||
"latitude": 2.0,
|
||||
"longitude": 2.0
|
||||
}
|
||||
},
|
||||
"type": "Fused"
|
||||
},
|
||||
{
|
||||
"uid": "east fused spans in Morlaix",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Morlaix",
|
||||
"region": "RLD",
|
||||
"latitude": 3.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fused"
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Lannion_CAS → Corlay)-F061",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.0,
|
||||
"longitude": 0.5
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 20.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Corlay → Loudeac)-F010",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.0,
|
||||
"longitude": 1.5
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 50.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Loudeac → Lorient_KMA)-F054",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.0,
|
||||
"longitude": 2.5
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 60.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Lorient_KMA → Vannes_KBE)-F055",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.0,
|
||||
"longitude": 3.5
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 10.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Lannion_CAS → Stbrieuc)-F056",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1.5,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 60.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Stbrieuc → Rennes_STA)-F057",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 65.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Lannion_CAS → Morlaix)-F059",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.5,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 40.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Morlaix → Brest_KLA)-F060",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 3.5,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 35.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Brest_KLA → Quimper)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.5,
|
||||
"longitude": 0.5
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 75.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Quimper → Lorient_KMA)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1.5,
|
||||
"longitude": 2.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 70.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Ploermel → Vannes_KBE)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1.5,
|
||||
"longitude": 3.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 50.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Ploermel → Rennes_STA)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 1.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 55.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Corlay → Lannion_CAS)-F061",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.0,
|
||||
"longitude": 0.5
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 20.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Loudeac → Corlay)-F010",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.0,
|
||||
"longitude": 1.5
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 50.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Lorient_KMA → Loudeac)-F054",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.0,
|
||||
"longitude": 2.5
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 60.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Vannes_KBE → Lorient_KMA)-F055",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.0,
|
||||
"longitude": 3.5
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 10.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Stbrieuc → Lannion_CAS)-F056",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1.5,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 60.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Rennes_STA → Stbrieuc)-F057",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 65.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Morlaix → Lannion_CAS)-F059",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.5,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 40.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Brest_KLA → Morlaix)-F060",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 3.5,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 35.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Quimper → Brest_KLA)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2.5,
|
||||
"longitude": 0.5
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 75.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Lorient_KMA → Quimper)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1.5,
|
||||
"longitude": 2.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 70.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Vannes_KBE → Ploermel)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1.5,
|
||||
"longitude": 3.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 50.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Rennes_STA → Ploermel)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 1.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 55.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": null,
|
||||
"con_out": null
|
||||
}
|
||||
}
|
||||
],
|
||||
"connections": [
|
||||
{
|
||||
"from_node": "roadm Lannion_CAS",
|
||||
"to_node": "fiber (Lannion_CAS → Corlay)-F061"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Corlay → Lannion_CAS)-F061",
|
||||
"to_node": "roadm Lannion_CAS"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Lannion_CAS",
|
||||
"to_node": "fiber (Lannion_CAS → Stbrieuc)-F056"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Stbrieuc → Lannion_CAS)-F056",
|
||||
"to_node": "roadm Lannion_CAS"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Lannion_CAS",
|
||||
"to_node": "fiber (Lannion_CAS → Morlaix)-F059"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Morlaix → Lannion_CAS)-F059",
|
||||
"to_node": "roadm Lannion_CAS"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Lannion_CAS → Corlay)-F061",
|
||||
"to_node": "west fused spans in Corlay"
|
||||
},
|
||||
{
|
||||
"from_node": "west fused spans in Corlay",
|
||||
"to_node": "fiber (Corlay → Loudeac)-F010"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Loudeac → Corlay)-F010",
|
||||
"to_node": "east fused spans in Corlay"
|
||||
},
|
||||
{
|
||||
"from_node": "east fused spans in Corlay",
|
||||
"to_node": "fiber (Corlay → Lannion_CAS)-F061"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Corlay → Loudeac)-F010",
|
||||
"to_node": "west fused spans in Loudeac"
|
||||
},
|
||||
{
|
||||
"from_node": "west fused spans in Loudeac",
|
||||
"to_node": "fiber (Loudeac → Lorient_KMA)-F054"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Lorient_KMA → Loudeac)-F054",
|
||||
"to_node": "east fused spans in Loudeac"
|
||||
},
|
||||
{
|
||||
"from_node": "east fused spans in Loudeac",
|
||||
"to_node": "fiber (Loudeac → Corlay)-F010"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Lorient_KMA",
|
||||
"to_node": "fiber (Lorient_KMA → Loudeac)-F054"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Loudeac → Lorient_KMA)-F054",
|
||||
"to_node": "roadm Lorient_KMA"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Lorient_KMA",
|
||||
"to_node": "fiber (Lorient_KMA → Vannes_KBE)-F055"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Vannes_KBE → Lorient_KMA)-F055",
|
||||
"to_node": "roadm Lorient_KMA"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Lorient_KMA",
|
||||
"to_node": "fiber (Lorient_KMA → Quimper)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Quimper → Lorient_KMA)-",
|
||||
"to_node": "roadm Lorient_KMA"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Vannes_KBE",
|
||||
"to_node": "fiber (Vannes_KBE → Lorient_KMA)-F055"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Lorient_KMA → Vannes_KBE)-F055",
|
||||
"to_node": "roadm Vannes_KBE"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Vannes_KBE",
|
||||
"to_node": "fiber (Vannes_KBE → Ploermel)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Ploermel → Vannes_KBE)-",
|
||||
"to_node": "roadm Vannes_KBE"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Lannion_CAS → Stbrieuc)-F056",
|
||||
"to_node": "fiber (Stbrieuc → Rennes_STA)-F057"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Rennes_STA → Stbrieuc)-F057",
|
||||
"to_node": "fiber (Stbrieuc → Lannion_CAS)-F056"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Rennes_STA",
|
||||
"to_node": "fiber (Rennes_STA → Stbrieuc)-F057"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Stbrieuc → Rennes_STA)-F057",
|
||||
"to_node": "roadm Rennes_STA"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Rennes_STA",
|
||||
"to_node": "fiber (Rennes_STA → Ploermel)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Ploermel → Rennes_STA)-",
|
||||
"to_node": "roadm Rennes_STA"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Lannion_CAS → Morlaix)-F059",
|
||||
"to_node": "west fused spans in Morlaix"
|
||||
},
|
||||
{
|
||||
"from_node": "west fused spans in Morlaix",
|
||||
"to_node": "fiber (Morlaix → Brest_KLA)-F060"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Brest_KLA → Morlaix)-F060",
|
||||
"to_node": "east fused spans in Morlaix"
|
||||
},
|
||||
{
|
||||
"from_node": "east fused spans in Morlaix",
|
||||
"to_node": "fiber (Morlaix → Lannion_CAS)-F059"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Brest_KLA",
|
||||
"to_node": "fiber (Brest_KLA → Morlaix)-F060"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Morlaix → Brest_KLA)-F060",
|
||||
"to_node": "roadm Brest_KLA"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Brest_KLA",
|
||||
"to_node": "fiber (Brest_KLA → Quimper)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Quimper → Brest_KLA)-",
|
||||
"to_node": "roadm Brest_KLA"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Brest_KLA → Quimper)-",
|
||||
"to_node": "fiber (Quimper → Lorient_KMA)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Lorient_KMA → Quimper)-",
|
||||
"to_node": "fiber (Quimper → Brest_KLA)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Vannes_KBE → Ploermel)-",
|
||||
"to_node": "fiber (Ploermel → Rennes_STA)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Rennes_STA → Ploermel)-",
|
||||
"to_node": "fiber (Ploermel → Vannes_KBE)-"
|
||||
},
|
||||
{
|
||||
"from_node": "trx Lannion_CAS",
|
||||
"to_node": "roadm Lannion_CAS"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Lannion_CAS",
|
||||
"to_node": "trx Lannion_CAS"
|
||||
},
|
||||
{
|
||||
"from_node": "trx Lorient_KMA",
|
||||
"to_node": "roadm Lorient_KMA"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Lorient_KMA",
|
||||
"to_node": "trx Lorient_KMA"
|
||||
},
|
||||
{
|
||||
"from_node": "trx Vannes_KBE",
|
||||
"to_node": "roadm Vannes_KBE"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Vannes_KBE",
|
||||
"to_node": "trx Vannes_KBE"
|
||||
},
|
||||
{
|
||||
"from_node": "trx Rennes_STA",
|
||||
"to_node": "roadm Rennes_STA"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Rennes_STA",
|
||||
"to_node": "trx Rennes_STA"
|
||||
},
|
||||
{
|
||||
"from_node": "trx Brest_KLA",
|
||||
"to_node": "roadm Brest_KLA"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Brest_KLA",
|
||||
"to_node": "trx Brest_KLA"
|
||||
}
|
||||
]
|
||||
}
|
||||
BIN
examples/meshTopologyExampleV2.xls
Normal file
Binary file not shown.
307
examples/meshTopologyExampleV2_services.json
Normal file
@@ -0,0 +1,307 @@
|
||||
{
|
||||
"path-request": [
|
||||
{
|
||||
"request-id": "0",
|
||||
"source": "Lorient_KMA",
|
||||
"destination": "Vannes_KBE",
|
||||
"src-tp-id": "trx Lorient_KMA",
|
||||
"dst-tp-id": "trx Vannes_KBE",
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": null,
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"n": "null",
|
||||
"m": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": 80,
|
||||
"output-power": 0.0012589254117941673,
|
||||
"path_bandwidth": 100000000000.0
|
||||
}
|
||||
},
|
||||
"optimizations": {
|
||||
"explicit-route-include-objects": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "1",
|
||||
"source": "Brest_KLA",
|
||||
"destination": "Vannes_KBE",
|
||||
"src-tp-id": "trx Brest_KLA",
|
||||
"dst-tp-id": "trx Vannes_KBE",
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"n": "null",
|
||||
"m": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": 0.0012589254117941673,
|
||||
"path_bandwidth": 200000000000.0
|
||||
}
|
||||
},
|
||||
"optimizations": {
|
||||
"explicit-route-include-objects": [
|
||||
{
|
||||
"index": 0,
|
||||
"unnumbered-hop": {
|
||||
"node-id": "roadm Brest_KLA",
|
||||
"link-tp-id": "link-tp-id is not used",
|
||||
"hop-type": "loose",
|
||||
"direction": "direction is not used"
|
||||
},
|
||||
"label-hop": {
|
||||
"te-label": {
|
||||
"generic": "generic is not used",
|
||||
"direction": "direction is not used"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"index": 1,
|
||||
"unnumbered-hop": {
|
||||
"node-id": "roadm Lannion_CAS",
|
||||
"link-tp-id": "link-tp-id is not used",
|
||||
"hop-type": "loose",
|
||||
"direction": "direction is not used"
|
||||
},
|
||||
"label-hop": {
|
||||
"te-label": {
|
||||
"generic": "generic is not used",
|
||||
"direction": "direction is not used"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"index": 2,
|
||||
"unnumbered-hop": {
|
||||
"node-id": "roadm Lorient_KMA",
|
||||
"link-tp-id": "link-tp-id is not used",
|
||||
"hop-type": "loose",
|
||||
"direction": "direction is not used"
|
||||
},
|
||||
"label-hop": {
|
||||
"te-label": {
|
||||
"generic": "generic is not used",
|
||||
"direction": "direction is not used"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"index": 3,
|
||||
"unnumbered-hop": {
|
||||
"node-id": "roadm Vannes_KBE",
|
||||
"link-tp-id": "link-tp-id is not used",
|
||||
"hop-type": "loose",
|
||||
"direction": "direction is not used"
|
||||
},
|
||||
"label-hop": {
|
||||
"te-label": {
|
||||
"generic": "generic is not used",
|
||||
"direction": "direction is not used"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "3",
|
||||
"source": "Lannion_CAS",
|
||||
"destination": "Rennes_STA",
|
||||
"src-tp-id": "trx Lannion_CAS",
|
||||
"dst-tp-id": "trx Rennes_STA",
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "vendorA_trx-type1",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"n": "null",
|
||||
"m": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": null,
|
||||
"path_bandwidth": 60000000000.0
|
||||
}
|
||||
},
|
||||
"optimizations": {
|
||||
"explicit-route-include-objects": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "4",
|
||||
"source": "Rennes_STA",
|
||||
"destination": "Lannion_CAS",
|
||||
"src-tp-id": "trx Rennes_STA",
|
||||
"dst-tp-id": "trx Lannion_CAS",
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "vendorA_trx-type1",
|
||||
"trx_mode": null,
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"n": "null",
|
||||
"m": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 75000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": 0.0019952623149688794,
|
||||
"path_bandwidth": 150000000000.0
|
||||
}
|
||||
},
|
||||
"optimizations": {
|
||||
"explicit-route-include-objects": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "5",
|
||||
"source": "Rennes_STA",
|
||||
"destination": "Lannion_CAS",
|
||||
"src-tp-id": "trx Rennes_STA",
|
||||
"dst-tp-id": "trx Lannion_CAS",
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "vendorA_trx-type1",
|
||||
"trx_mode": "mode 2",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"n": "null",
|
||||
"m": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 75000000000.0,
|
||||
"max-nb-of-channel": 63,
|
||||
"output-power": 0.0019952623149688794,
|
||||
"path_bandwidth": 20000000000.0
|
||||
}
|
||||
},
|
||||
"optimizations": {
|
||||
"explicit-route-include-objects": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "6",
|
||||
"source": "Lannion_CAS",
|
||||
"destination": "Lorient_KMA",
|
||||
"src-tp-id": "trx Lannion_CAS",
|
||||
"dst-tp-id": "trx Lorient_KMA",
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"n": "null",
|
||||
"m": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": 76,
|
||||
"output-power": 0.001,
|
||||
"path_bandwidth": 300000000000.0
|
||||
}
|
||||
},
|
||||
"optimizations": {
|
||||
"explicit-route-include-objects": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "7",
|
||||
"source": "Lannion_CAS",
|
||||
"destination": "Lorient_KMA",
|
||||
"src-tp-id": "trx Lannion_CAS",
|
||||
"dst-tp-id": "trx Lorient_KMA",
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"n": "null",
|
||||
"m": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": 76,
|
||||
"output-power": 0.001,
|
||||
"path_bandwidth": 400000000000.0
|
||||
}
|
||||
},
|
||||
"optimizations": {
|
||||
"explicit-route-include-objects": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "7b",
|
||||
"source": "Lannion_CAS",
|
||||
"destination": "Lorient_KMA",
|
||||
"src-tp-id": "trx Lannion_CAS",
|
||||
"dst-tp-id": "trx Lorient_KMA",
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"n": "null",
|
||||
"m": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 75000000000.0,
|
||||
"max-nb-of-channel": 50,
|
||||
"output-power": 0.001,
|
||||
"path_bandwidth": 400000000000.0
|
||||
}
|
||||
},
|
||||
"optimizations": {
|
||||
"explicit-route-include-objects": []
|
||||
}
|
||||
}
|
||||
],
|
||||
"synchronization": [
|
||||
{
|
||||
"synchronization-id": "3",
|
||||
"svec": {
|
||||
"relaxable": "False",
|
||||
"link-diverse": "True",
|
||||
"node-diverse": "True",
|
||||
"request-id-number": [
|
||||
"3",
|
||||
"1"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"synchronization-id": "4",
|
||||
"svec": {
|
||||
"relaxable": "False",
|
||||
"link-diverse": "True",
|
||||
"node-diverse": "True",
|
||||
"request-id-number": [
|
||||
"4",
|
||||
"5"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
BIN
examples/meshTopologyToy.xls
Normal file
Binary file not shown.
BIN
examples/meshTopologyToy2.xls
Normal file
Binary file not shown.
399
examples/path_requests_run.py
Executable file
@@ -0,0 +1,399 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
path_requests_run.py
|
||||
====================
|
||||
|
||||
Reads a JSON request file in accordance with the Yang model
|
||||
for requesting path computation and returns path results in terms
|
||||
of path and feasibilty.
|
||||
|
||||
See: draft-ietf-teas-yang-path-computation-01.txt
|
||||
"""
|
||||
|
||||
from sys import exit
|
||||
from argparse import ArgumentParser
|
||||
from pathlib import Path
|
||||
from collections import namedtuple
|
||||
from logging import getLogger, basicConfig, CRITICAL, DEBUG, INFO
|
||||
from json import dumps, loads
|
||||
from networkx import (draw_networkx_nodes, draw_networkx_edges,
|
||||
draw_networkx_labels)
|
||||
from numpy import mean
|
||||
from gnpy.core.service_sheet import convert_service_sheet, Request_element, Element
|
||||
from gnpy.core.utils import load_json
|
||||
from gnpy.core.network import load_network, build_network, set_roadm_loss, save_network
|
||||
from gnpy.core.equipment import load_equipment, trx_mode_params, automatic_nch, automatic_spacing
|
||||
from gnpy.core.elements import Transceiver, Roadm, Edfa, Fused, Fiber
|
||||
from gnpy.core.utils import db2lin, lin2db
|
||||
from gnpy.core.request import (Path_request, Result_element, compute_constrained_path,
|
||||
propagate, jsontocsv, Disjunction, compute_path_dsjctn, requests_aggregation,
|
||||
propagate_and_optimize_mode)
|
||||
from copy import copy, deepcopy
|
||||
from textwrap import dedent
|
||||
from math import ceil
|
||||
import time
|
||||
|
||||
#EQPT_LIBRARY_FILENAME = Path(__file__).parent / 'eqpt_config.json'
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
parser = ArgumentParser(description = 'A function that computes performances for a list of services provided in a json file or an excel sheet.')
|
||||
parser.add_argument('network_filename', nargs='?', type = Path, default= Path(__file__).parent / 'meshTopologyExampleV2.xls')
|
||||
parser.add_argument('service_filename', nargs='?', type = Path, default= Path(__file__).parent / 'meshTopologyExampleV2.xls')
|
||||
parser.add_argument('eqpt_filename', nargs='?', type = Path, default=Path(__file__).parent / 'eqpt_config.json')
|
||||
parser.add_argument('-v', '--verbose', action='count', default=0, help='increases verbosity for each occurrence')
|
||||
parser.add_argument('-o', '--output', type = Path)
|
||||
|
||||
|
||||
def requests_from_json(json_data,equipment):
|
||||
requests_list = []
|
||||
|
||||
for req in json_data['path-request']:
|
||||
# init all params from request
|
||||
params = {}
|
||||
params['request_id'] = req['request-id']
|
||||
params['source'] = req['src-tp-id']
|
||||
params['destination'] = req['dst-tp-id']
|
||||
params['trx_type'] = req['path-constraints']['te-bandwidth']['trx_type']
|
||||
params['trx_mode'] = req['path-constraints']['te-bandwidth']['trx_mode']
|
||||
params['format'] = params['trx_mode']
|
||||
nd_list = req['optimizations']['explicit-route-include-objects']
|
||||
params['nodes_list'] = [n['unnumbered-hop']['node-id'] for n in nd_list]
|
||||
params['loose_list'] = [n['unnumbered-hop']['hop-type'] for n in nd_list]
|
||||
params['spacing'] = req['path-constraints']['te-bandwidth']['spacing']
|
||||
|
||||
# recover trx physical param (baudrate, ...) from type and mode
|
||||
# in trx_mode_params optical power is read from equipment['SI']['default'] and
|
||||
# nb_channel is computed based on min max frequency and spacing
|
||||
trx_params = trx_mode_params(equipment,params['trx_type'],params['trx_mode'],True)
|
||||
params.update(trx_params)
|
||||
# print(trx_params['min_spacing'])
|
||||
# optical power might be set differently in the request. if it is indicated then the
|
||||
# params['power'] is updated
|
||||
if req['path-constraints']['te-bandwidth']['output-power']:
|
||||
params['power'] = req['path-constraints']['te-bandwidth']['output-power']
|
||||
|
||||
# same process for nb-channel
|
||||
f_min = params['f_min']
|
||||
f_max_from_si = params['f_max']
|
||||
if req['path-constraints']['te-bandwidth']['max-nb-of-channel'] is not None :
|
||||
nch = req['path-constraints']['te-bandwidth']['max-nb-of-channel']
|
||||
params['nb_channel'] = nch
|
||||
spacing = params['spacing']
|
||||
params['f_max'] = f_min + nch*spacing
|
||||
else :
|
||||
params['nb_channel'] = automatic_nch(f_min,f_max_from_si,params['spacing'])
|
||||
|
||||
consistency_check(params, f_max_from_si)
|
||||
|
||||
try :
|
||||
params['path_bandwidth'] = req['path-constraints']['te-bandwidth']['path_bandwidth']
|
||||
except KeyError:
|
||||
pass
|
||||
requests_list.append(Path_request(**params))
|
||||
return requests_list
|
||||
|
||||
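For reference, a minimal sketch of one entry of json_data['path-request'] as consumed by requests_from_json() above, using the same YANG-style keys as the service JSON shown earlier in this diff; the concrete values are illustrative only.

# Hypothetical minimal entry of json_data['path-request'] (values for illustration only):
example_request = {
    'request-id': '0',
    'src-tp-id': 'trx Lannion_CAS',
    'dst-tp-id': 'trx Lorient_KMA',
    'path-constraints': {
        'te-bandwidth': {
            'trx_type': 'Voyager',
            'trx_mode': 'mode 1',
            'spacing': 75e9,            # Hz
            'max-nb-of-channel': 50,
            'output-power': 0.001,      # W
            'path_bandwidth': 400e9     # bit/s
        }
    },
    # each route object carries an 'unnumbered-hop' with 'node-id' and 'hop-type'
    'optimizations': {'explicit-route-include-objects': []}
}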
def consistency_check(params, f_max_from_si):
|
||||
f_min = params['f_min']
|
||||
f_max = params['f_max']
|
||||
max_recommended_nb_channels = automatic_nch(f_min,f_max,
|
||||
params['spacing'])
|
||||
if params['baud_rate'] is not None:
|
||||
#implicitely means that a mode is defined with min_spacing
|
||||
if params['min_spacing']>params['spacing'] :
|
||||
msg = f'Request {params["request_id"]} has spacing below transponder {params["trx_type"]}'+\
|
||||
f' {params["trx_mode"]} min spacing value {params["min_spacing"]*1e-9}GHz.\n'+\
|
||||
'Computation stopped'
|
||||
print(msg)
|
||||
logger.critical(msg)
|
||||
exit()
|
||||
if f_max>f_max_from_si:
|
||||
msg = dedent(f'''
|
||||
Requested channel number {params["nb_channel"]}, baud rate {params["baud_rate"]*1e-9} GHz and requested spacing {params["spacing"]*1e-9} GHz
|
||||
are not consistent with frequency range {f_min*1e-12} THz, {f_max*1e-12} THz, min recommended spacing {params["min_spacing"]*1e-9} GHz.
|
||||
max recommended nb of channels is {max_recommended_nb_channels}
|
||||
Computation stopped.''')
|
||||
logger.critical(msg)
|
||||
exit()
|
||||
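A quick worked example of the channel-count arithmetic used above (numbers assumed for illustration, not taken from the repository):

# f_max implied by the request: f_max = f_min + nch * spacing
f_min = 191.35e12            # Hz, assumed SI lower bound
spacing = 75e9               # Hz
nch = 50
f_max = f_min + nch * spacing    # 195.10e12 Hz = 195.1 THz
# If the SI block of eqpt_config only reaches 195.1 THz, this request just fits;
# one more channel (or a wider spacing) would trigger the "Computation stopped." branch.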
|
||||
|
||||
def disjunctions_from_json(json_data):
|
||||
disjunctions_list = []
|
||||
|
||||
for snc in json_data['synchronization']:
|
||||
params = {}
|
||||
params['disjunction_id'] = snc['synchronization-id']
|
||||
params['relaxable'] = snc['svec']['relaxable']
|
||||
params['link_diverse'] = snc['svec']['link-diverse']
|
||||
params['node_diverse'] = snc['svec']['node-diverse']
|
||||
params['disjunctions_req'] = snc['svec']['request-id-number']
|
||||
disjunctions_list.append(Disjunction(**params))
|
||||
return disjunctions_list
|
||||
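A minimal sketch of the mapping performed by disjunctions_from_json() above, reusing the synchronization entry shown in the service JSON earlier in this diff:

# One 'synchronization' entry from the service file ...
snc = {
    'synchronization-id': '3',
    'svec': {
        'relaxable': 'False',
        'link-diverse': 'True',
        'node-diverse': 'True',
        'request-id-number': ['3', '1']
    }
}
# ... is turned into the keyword arguments of Disjunction():
params = {
    'disjunction_id': snc['synchronization-id'],
    'relaxable': snc['svec']['relaxable'],
    'link_diverse': snc['svec']['link-diverse'],
    'node_diverse': snc['svec']['node-diverse'],
    'disjunctions_req': snc['svec']['request-id-number']
}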
|
||||
|
||||
def load_requests(filename,eqpt_filename):
|
||||
if filename.suffix.lower() == '.xls':
|
||||
logger.info('Automatically converting requests from XLS to JSON')
|
||||
json_data = convert_service_sheet(filename,eqpt_filename)
|
||||
else:
|
||||
with open(filename, encoding='utf-8') as f:
|
||||
json_data = loads(f.read())
|
||||
return json_data
|
||||
|
||||
def compute_path(network, equipment, pathreqlist):
|
||||
|
||||
# This function is obsolete and not relevant with respect to network building: suggest either to correct
|
||||
# or to suppress it
|
||||
|
||||
path_res_list = []
|
||||
|
||||
for pathreq in pathreqlist:
|
||||
#need to rebuild the network for each path because the total power
|
||||
#can be different and the choice of amplifiers in autodesign is power dependent
|
||||
#but the design is the same if the total power is the same
|
||||
#TODO parametrize the total spectrum power so the same design can be shared
|
||||
p_db = lin2db(pathreq.power*1e3)
|
||||
p_total_db = p_db + lin2db(pathreq.nb_channel)
|
||||
build_network(network, equipment, p_db, p_total_db)
|
||||
pathreq.nodes_list.append(pathreq.destination)
|
||||
#we assume that the destination is a strict constraint
|
||||
pathreq.loose_list.append('strict')
|
||||
print(f'Computing path from {pathreq.source} to {pathreq.destination}')
|
||||
print(f'with path constraint: {[pathreq.source]+pathreq.nodes_list}') #adding first node to be clearer on the output
|
||||
total_path = compute_constrained_path(network, pathreq)
|
||||
print(f'Computed path (roadms):{[e.uid for e in total_path if isinstance(e, Roadm)]}\n')
|
||||
|
||||
if total_path :
|
||||
total_path = propagate(total_path,pathreq,equipment, show=False)
|
||||
else:
|
||||
total_path = []
|
||||
# we record the last transceiver object in order to have the whole
|
||||
# information about spectrum. Important Note: since transceivers
|
||||
# attached to roadms are actually logical elements to simulate
|
||||
# performance, several demands having the same destination may use
|
||||
# the same transponder for the performance simulation. This is why
|
||||
# we use deepcopy: to ensure each propagation is recorded and not
|
||||
# overwritten
|
||||
|
||||
path_res_list.append(deepcopy(total_path))
|
||||
return path_res_list
|
||||
|
||||
def compute_path_with_disjunction(network, equipment, pathreqlist, pathlist):
|
||||
|
||||
# use a list but a dictionary might be helpful to find a path based on request_id
|
||||
# TODO change all these req, dsjct, res lists into dict !
|
||||
path_res_list = []
|
||||
|
||||
for i,pathreq in enumerate(pathreqlist):
|
||||
|
||||
# use the power specified in the request; it might differ from the one specified for design
|
||||
# the power is an optional parameter for requests definition
|
||||
# if it is not given, use the one defined in eqpt_config.json
|
||||
p_db = lin2db(pathreq.power*1e3)
|
||||
p_total_db = p_db + lin2db(pathreq.nb_channel)
|
||||
print(f'request {pathreq.request_id}')
|
||||
print(f'Computing path from {pathreq.source} to {pathreq.destination}')
|
||||
print(f'with path constraint: {[pathreq.source]+pathreq.nodes_list}') #adding first node to be clearer on the output
|
||||
|
||||
total_path = pathlist[i]
|
||||
print(f'Computed path (roadms):{[e.uid for e in total_path if isinstance(e, Roadm)]}\n')
|
||||
# for debug
|
||||
# print(f'{pathreq.baud_rate} {pathreq.power} {pathreq.spacing} {pathreq.nb_channel}')
|
||||
if total_path :
|
||||
if pathreq.baud_rate is not None:
|
||||
total_path = propagate(total_path,pathreq,equipment, show=False)
|
||||
temp_snr01nm = round(mean(total_path[-1].snr+lin2db(pathreq.baud_rate/(12.5e9))),2)
|
||||
if temp_snr01nm < pathreq.OSNR :
|
||||
msg = f'\tWarning! Request {pathreq.request_id} computed path from {pathreq.source} to {pathreq.destination} does not pass with {pathreq.tsp_mode}\n' +\
|
||||
f'\tcomputedSNR in 0.1nm = {temp_snr01nm} - required osnr {pathreq.OSNR}\n'
|
||||
print(msg)
|
||||
logger.warning(msg)
|
||||
total_path = []
|
||||
else:
|
||||
total_path,mode = propagate_and_optimize_mode(total_path,pathreq,equipment)
|
||||
# if no baudrate satisfies spacing, no mode is returned and an empty path is returned
|
||||
# a warning is shown in the propagate_and_optimize_mode
|
||||
if mode is not None :
|
||||
# propagate_and_optimize_mode function returns the mode with the highest bitrate
|
||||
# that passes. if no mode passes, then it returns an empty path
|
||||
pathreq.baud_rate = mode['baud_rate']
|
||||
pathreq.tsp_mode = mode['format']
|
||||
pathreq.format = mode['format']
|
||||
pathreq.OSNR = mode['OSNR']
|
||||
pathreq.tx_osnr = mode['tx_osnr']
|
||||
pathreq.bit_rate = mode['bit_rate']
|
||||
else :
|
||||
total_path = []
|
||||
# we record the last transceiver object in order to have the whole
|
||||
# information about spectrum. Important Note: since transceivers
|
||||
# attached to roadms are actually logical elements to simulate
|
||||
# performance, several demands having the same destination may use
|
||||
# the same transponder for the performance simulation. This is why
|
||||
# we use deepcopy: to ensure each propagation is recorded and not
|
||||
# overwritten
|
||||
|
||||
path_res_list.append(deepcopy(total_path))
|
||||
return path_res_list
|
||||
|
||||
def correct_route_list(network, pathreqlist):
|
||||
# prepares the format of the route list of nodes to be consistent
|
||||
# remove wrong names, remove endpoints
|
||||
# also correct source and destination
|
||||
anytype = [n.uid for n in network.nodes() if not isinstance(n, Transceiver) and not isinstance(n, Fiber)]
|
||||
# TODO there is a problem of identification of fibers in case of parallel fibers between two adjacent roadms
|
||||
# so fiber constraint is not supported
|
||||
transponders = [n.uid for n in network.nodes() if isinstance(n, Transceiver)]
|
||||
for pathreq in pathreqlist:
|
||||
for i,n_id in enumerate(pathreq.nodes_list):
|
||||
# replace possibly wrong name with a formatted roadm name
|
||||
# print(n_id)
|
||||
if n_id not in anytype :
|
||||
nodes_suggestion = [uid for uid in anytype \
|
||||
if n_id.lower() in uid.lower()]
|
||||
if pathreq.loose_list[i] == 'loose':
|
||||
if len(nodes_suggestion)>0 :
|
||||
new_n = nodes_suggestion[0]
|
||||
print(f'invalid route node specified:\
|
||||
\n\'{n_id}\', replaced with \'{new_n}\'')
|
||||
pathreq.nodes_list[i] = new_n
|
||||
else:
|
||||
print(f'\x1b[1;33;40m'+f'invalid route node specified \'{n_id}\', could not use it as constraint, skipped!'+'\x1b[0m')
|
||||
pathreq.nodes_list.remove(n_id)
|
||||
pathreq.loose_list.pop(i)
|
||||
else:
|
||||
msg = f'\x1b[1;33;40m'+f'could not find node : {n_id} in network topology. Strict constraint can not be applied.'+'\x1b[0m'
|
||||
logger.critical(msg)
|
||||
raise ValueError(msg)
|
||||
if pathreq.source not in transponders:
|
||||
msg = f'\x1b[1;31;40m'+f'Request: {pathreq.request_id}: could not find transponder source : {pathreq.source}.'+'\x1b[0m'
|
||||
logger.critical(msg)
|
||||
print(f'{msg}\nComputation stopped.')
|
||||
exit()
|
||||
|
||||
if pathreq.destination not in transponders:
|
||||
msg = f'\x1b[1;31;40m'+f'Request: {pathreq.request_id}: could not find transponder destination : {pathreq.destination}.'+'\x1b[0m'
|
||||
logger.critical(msg)
|
||||
print(f'{msg}\nComputation stopped.')
|
||||
exit()
|
||||
|
||||
# TODO remove endpoints from this list in case they were added by the user in the xls or json files
|
||||
return pathreqlist
|
||||
|
||||
def correct_disjn(disjn):
|
||||
local_disjn = disjn.copy()
|
||||
for el in local_disjn:
|
||||
for d in local_disjn:
|
||||
if set(el.disjunctions_req) == set(d.disjunctions_req) and\
|
||||
el.disjunction_id != d.disjunction_id:
|
||||
local_disjn.remove(d)
|
||||
return local_disjn
|
||||
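To illustrate the deduplication performed by correct_disjn() above, a small sketch with a hypothetical stand-in for the Disjunction objects (only the two attributes the function inspects):

from collections import namedtuple

# Hypothetical stand-in exposing the attributes correct_disjn() compares.
D = namedtuple('D', 'disjunction_id disjunctions_req')
disjn = [D('3', ['3', '1']), D('99', ['1', '3'])]   # same request set, different ids
# set(['3', '1']) == set(['1', '3']), so the second entry is dropped
# and only the disjunction with id '3' is kept.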
|
||||
|
||||
def path_result_json(pathresult):
|
||||
data = {
|
||||
'path': [n.json for n in pathresult]
|
||||
}
|
||||
return data
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
start = time.time()
|
||||
args = parser.parse_args()
|
||||
basicConfig(level={2: DEBUG, 1: INFO, 0: CRITICAL}.get(args.verbose, DEBUG))
|
||||
logger.info(f'Computing path requests {args.service_filename} into JSON format')
|
||||
print('\x1b[1;34;40m'+f'Computing path requests {args.service_filename} into JSON format'+ '\x1b[0m')
|
||||
# for debug
|
||||
# print( args.eqpt_filename)
|
||||
data = load_requests(args.service_filename,args.eqpt_filename)
|
||||
equipment = load_equipment(args.eqpt_filename)
|
||||
network = load_network(args.network_filename,equipment)
|
||||
|
||||
# Build the network once using the default power defined in SI in eqpt config
|
||||
# TODO power density: db2lin(power_dbm)/power_dbm * nb channels as defined by
|
||||
# spacing, f_min and f_max
|
||||
p_db = equipment['SI']['default'].power_dbm
|
||||
|
||||
p_total_db = p_db + lin2db(automatic_nch(equipment['SI']['default'].f_min,\
|
||||
equipment['SI']['default'].f_max, equipment['SI']['default'].spacing))
|
||||
build_network(network, equipment, p_db, p_total_db)
|
||||
save_network(args.network_filename, network)
|
||||
|
||||
rqs = requests_from_json(data, equipment)
|
||||
|
||||
# check that request ids are unique. Non-unique ids may
|
||||
# mess up the computation: better to stop the computation
|
||||
all_ids = [r.request_id for r in rqs]
|
||||
if len(all_ids) != len(set(all_ids)):
|
||||
for a in list(set(all_ids)):
|
||||
all_ids.remove(a)
|
||||
msg = f'Requests id {all_ids} are not unique'
|
||||
logger.critical(msg)
|
||||
exit()
|
||||
rqs = correct_route_list(network, rqs)
|
||||
|
||||
# pths = compute_path(network, equipment, rqs)
|
||||
dsjn = disjunctions_from_json(data)
|
||||
|
||||
print('\x1b[1;34;40m'+f'List of disjunctions'+ '\x1b[0m')
|
||||
print(dsjn)
|
||||
# need to warn or correct in case of wrong disjunction form
|
||||
# disjunction must not be repeated with same or different ids
|
||||
dsjn = correct_disjn(dsjn)
|
||||
|
||||
# Aggregate demands with same exact constraints
|
||||
print('\x1b[1;34;40m'+f'Aggregating similar requests'+ '\x1b[0m')
|
||||
|
||||
rqs,dsjn = requests_aggregation(rqs,dsjn)
|
||||
# TODO export novel set of aggregated demands in a json file
|
||||
|
||||
print('\x1b[1;34;40m'+'The following services have been requested:'+ '\x1b[0m')
|
||||
print(rqs)
|
||||
|
||||
print('\x1b[1;34;40m'+f'Computing all paths with constraints'+ '\x1b[0m')
|
||||
pths = compute_path_dsjctn(network, equipment, rqs, dsjn)
|
||||
|
||||
print('\x1b[1;34;40m'+f'Propagating on selected path'+ '\x1b[0m')
|
||||
propagatedpths = compute_path_with_disjunction(network, equipment, rqs, pths)
|
||||
|
||||
end = time.time()
|
||||
print(f'computation time {end-start}')
|
||||
print('\x1b[1;34;40m'+f'Result summary'+ '\x1b[0m')
|
||||
|
||||
header = ['req id', ' demand',' snr@bandwidth',' snr@0.1nm',' Receiver minOSNR', ' mode', ' Gbit/s' , ' nb of tsp pairs']
|
||||
data = []
|
||||
data.append(header)
|
||||
for i, p in enumerate(propagatedpths):
|
||||
if p:
|
||||
line = [f'{rqs[i].request_id}', f' {rqs[i].source} to {rqs[i].destination} : ', f'{round(mean(p[-1].snr),2)}',\
|
||||
f'{round(mean(p[-1].snr+lin2db(rqs[i].baud_rate/(12.5e9))),2)}',\
|
||||
f'{rqs[i].OSNR}', f'{rqs[i].tsp_mode}' , f'{round(rqs[i].path_bandwidth * 1e-9,2)}' , f'{ceil(rqs[i].path_bandwidth / rqs[i].bit_rate) }']
|
||||
else:
|
||||
line = [f'{rqs[i].request_id}',f' {rqs[i].source} to {rqs[i].destination} : not feasible ']
|
||||
data.append(line)
|
||||
|
||||
col_width = max(len(word) for row in data for word in row[2:]) # padding
|
||||
firstcol_width = max(len(row[0]) for row in data ) # padding
|
||||
secondcol_width = max(len(row[1]) for row in data ) # padding
|
||||
for row in data:
|
||||
firstcol = ''.join(row[0].ljust(firstcol_width))
|
||||
secondcol = ''.join(row[1].ljust(secondcol_width))
|
||||
remainingcols = ''.join(word.center(col_width,' ') for word in row[2:])
|
||||
print(f'{firstcol} {secondcol} {remainingcols}')
|
||||
|
||||
|
||||
if args.output :
|
||||
result = []
|
||||
# assumes that the list of rqs and the list of propagatedpths have the same order
|
||||
for i,p in enumerate(propagatedpths):
|
||||
result.append(Result_element(rqs[i],p))
|
||||
temp = path_result_json(result)
|
||||
fnamecsv = f'{str(args.output)[0:len(str(args.output))-len(str(args.output.suffix))]}.csv'
|
||||
fnamejson = f'{str(args.output)[0:len(str(args.output))-len(str(args.output.suffix))]}.json'
|
||||
with open(fnamejson, 'w', encoding='utf-8') as f:
|
||||
f.write(dumps(path_result_json(result), indent=2, ensure_ascii=False))
|
||||
with open(fnamecsv,"w", encoding='utf-8') as fcsv :
|
||||
jsontocsv(temp,equipment,fcsv)
|
||||
print('\x1b[1;34;40m'+f'saving in {args.output} and {fnamecsv}'+ '\x1b[0m')
|
||||
|
||||
301
examples/std_medium_gain_advanced_config.json
Normal file
@@ -0,0 +1,301 @@
|
||||
{ "nf_fit_coeff": [
|
||||
0.000168241,
|
||||
0.0469961,
|
||||
0.0359549,
|
||||
5.82851
|
||||
],
|
||||
"nf_ripple": [
|
||||
-0.3110761646066259,
|
||||
-0.3110761646066259,
|
||||
-0.31110274831665313,
|
||||
-0.31419329378173544,
|
||||
-0.3172854168606314,
|
||||
-0.32037911876162584,
|
||||
-0.3233255190215882,
|
||||
-0.31624321721895354,
|
||||
-0.30915729645781326,
|
||||
-0.30206775396360075,
|
||||
-0.2949045115165272,
|
||||
-0.26632156113294336,
|
||||
-0.23772399031437283,
|
||||
-0.20911178784023846,
|
||||
-0.18048410390821285,
|
||||
-0.14379944379052215,
|
||||
-0.10709599992470213,
|
||||
-0.07037375788020579,
|
||||
-0.03372858157230583,
|
||||
-0.015660302006048,
|
||||
0.0024172385953583004,
|
||||
0.020504047353947653,
|
||||
0.03860013139908377,
|
||||
0.05670549786742816,
|
||||
0.07482015390297145,
|
||||
0.0838762040768461,
|
||||
0.09284481475528361,
|
||||
0.1018180306253394,
|
||||
0.11079585523492333,
|
||||
0.1020395478432815,
|
||||
0.09310160456603413,
|
||||
0.08415906712621996,
|
||||
0.07521193198077789,
|
||||
0.0676340601339394,
|
||||
0.06005437964543287,
|
||||
0.052470799141237305,
|
||||
0.044883315610536455,
|
||||
0.037679759069084225,
|
||||
0.03047647598902483,
|
||||
0.02326948274513522,
|
||||
0.01605877647020772,
|
||||
0.021248462316134083,
|
||||
0.02657315875107553,
|
||||
0.03190060058247842,
|
||||
0.03723078993416436,
|
||||
0.04256372893215024,
|
||||
0.047899419704645264,
|
||||
0.03915515813685565,
|
||||
0.030289222542492025,
|
||||
0.021418708618354456,
|
||||
0.012573926129294415,
|
||||
0.006240488799898697,
|
||||
-9.622162373026585e-05,
|
||||
-0.006436207679519103,
|
||||
-0.012779471908040341,
|
||||
-0.02038153550619876,
|
||||
-0.027999803010447587,
|
||||
-0.035622012697103154,
|
||||
-0.043236398934156144,
|
||||
-0.04493583574805963,
|
||||
-0.04663615264317309,
|
||||
-0.048337350303318156,
|
||||
-0.050039429413028365,
|
||||
-0.051742390657545205,
|
||||
-0.05342028484370278,
|
||||
-0.05254242298580185,
|
||||
-0.05166410580536087,
|
||||
-0.05078533294804249,
|
||||
-0.04990610405914272,
|
||||
-0.05409792133358102,
|
||||
-0.05832916277634124,
|
||||
-0.06256260169582961,
|
||||
-0.06660356886269536,
|
||||
-0.04779792991567815,
|
||||
-0.028982516728038848,
|
||||
-0.010157321677553965,
|
||||
0.00861320615127981,
|
||||
0.01913736978785662,
|
||||
0.029667009055877668,
|
||||
0.04020212822983975,
|
||||
0.050742731588695494,
|
||||
0.061288823415841555,
|
||||
0.07184040799914815,
|
||||
0.1043252636301016,
|
||||
0.13687829834471027,
|
||||
0.1694483010211072,
|
||||
0.202035284929368,
|
||||
0.23624619427167134,
|
||||
0.27048596623174515,
|
||||
0.30474360397422756,
|
||||
0.3390191214858807,
|
||||
0.36358851509924695,
|
||||
0.38814205928193013,
|
||||
0.41270842850729195,
|
||||
0.4372876328262819,
|
||||
0.4372876328262819
|
||||
],
|
||||
"dgt": [
|
||||
2.714526681131686,
|
||||
2.705443819238505,
|
||||
2.6947834587664494,
|
||||
2.6841217449620203,
|
||||
2.6681935771243177,
|
||||
2.6521732021128046,
|
||||
2.630396440815385,
|
||||
2.602860350286428,
|
||||
2.5696460593920065,
|
||||
2.5364027376452056,
|
||||
2.499446286796604,
|
||||
2.4587748041127506,
|
||||
2.414398437185221,
|
||||
2.3699990328716107,
|
||||
2.322373696229342,
|
||||
2.271520771371253,
|
||||
2.2174389328192197,
|
||||
2.16337565384239,
|
||||
2.1183028432496016,
|
||||
2.082225099873648,
|
||||
2.055100772005235,
|
||||
2.0279625371819305,
|
||||
2.0008103857988204,
|
||||
1.9736443063300082,
|
||||
1.9482128147680253,
|
||||
1.9245345552113182,
|
||||
1.9026104247588487,
|
||||
1.8806927939516411,
|
||||
1.862235672444246,
|
||||
1.847275503201129,
|
||||
1.835814081380705,
|
||||
1.824381436842932,
|
||||
1.8139629377087627,
|
||||
1.8045606557581335,
|
||||
1.7961751115773796,
|
||||
1.7877868031023945,
|
||||
1.7793941781790852,
|
||||
1.7709972329654864,
|
||||
1.7625959636196327,
|
||||
1.7541903672600494,
|
||||
1.7459181197626403,
|
||||
1.737780757913635,
|
||||
1.7297783508684146,
|
||||
1.7217732861435076,
|
||||
1.7137640932265894,
|
||||
1.7057507692361864,
|
||||
1.6918150918099673,
|
||||
1.6719047669939942,
|
||||
1.6460167077689267,
|
||||
1.6201194134191075,
|
||||
1.5986915141218316,
|
||||
1.5817353179379183,
|
||||
1.569199764184379,
|
||||
1.5566577309558969,
|
||||
1.545374152761467,
|
||||
1.5353620432989845,
|
||||
1.5266220576235803,
|
||||
1.5178910621476225,
|
||||
1.5097346239790443,
|
||||
1.502153039909686,
|
||||
1.495145456062699,
|
||||
1.488134243479226,
|
||||
1.48111939735681,
|
||||
1.474100442252211,
|
||||
1.4670307626366115,
|
||||
1.4599103316162523,
|
||||
1.45273959485914,
|
||||
1.445565137158368,
|
||||
1.4340878115214444,
|
||||
1.418273806730323,
|
||||
1.3981208704326855,
|
||||
1.3779439775587023,
|
||||
1.3598972673004606,
|
||||
1.3439818461440451,
|
||||
1.3301807335621048,
|
||||
1.316383926863083,
|
||||
1.3040618749785347,
|
||||
1.2932153453410835,
|
||||
1.2838336236692311,
|
||||
1.2744470198196236,
|
||||
1.2650555289898042,
|
||||
1.2556591482982988,
|
||||
1.2428104897182262,
|
||||
1.2264996957264114,
|
||||
1.2067249615595257,
|
||||
1.1869318618366975,
|
||||
1.1672278304018044,
|
||||
1.1476135933863398,
|
||||
1.1280891949729075,
|
||||
1.108555289615659,
|
||||
1.0895983485572227,
|
||||
1.0712204022764056,
|
||||
1.0534217504465226,
|
||||
1.0356155337864215,
|
||||
1.017807767853702,
|
||||
1.0
|
||||
],
|
||||
"gain_ripple": [
|
||||
0.1359703369791596,
|
||||
0.11822862697916037,
|
||||
0.09542181697916163,
|
||||
0.06245819697916133,
|
||||
0.02602813697916062,
|
||||
-0.0036199830208403228,
|
||||
-0.018326963020840026,
|
||||
-0.0246928330208398,
|
||||
-0.016792253020838643,
|
||||
-0.0028138630208403015,
|
||||
0.017572956979162058,
|
||||
0.038328296979159404,
|
||||
0.054956336979159914,
|
||||
0.0670723869791594,
|
||||
0.07091459697916136,
|
||||
0.07094413697916124,
|
||||
0.07114372697916238,
|
||||
0.07533675697916209,
|
||||
0.08731066697916035,
|
||||
0.10313984697916112,
|
||||
0.12276252697916235,
|
||||
0.14239527697916188,
|
||||
0.15945681697916214,
|
||||
0.1739275269791598,
|
||||
0.1767381569791624,
|
||||
0.17037189697916233,
|
||||
0.15216302697916007,
|
||||
0.13114358697916018,
|
||||
0.10802383697916085,
|
||||
0.08548825697916129,
|
||||
0.06916723697916183,
|
||||
0.05848224697916038,
|
||||
0.05447361697916264,
|
||||
0.05154489697916276,
|
||||
0.04946107697915991,
|
||||
0.04717897697916129,
|
||||
0.04551704697916037,
|
||||
0.04467697697916151,
|
||||
0.04072968697916224,
|
||||
0.03285456697916089,
|
||||
0.023488786979161347,
|
||||
0.01659282697915998,
|
||||
0.013321846979160057,
|
||||
0.011234826979162449,
|
||||
0.01030063697916006,
|
||||
0.00936596697916059,
|
||||
0.00874012697916271,
|
||||
0.00842583697916055,
|
||||
0.006965146979162284,
|
||||
0.0040435869791615175,
|
||||
0.0007104669791608842,
|
||||
-0.0015763130208377163,
|
||||
-0.006936193020838033,
|
||||
-0.016475303020840215,
|
||||
-0.028748483020837767,
|
||||
-0.039618433020837784,
|
||||
-0.051112303020840244,
|
||||
-0.06468462302083822,
|
||||
-0.07868024302083754,
|
||||
-0.09101254302083817,
|
||||
-0.10103437302083762,
|
||||
-0.11041488302083735,
|
||||
-0.11916081302083725,
|
||||
-0.12789859302083784,
|
||||
-0.1353792530208402,
|
||||
-0.14160178302083892,
|
||||
-0.1455411330208385,
|
||||
-0.1484450830208388,
|
||||
-0.14823350302084037,
|
||||
-0.14591937302083835,
|
||||
-0.1409032730208395,
|
||||
-0.13525493302083902,
|
||||
-0.1279646530208396,
|
||||
-0.11963431302083904,
|
||||
-0.11089282302084058,
|
||||
-0.1027863830208382,
|
||||
-0.09717347302083823,
|
||||
-0.09343261302083761,
|
||||
-0.0913487130208388,
|
||||
-0.08906007302083907,
|
||||
-0.0865687230208394,
|
||||
-0.08407607302083875,
|
||||
-0.07844600302084004,
|
||||
-0.06968090302083851,
|
||||
-0.05947139302083926,
|
||||
-0.05095282302083959,
|
||||
-0.042428283020839785,
|
||||
-0.03218106302083967,
|
||||
-0.01819858302084043,
|
||||
-0.0021726530208390216,
|
||||
0.01393231697916164,
|
||||
0.028098946979159933,
|
||||
0.040326236979161934,
|
||||
0.05257029697916238,
|
||||
0.06479749697916048,
|
||||
0.07704745697916238
|
||||
]
|
||||
}
|
||||
222
examples/transmission_main_example.py
Executable file
@@ -0,0 +1,222 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
transmission_main_example.py
|
||||
============================
|
||||
|
||||
Main example for transmission simulation.
|
||||
|
||||
Reads from network JSON (by default, `edfa_example_network.json`)
|
||||
'''
|
||||
|
||||
from gnpy.core.equipment import load_equipment, trx_mode_params
|
||||
from gnpy.core.utils import db2lin, lin2db, write_csv
|
||||
from argparse import ArgumentParser
|
||||
from sys import exit
|
||||
from pathlib import Path
|
||||
from json import loads
|
||||
from collections import Counter
|
||||
from logging import getLogger, basicConfig, INFO, ERROR, DEBUG
|
||||
from numpy import linspace, mean
|
||||
from matplotlib.pyplot import show, axis, figure, title
|
||||
from networkx import (draw_networkx_nodes, draw_networkx_edges,
|
||||
draw_networkx_labels, dijkstra_path)
|
||||
from gnpy.core.network import load_network, build_network, save_network
|
||||
from gnpy.core.elements import Transceiver, Fiber, Edfa, Roadm
|
||||
from gnpy.core.info import create_input_spectral_information, SpectralInformation, Channel, Power, Pref
|
||||
from gnpy.core.request import Path_request, RequestParams, compute_constrained_path, propagate
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
def plot_results(network, path, source, destination):
|
||||
path_edges = set(zip(path[:-1], path[1:]))
|
||||
edges = set(network.edges()) - path_edges
|
||||
pos = {n: (n.lng, n.lat) for n in network.nodes()}
|
||||
labels = {n: n.location.city for n in network.nodes() if isinstance(n, Transceiver)}
|
||||
city_labels = set(labels.values())
|
||||
for n in network.nodes():
|
||||
if n.location.city and n.location.city not in city_labels:
|
||||
labels[n] = n.location.city
|
||||
city_labels.add(n.location.city)
|
||||
label_pos = pos
|
||||
|
||||
fig = figure()
|
||||
kwargs = {'figure': fig, 'pos': pos}
|
||||
plot = draw_networkx_nodes(network, nodelist=network.nodes(), node_color='#ababab', **kwargs)
|
||||
draw_networkx_nodes(network, nodelist=path, node_color='#ff0000', **kwargs)
|
||||
draw_networkx_edges(network, edgelist=edges, edge_color='#ababab', **kwargs)
|
||||
draw_networkx_edges(network, edgelist=path_edges, edge_color='#ff0000', **kwargs)
|
||||
draw_networkx_labels(network, labels=labels, font_size=14, **{**kwargs, 'pos': label_pos})
|
||||
title(f'Propagating from {source.loc.city} to {destination.loc.city}')
|
||||
axis('off')
|
||||
show()
|
||||
|
||||
|
||||
def main(network, equipment, source, destination, req = None):
|
||||
result_dicts = {}
|
||||
network_data = [{
|
||||
'network_name' : str(args.filename),
|
||||
'source' : source.uid,
|
||||
'destination' : destination.uid
|
||||
}]
|
||||
result_dicts.update({'network': network_data})
|
||||
design_data = [{
|
||||
'power_mode' : equipment['Spans']['default'].power_mode,
|
||||
'span_power_range' : equipment['Spans']['default'].delta_power_range_db,
|
||||
'design_pch' : equipment['SI']['default'].power_dbm,
|
||||
'baud_rate' : equipment['SI']['default'].baud_rate
|
||||
}]
|
||||
result_dicts.update({'design': design_data})
|
||||
simulation_data = []
|
||||
result_dicts.update({'simulation results': simulation_data})
|
||||
|
||||
power_mode = equipment['Spans']['default'].power_mode
|
||||
print('\n'.join([f'Power mode is set to {power_mode}',
|
||||
f'=> it can be modified in eqpt_config.json - Spans']))
|
||||
|
||||
pref_ch_db = lin2db(req.power*1e3) #reference channel power / span (SL=20dB)
|
||||
pref_total_db = pref_ch_db + lin2db(req.nb_channel) #reference total power / span (SL=20dB)
|
||||
build_network(network, equipment, pref_ch_db, pref_total_db)
|
||||
path = compute_constrained_path(network, req)
|
||||
|
||||
spans = [s.length for s in path if isinstance(s, Fiber)]
|
||||
print(f'\nThere are {len(spans)} fiber spans over {sum(spans):.0f}m between {source.uid} and {destination.uid}')
|
||||
print(f'\nNow propagating between {source.uid} and {destination.uid}:')
|
||||
|
||||
try:
|
||||
p_start, p_stop, p_step = equipment['SI']['default'].power_range_db
|
||||
p_num = abs(int(round((p_stop - p_start)/p_step))) + 1 if p_step != 0 else 1
|
||||
power_range = list(linspace(p_start, p_stop, p_num))
|
||||
except TypeError:
|
||||
print('invalid power range definition in eqpt_config, should be power_range_db: [lower, upper, step]')
|
||||
power_range = [0]
|
||||
|
||||
for dp_db in power_range:
|
||||
req.power = db2lin(pref_ch_db + dp_db)*1e-3
|
||||
print(f'\nPropagating with input power = {lin2db(req.power*1e3):.2f}dBm :')
|
||||
propagate(path, req, equipment, show=len(power_range)==1)
|
||||
print(f'\nTransmission result for input power = {lin2db(req.power*1e3):.2f}dBm :')
|
||||
print(destination)
|
||||
|
||||
#print(f'\n !!!!!!!!!!!!!!!!! TEST POINT !!!!!!!!!!!!!!!!!!!!!')
|
||||
#print(f'carriers ase output of {path[1]} =\n {list(path[1].carriers("out", "nli"))}')
|
||||
# => use "in" or "out" parameter
|
||||
# => use "nli" or "ase" or "signal" or "total" parameter
|
||||
|
||||
simulation_data.append({
|
||||
'Pch_dBm' : pref_ch_db + dp_db,
|
||||
'OSNR_ASE_0.1nm' : round(mean(destination.osnr_ase_01nm),2),
|
||||
'OSNR_ASE_signal_bw' : round(mean(destination.osnr_ase),2),
|
||||
'SNR_nli_signal_bw' : round(mean(destination.osnr_nli),2),
|
||||
'SNR_total_signal_bw' : round(mean(destination.snr),2)
|
||||
})
|
||||
write_csv(result_dicts, 'simulation_result.csv')
|
||||
return path
|
||||
|
||||
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('-e', '--equipment', type=Path,
|
||||
default=Path(__file__).parent / 'eqpt_config.json')
|
||||
parser.add_argument('-pl', '--plot', action='store_true')
|
||||
parser.add_argument('-v', '--verbose', action='count', default=0, help='increases verbosity for each occurrence')
|
||||
parser.add_argument('-l', '--list-nodes', action='store_true', help='list all transceiver nodes')
|
||||
parser.add_argument('-po', '--power', default=0, help='channel ref power in dBm')
|
||||
parser.add_argument('-names', '--names-matching', action='store_true', help='display network names that are close matches')
|
||||
#parser.add_argument('-plb', '--power-lower-bound', default=0, help='power sweep lower bound')
|
||||
#parser.add_argument('-pub', '--power-upper-bound', default=1, help='power sweep upper bound')
|
||||
parser.add_argument('filename', nargs='?', type=Path,
|
||||
default=Path(__file__).parent / 'edfa_example_network.json')
|
||||
parser.add_argument('source', nargs='?', help='source node')
|
||||
parser.add_argument('destination', nargs='?', help='destination node')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = parser.parse_args()
|
||||
basicConfig(level={0: ERROR, 1: INFO, 2: DEBUG}.get(args.verbose, DEBUG))
|
||||
|
||||
equipment = load_equipment(args.equipment)
|
||||
# logger.info(equipment)
|
||||
# print(args.filename)
|
||||
network = load_network(args.filename, equipment, args.names_matching)
|
||||
# print(network)
|
||||
|
||||
transceivers = {n.uid: n for n in network.nodes() if isinstance(n, Transceiver)}
|
||||
|
||||
if not transceivers:
|
||||
exit('Network has no transceivers!')
|
||||
if len(transceivers) < 2:
|
||||
exit('Network has only one transceiver!')
|
||||
|
||||
if args.list_nodes:
|
||||
for uid in transceivers:
|
||||
print(uid)
|
||||
exit()
|
||||
|
||||
#First try to find exact match if source/destination provided
|
||||
if args.source:
|
||||
source = transceivers.pop(args.source, None)
|
||||
valid_source = True if source else False
|
||||
else:
|
||||
source = None
|
||||
logger.info('No source node specified: picking random transceiver')
|
||||
|
||||
if args.destination:
|
||||
destination = transceivers.pop(args.destination, None)
|
||||
valid_destination = True if destination else False
|
||||
else:
|
||||
destination = None
|
||||
logger.info('No destination node specified: picking random transceiver')
|
||||
|
||||
#If no exact match try to find partial match
|
||||
if args.source and not source:
|
||||
#TODO code a more advanced regex to find nodes match
|
||||
source = next((transceivers.pop(uid) for uid in transceivers \
|
||||
if args.source.lower() in uid.lower()), None)
|
||||
|
||||
if args.destination and not destination:
|
||||
#TODO code a more advanced regex to find nodes match
|
||||
destination = next((transceivers.pop(uid) for uid in transceivers \
|
||||
if args.destination.lower() in uid.lower()), None)
|
||||
|
||||
#If no partial match or no source/destination provided pick random
|
||||
if not source:
|
||||
source = list(transceivers.values())[0]
|
||||
del transceivers[source.uid]
|
||||
|
||||
if not destination:
|
||||
destination = list(transceivers.values())[0]
|
||||
|
||||
logger.info(f'source = {args.source!r}')
|
||||
logger.info(f'destination = {args.destination!r}')
|
||||
|
||||
params = {}
|
||||
params['request_id'] = 0
|
||||
params['trx_type'] = ''
|
||||
params['trx_mode'] = ''
|
||||
params['source'] = source.uid
|
||||
params['destination'] = destination.uid
|
||||
params['nodes_list'] = [destination.uid]
|
||||
params['loose_list'] = ['strict']
|
||||
params['format'] = ''
|
||||
params['path_bandwidth'] = 0
|
||||
trx_params = trx_mode_params(equipment)
|
||||
if args.power:
|
||||
trx_params['power'] = db2lin(float(args.power))*1e-3
|
||||
params.update(trx_params)
|
||||
req = Path_request(**params)
|
||||
path = main(network, equipment, source, destination, req)
|
||||
save_network(args.filename, network)
|
||||
|
||||
if not args.source:
|
||||
print(f'\n(No source node specified: picked {source.uid})')
|
||||
elif not valid_source:
|
||||
print(f'\n(Invalid source node {args.source!r} replaced with {source.uid})')
|
||||
|
||||
if not args.destination:
|
||||
print(f'\n(No destination node specified: picked {destination.uid})')
|
||||
elif not valid_destination:
|
||||
print(f'\n(Invalid destination node {args.destination!r} replaced with {destination.uid})')
|
||||
|
||||
if args.plot:
|
||||
plot_results(network, path, source, destination)
|
||||
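A standalone sketch of the power-sweep grid computed in main() above from power_range_db = [lower, upper, step]; the bounds here are assumed values:

from numpy import linspace

p_start, p_stop, p_step = -2, 3, 1        # assumed power_range_db from eqpt_config.json
p_num = abs(int(round((p_stop - p_start) / p_step))) + 1 if p_step != 0 else 1
power_range = list(linspace(p_start, p_stop, p_num))
# -> [-2.0, -1.0, 0.0, 1.0, 2.0, 3.0]; each dp_db offsets the reference channel power pref_ch_db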
36
examples/write_path_jsontocsv.py
Normal file
@@ -0,0 +1,36 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
write_path_jsontocsv.py
|
||||
========================
|
||||
|
||||
Reads a JSON path result file in accordance with the YANG model for requesting
|
||||
path computation and writes results to a CSV file.
|
||||
|
||||
See: draft-ietf-teas-yang-path-computation-01.txt
|
||||
"""
|
||||
|
||||
from argparse import ArgumentParser
|
||||
from pathlib import Path
|
||||
from json import loads
|
||||
from gnpy.core.equipment import load_equipment
|
||||
from gnpy.core.request import jsontocsv
|
||||
|
||||
|
||||
parser = ArgumentParser(description = 'A function that writes JSON path results to a CSV file.')
|
||||
parser.add_argument('filename', nargs='?', type = Path)
|
||||
parser.add_argument('output_filename', nargs='?', type = Path)
|
||||
parser.add_argument('eqpt_filename', nargs='?', type = Path, default=Path(__file__).parent / 'eqpt_config.json')
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = parser.parse_args()
|
||||
|
||||
with open(args.output_filename, 'w', encoding='utf-8') as file:
|
||||
with open(args.filename, encoding='utf-8') as f:
|
||||
print(f'Reading {args.filename}')
|
||||
json_data = loads(f.read())
|
||||
equipment = load_equipment(args.eqpt_filename)
|
||||
print(f'Writing in {args.output_filename}')
|
||||
jsontocsv(json_data,equipment,file)
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from .gnpy import (raised_cosine_comb, analytic_formula, compute_psi, fwm_eff,
|
||||
get_f_computed_interp, get_freqarray, gn_analytic, gn_model,
|
||||
interpolate_in_range, GN_integral)
|
||||
|
||||
__all__ = ['gnpy']
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
import gnpy as gn
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
import time
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
# Accuracy parameters
|
||||
flag_analytic = True
|
||||
num_computed_values = 2
|
||||
interp_method = 'linear'
|
||||
threshold_fwm = 50
|
||||
n_points = 500
|
||||
n_points_min = 4
|
||||
accuracy_param = {'is_analytic': flag_analytic, 'points_not_interp': num_computed_values, 'kind_interp': interp_method,
|
||||
'th_fwm': threshold_fwm, 'n_points': n_points, 'n_points_min': n_points_min}
|
||||
|
||||
# Parallelization Parameters
|
||||
n_cores = 1
|
||||
|
||||
# Spectrum parameters
|
||||
num_ch = 95
|
||||
rs = np.ones(num_ch) * 0.032
|
||||
b_ch = rs # For root raised cosine shapes, the -3 dB band is equal to the symbol rate
|
||||
roll_off = np.ones(num_ch) * 0.05
|
||||
power = np.ones(num_ch) * 0.001
|
||||
central_freq = 193.5
|
||||
if num_ch % 2 == 1: # odd number of channels
|
||||
fch = np.arange(-(num_ch // 2), (num_ch // 2) + 1, 1) * 0.05 # noqa: E501
|
||||
else:
|
||||
fch = (np.arange(0, num_ch) - (num_ch / 2.0) + 0.5) * 0.05
|
||||
spectrum_param = {'num_ch': num_ch, 'f_ch': fch, 'b_ch': b_ch, 'roll_off': roll_off, 'power': power}
|
||||
|
||||
# Fiber Parameters
|
||||
beta2 = 21.27
|
||||
l_span = 100.0
|
||||
loss = 0.2
|
||||
gam = 1.27
|
||||
fiber_param = {'alpha': loss, 'span_length': l_span, 'beta_2': beta2, 'gamma': gam}
|
||||
|
||||
# EDFA Parameters
|
||||
noise_fig = 5.5
|
||||
gain_zero = 25.0
|
||||
gain_tilting = 0.5
|
||||
|
||||
# Compute the GN model
|
||||
t = time.time()
|
||||
nli_cmp, f_nli_cmp, nli_int, f_nli_int = gn.gn_model(spectrum_param, fiber_param, accuracy_param, n_cores) # noqa: E501
|
||||
print('Elapsed: %s' % (time.time() - t))
|
||||
|
||||
# Compute the EDFA profile
|
||||
gain, g_ase = gn.compute_edfa_profile(gain_zero, gain_tilting, noise_fig, central_freq, fch)
|
||||
|
||||
# Compute the raised cosine comb
|
||||
f1_array = np.linspace(np.amin(fch), np.amax(fch), 1e3)
|
||||
gtx = gn.raised_cosine_comb(f1_array, rs, roll_off, fch, power)
|
||||
gtx = gtx + 10 ** -6 # To avoid log10 issues.
|
||||
|
||||
# Plot the results
|
||||
plt.figure(1)
|
||||
plt.plot(f1_array, 10 * np.log10(gtx), '-b', label='WDM comb')
|
||||
plt.plot(f_nli_cmp, 10 * np.log10(nli_cmp), 'ro', label='GNLI computed')
|
||||
plt.plot(f_nli_int, 10 * np.log10(nli_int), 'g+', label='GNLI interpolated')
|
||||
plt.plot(fch, 10 * np.log10(g_ase), 'yo', label='GASE')
|
||||
plt.ylabel('PSD [dB(W/THz)]')
|
||||
plt.xlabel('f [THz]')
|
||||
plt.legend(loc='upper left')
|
||||
plt.grid()
|
||||
plt.draw()
|
||||
plt.show()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,84 +0,0 @@
|
||||
import os
|
||||
import gnpy as gn
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
import time
|
||||
|
||||
|
||||
def main_ole():
|
||||
|
||||
# String indicating the folder in which outputs will be saved
|
||||
string_date_time = time.strftime("%Y-%m-%d") + '_' + time.strftime("%H-%M-%S")
|
||||
output_path = './output/' + string_date_time + '/'
|
||||
|
||||
# Creates the directory if it doesn't exist
|
||||
if not os.path.isdir(output_path):
|
||||
os.makedirs(output_path)
|
||||
|
||||
from configuration.fiber_parameters import fibers
|
||||
from configuration.general_parameters import sys_param, control_param
|
||||
from configuration.link_description import link
|
||||
from input.spectrum_in import spectrum
|
||||
|
||||
# adapt the laser position to the grid
|
||||
if len(spectrum['laser_position']) < sys_param['ns']:
|
||||
n = sys_param['ns'] - len(spectrum['laser_position'])
|
||||
missing_zeros = [0 for _ in range(n)]
|
||||
spectrum['laser_position'] += missing_zeros
|
||||
elif len(spectrum['laser_position']) > sys_param['ns']:
|
||||
print('Error: the spectrum definition requires a larger number of slots ns in the spectrum grid')
|
||||
|
||||
delta_f = 6.25E-3
|
||||
f_0 = sys_param['f0']
|
||||
f_cent = f_0 + ((sys_param['ns'] // 2.0) * delta_f)
|
||||
|
||||
n_ch = spectrum['laser_position'].count(1)
|
||||
# Get comb parameters
|
||||
f_ch = np.zeros(n_ch)
|
||||
count = 0
|
||||
for index, bool_laser in enumerate(spectrum['laser_position']):
|
||||
if bool_laser:
|
||||
f_ch[count] = delta_f * index + (f_0 - f_cent)
|
||||
count += 1
|
||||
|
||||
t = time.time()
|
||||
# It runs the OLE
|
||||
osnr_nl_db, osnr_lin_db = gn.ole(spectrum, link, fibers, sys_param, control_param, output_path=output_path)
|
||||
print('Elapsed: %s' % (time.time() - t))
|
||||
|
||||
# Compute the raised cosine comb
|
||||
power, rs, roll_off, p_ase, p_nli, n_ch = gn.get_spectrum_param(spectrum)
|
||||
f1_array = np.linspace(np.amin(f_ch), np.amax(f_ch), 1e3)
|
||||
gtx = gn.raised_cosine_comb(f1_array, rs, roll_off, f_ch, power)
|
||||
gtx = gtx + 10 ** -6 # To avoid log10 issues.
|
||||
|
||||
# OSNR at in the central channel
|
||||
ind_c = n_ch // 2
|
||||
osnr_lin_central_db = osnr_lin_db[ind_c]
|
||||
osnr_nl_central_db = osnr_nl_db[ind_c]
|
||||
print('The linear OSNR in the central channel is: ' + str(osnr_lin_central_db) + ' dB')
|
||||
print('The non linear OSNR in the central channel is: ' + str(osnr_nl_central_db) + ' dB')
|
||||
|
||||
# Plot the results
|
||||
plt.figure(1)
|
||||
plt.plot(f1_array, 10 * np.log10(gtx), '-b', label='WDM comb PSD [dB(W/THz)]')
|
||||
plt.plot(f_ch, 10 * np.log10(p_nli), 'ro', label='NLI [dBw]')
|
||||
plt.plot(f_ch, 10 * np.log10(p_ase), 'g+', label='ASE noise [dBw]')
|
||||
plt.ylabel('')
|
||||
plt.xlabel('f [THz]')
|
||||
plt.legend(loc='upper right')
|
||||
plt.grid()
|
||||
plt.draw()
|
||||
|
||||
plt.figure(2)
|
||||
plt.plot(f_ch, osnr_nl_db, 'ro', label='non-linear OSNR')
|
||||
plt.plot(f_ch, osnr_lin_db, 'g+', label='linear OSNR')
|
||||
plt.ylabel('OSNR [dB]')
|
||||
plt.xlabel('f [THz]')
|
||||
plt.legend(loc='lower left')
|
||||
plt.grid()
|
||||
plt.show()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main_ole()
|
||||
17
gnpy/cli.py
@@ -1,17 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""Console script for gnpy."""
|
||||
|
||||
import click
|
||||
|
||||
|
||||
@click.command()
|
||||
def main(args=None):
|
||||
"""Console script for gnpy."""
|
||||
click.echo("Replace this message by putting your code into "
|
||||
"gnpy.cli.main")
|
||||
click.echo("See click documentation at http://click.pocoo.org/")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1 +0,0 @@
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
# coding=utf-8
|
||||
""" fiber_parameters.py describes the fiber parameters.
|
||||
fibers is a dictionary containing a dictionary for each kind of fiber
|
||||
each dictionary has to report:
|
||||
reference_frequency: the frequency at which the parameters are evaluated [THz]
|
||||
alpha: the attenuation coefficient [dB/km]
|
||||
alpha_1st: the first derivative of alpha indicating the alpha slope [dB/km/THz]
|
||||
if you assume a flat attenuation with respect to frequency, set it to zero
|
||||
beta_2: the dispersion coefficient [ps^2/km]
|
||||
n_2: second-order nonlinear refractive index [m^2/W]
|
||||
a typical value is 2.5E-20 m^2/W
|
||||
a_eff: the effective area of the fiber [um^2]
|
||||
"""
|
||||
|
||||
fibers = {
|
||||
'SMF': {
|
||||
'reference_frequency': 193.5,
|
||||
'alpha': 0.2,
|
||||
'alpha_1st': 0,
|
||||
'beta_2': 21.27,
|
||||
'n_2': 2.5E-20,
|
||||
'a_eff': 77.77,
|
||||
},
|
||||
'NZDF': {
|
||||
'reference_frequency': 193.5,
|
||||
'alpha': 0.22,
|
||||
'alpha_1st': 0,
|
||||
'beta_2': 21,
|
||||
'n_2': 2.5E-20,
|
||||
'a_eff': 70,
|
||||
}
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
# -*- coding: utf-8 -*
|
||||
"""general_parameters.py contains the general configuration settings
|
||||
|
||||
The settings are subdivided into two dictionaries:
|
||||
sys_param: a dictionary containing the general system parameters:
|
||||
f0: the starting frequency of the laser grid used to describe the WDM system [THz]
|
||||
ns: the number of 6.25 GHz slots in the grid
|
||||
|
||||
control_param:
|
||||
save_each_comp: a boolean flag. If true, it saves in the output folder one spectrum file at the output of each
|
||||
component, otherwise it saves just the last spectrum
|
||||
is_linear: a boolean flag. If true, it doesn't compute NLI; if false, OLE will consider NLI
|
||||
is_analytic: a boolean flag. If true, the NLI is computed through the analytic formula, otherwise it uses
|
||||
the double integral. Warning: the double integral is very slow.
|
||||
points_not_interp: if the double integral is used, it indicates how many points are calculated; the others will
|
||||
be interpolated
|
||||
kind_interp: a string indicating the interpolation method for the double integral
|
||||
th_fwm: the threshold of the four wave mixing efficiency for the double integral
|
||||
n_points: number of points in which the double integral is computed in the high FWM efficiency region
|
||||
n_points_min: number of points in which the double integral is computed in the low FWM efficiency region
|
||||
n_cores: number of cores for parallel computation [not yet implemented]
|
||||
"""
|
||||
# System parameters
|
||||
sys_param = {
|
||||
'f0': 192.075,
|
||||
'ns': 328
|
||||
}
|
||||
|
||||
# control parameters
|
||||
control_param = {
|
||||
'save_each_comp': True,
|
||||
'is_linear': False,
|
||||
'is_analytic': True,
|
||||
'points_not_interp': 2,
|
||||
'kind_interp': 'linear',
|
||||
'th_fwm': 50,
|
||||
'n_points': 500,
|
||||
'n_points_min': 4,
|
||||
'n_cores': 1
|
||||
}
|
||||
@@ -1,59 +0,0 @@
|
||||
# coding=utf-8
|
||||
""" link_description.py contains the full description of that OLE has to emulate.
|
||||
It contains a list of dictionaries following the structure of the link; each element of the list describes one
|
||||
component.
|
||||
|
||||
'comp_cat': the kind of link component:
|
||||
PC: a passive component defined by a loss at a certain frequency and a loss tilt
|
||||
OA: an optical amplifier defined by a gain at a certain frequency, a gain tilt and a noise figure
|
||||
fiber: a span of fiber described by the type and the length
|
||||
'comp_id': is an id identifying the component. It has to be unique for each component!
|
||||
|
||||
extra fields for PC:
|
||||
'ref_freq': the frequency at which the 'loss' parameter is evaluated [THz]
|
||||
'loss': the loss at the frequency 'ref_freq' [dB]
|
||||
'loss_tlt': the frequency dependent loss [dB/THz]
|
||||
extra fields for OA:
|
||||
'ref_freq': the frequency at which the 'gain' parameter is evaluated [THz]
|
||||
'gain': the gain at the frequency 'ref_freq' [dB]
|
||||
'gain_tlt': the frequency dependent gain [dB/THz]
|
||||
'noise_figure': the noise figure of the optical amplifier [dB]
|
||||
extra fields for fiber:
|
||||
'fiber_type': a string calling the type of fiber described in the file fiber_parameters.py
|
||||
'length': the fiber length [km]
|
||||
|
||||
"""
|
||||
smf = {
|
||||
'comp_cat': 'fiber',
|
||||
'comp_id': '',
|
||||
'fiber_type': 'SMF',
|
||||
'length': 100
|
||||
}
|
||||
|
||||
oa = {
|
||||
'comp_cat': 'OA',
|
||||
'comp_id': '',
|
||||
'ref_freq': 193.5,
|
||||
'gain': 20,
|
||||
'gain_tlt': 0.0,
|
||||
'noise_figure': 5
|
||||
}
|
||||
|
||||
pc = {
|
||||
'comp_cat': 'PC',
|
||||
'comp_id': '04',
|
||||
'ref_freq': 193.,
|
||||
'loss': 2.0,
|
||||
'loss_tlt': 0.0
|
||||
}
|
||||
|
||||
link = []
|
||||
|
||||
for index in range(20):
|
||||
smf['comp_id'] = '%03d' % (2 * index)
|
||||
oa['comp_id'] = '%03d' % (2 * index + 1)
|
||||
link += [dict(smf)]
|
||||
link += [dict(oa)]
|
||||
|
||||
pc['comp_id'] = '%03d' % 40
|
||||
link += [dict(pc)]
|
||||
30
gnpy/core/__init__.py
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
########################################################################
|
||||
# _____ ___ ____ ____ ____ _____ #
|
||||
# |_ _|_ _| _ \ | _ \/ ___|| ____| #
|
||||
# | | | || |_) | | |_) \___ \| _| #
|
||||
# | | | || __/ | __/ ___) | |___ #
|
||||
# |_| |___|_| |_| |____/|_____| #
|
||||
# #
|
||||
# == Physical Simulation Environment == #
|
||||
# #
|
||||
########################################################################
|
||||
|
||||
|
||||
'''
|
||||
gnpy route planning and optimization library
|
||||
============================================
|
||||
|
||||
gnpy is a route planning and optimization library, written in Python, for
|
||||
operators of large-scale mesh optical networks.
|
||||
|
||||
:copyright: © 2018, Telecom Infra Project
|
||||
:license: BSD 3-Clause, see LICENSE for more details.
|
||||
'''
|
||||
|
||||
from . import elements
|
||||
from .execute import *
|
||||
from .network import *
|
||||
from .utils import *
|
||||
581
gnpy/core/convert.py
Executable file
@@ -0,0 +1,581 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
gnpy.core.convert
|
||||
=================
|
||||
|
||||
This module contains utilities for converting between XLS and JSON.
|
||||
|
||||
The input XLS file must contain sheets named "Nodes" and "Links".
|
||||
It may optionally contain a sheet named "Eqpt".
|
||||
|
||||
In the "Nodes" sheet, only the "City" column is mandatory. The column "Type"
|
||||
can be determined automatically given the topology (e.g., if degree 2, ILA;
|
||||
otherwise, ROADM.) Incorrectly specified types (e.g., ILA for node of
|
||||
degree ≠ 2) will be automatically corrected.
|
||||
|
||||
In the "Links" sheet, only the first three columns ("Node A", "Node Z" and
|
||||
"east Distance (km)") are mandatory. Missing "west" information is copied from
|
||||
the "east" information so that it is possible to input undirected data.
|
||||
"""
|
||||
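For orientation, a hedged illustration of the minimum spreadsheet content described above, written out as plain Python rows (sheet and column names as in the docstring; city names are placeholders):

# "Nodes" sheet: only the "City" column is mandatory.
nodes_rows = [
    {'City': 'Site_A'},
    {'City': 'Site_B'}
]
# "Links" sheet: "Node A", "Node Z" and "east Distance (km)" are mandatory;
# missing "west" values are copied from the corresponding "east" values.
links_rows = [
    {'Node A': 'Site_A', 'Node Z': 'Site_B', 'east Distance (km)': 80}
]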
|
||||
from sys import exit
|
||||
try:
|
||||
from xlrd import open_workbook
|
||||
except ModuleNotFoundError:
|
||||
exit('Required: `pip install xlrd`')
|
||||
from argparse import ArgumentParser
|
||||
from collections import namedtuple, Counter, defaultdict
|
||||
from itertools import chain
|
||||
from json import dumps
|
||||
from pathlib import Path
|
||||
from difflib import get_close_matches
|
||||
import time
|
||||
|
||||
all_rows = lambda sh, start=0: (sh.row(x) for x in range(start, sh.nrows))
|
||||
|
||||
class Node(object):
|
||||
def __init__(self, **kwargs):
|
||||
super(Node, self).__init__()
|
||||
self.update_attr(kwargs)
|
||||
|
||||
def update_attr(self, kwargs):
|
||||
clean_kwargs = {k:v for k,v in kwargs.items() if v !=''}
|
||||
for k,v in self.default_values.items():
|
||||
v = clean_kwargs.get(k,v)
|
||||
setattr(self, k, v)
|
||||
|
||||
default_values = \
|
||||
{
|
||||
'city': '',
|
||||
'state': '',
|
||||
'country': '',
|
||||
'region': '',
|
||||
'latitude': 0,
|
||||
'longitude': 0,
|
||||
'node_type': 'ILA'
|
||||
}
|
||||
|
||||
class Link(object):
|
||||
"""attribtes from west parse_ept_headers dict
|
||||
+node_a, node_z, west_fiber_con_in, east_fiber_con_in
|
||||
"""
|
||||
def __init__(self, **kwargs):
|
||||
super(Link, self).__init__()
|
||||
self.update_attr(kwargs)
|
||||
self.distance_units = 'km'
|
||||
|
||||
def update_attr(self, kwargs):
|
||||
clean_kwargs = {k:v for k,v in kwargs.items() if v !=''}
|
||||
for k,v in self.default_values.items():
|
||||
v = clean_kwargs.get(k,v)
|
||||
setattr(self, k, v)
|
||||
k = 'west' + k.split('east')[-1]
|
||||
v = clean_kwargs.get(k,v)
|
||||
setattr(self, k, v)
|
||||
|
||||
def __eq__(self, link):
|
||||
return (self.from_city == link.from_city and self.to_city == link.to_city) \
|
||||
or (self.from_city == link.to_city and self.to_city == link.from_city)
|
||||
|
||||
default_values = \
|
||||
{
|
||||
'from_city': '',
|
||||
'to_city': '',
|
||||
'east_distance': 80,
|
||||
'east_fiber': 'SSMF',
|
||||
'east_lineic': 0.2,
|
||||
'east_con_in': None,
|
||||
'east_con_out': None,
|
||||
'east_pmd': 0.1,
|
||||
'east_cable': ''
|
||||
}
|
||||
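A minimal sketch of the east/west mirroring done by Link.update_attr() above, with assumed inputs:

# For a default key such as 'east_distance', update_attr() derives the matching
# west key as 'west' + 'east_distance'.split('east')[-1], i.e. 'west_distance'.
kwargs = {'from_city': 'Site_A', 'to_city': 'Site_B', 'east_distance': 120}
# After Link(**kwargs):
#   link.east_distance == 120   (explicit value from kwargs)
#   link.west_distance == 120   (no west value given, so the east value is reused)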
|
||||
|
||||
class Eqpt(object):
|
||||
def __init__(self, **kwargs):
|
||||
super(Eqpt, self).__init__()
|
||||
self.update_attr(kwargs)
|
||||
|
||||
def update_attr(self, kwargs):
|
||||
clean_kwargs = {k:v for k,v in kwargs.items() if v !=''}
|
||||
for k,v in self.default_values.items():
|
||||
v_east = clean_kwargs.get(k,v)
|
||||
setattr(self, k, v_east)
|
||||
k = 'west' + k.split('east')[-1]
|
||||
v_west = clean_kwargs.get(k,v)
|
||||
setattr(self, k, v_west)
|
||||
|
||||
default_values = \
|
||||
{
|
||||
'from_city': '',
|
||||
'to_city': '',
|
||||
'east_amp_type': '',
|
||||
'east_att_in': 0,
|
||||
'east_amp_gain': 0,
|
||||
'east_tilt': 0,
|
||||
'east_att_out': 0
|
||||
}
|
||||
|
||||
|
||||
def read_header(my_sheet, line, slice_):
|
||||
""" return the list of headers !:= ''
|
||||
header_i = [(header, header_column_index), ...]
|
||||
in a {line, slice_x, slice_y} range
|
||||
"""
|
||||
Param_header = namedtuple('Param_header', 'header colindex')
|
||||
try:
|
||||
header = [x.value.strip() for x in my_sheet.row_slice(line, slice_[0], slice_[1])]
|
||||
header_i = [Param_header(header,i+slice_[0]) for i, header in enumerate(header) if header != '']
|
||||
except:
|
||||
header_i = []
|
||||
if header_i != [] and header_i[-1].colindex != slice_[1]:
|
||||
header_i.append(Param_header('',slice_[1]))
|
||||
return header_i
|
||||
|
||||
def read_slice(my_sheet, line, slice_, header):
|
||||
"""return the slice range of a given header
|
||||
in a defined range {line, slice_x, slice_y}"""
|
||||
header_i = read_header(my_sheet, line, slice_)
|
||||
slice_range = (-1,-1)
|
||||
if header_i != []:
|
||||
try:
|
||||
slice_range = next((h.colindex,header_i[i+1].colindex) \
|
||||
for i,h in enumerate(header_i) if header in h.header)
|
||||
except:
|
||||
pass
|
||||
return slice_range
|
||||
|
||||
|
||||
def parse_headers(my_sheet, input_headers_dict, headers, start_line, slice_in):
|
||||
"""return a dict of header_slice
|
||||
key = column index
|
||||
value = header name"""
|
||||
|
||||
|
||||
for h0 in input_headers_dict:
|
||||
slice_out = read_slice(my_sheet, start_line, slice_in, h0)
|
||||
iteration = 1
|
||||
while slice_out == (-1,-1) and iteration < 10:
|
||||
#try next lines
|
||||
#print(h0, iteration)
|
||||
slice_out = read_slice(my_sheet, start_line+iteration, slice_in, h0)
|
||||
iteration += 1
|
||||
if slice_out == (-1, -1):
|
||||
if h0 == 'east':
|
||||
print(f'\x1b[1;31;40m'+f'CRITICAL: missing _east_ header above other headers (hierarchical) _ ABORT'+ '\x1b[0m')
|
||||
exit()
|
||||
else:
|
||||
print(f'missing header {h0}')
|
||||
elif not isinstance(input_headers_dict[h0], dict):
|
||||
headers[slice_out[0]] = input_headers_dict[h0]
|
||||
else:
|
||||
headers = parse_headers(my_sheet, input_headers_dict[h0], headers, start_line+1, slice_out)
|
||||
if headers == {}:
|
||||
print(f'\x1b[1;31;40m'+f'CRITICAL ERROR: could not find any header to read _ ABORT'+ '\x1b[0m')
|
||||
exit()
|
||||
return headers
|
||||
|
||||
def parse_row(row, headers):
|
||||
#print([label for label in ept.values()])
|
||||
#print([i for i in ept.keys()])
|
||||
#print(row[i for i in ept.keys()])
|
||||
return {f: r.value for f, r in \
|
||||
zip([label for label in headers.values()], [row[i] for i in headers])}
|
||||
#if r.ctype != XL_CELL_EMPTY}
|
||||
|
||||
def parse_sheet(my_sheet, input_headers_dict, header_line, start_line, column):
|
||||
headers = parse_headers(my_sheet, input_headers_dict, {}, header_line, (0,column))
|
||||
for row in all_rows(my_sheet, start=start_line):
|
||||
yield parse_row(row[0: column], headers)
|
||||
|
||||
def sanity_check(nodes, links, nodes_by_city, links_by_city, eqpts_by_city):
|
||||
|
||||
duplicate_links = []
|
||||
for l1 in links:
|
||||
for l2 in links:
|
||||
if l1 is not l2 and l1 == l2 and l2 not in duplicate_links:
|
||||
print(f'\nWARNING\n \
|
||||
link {l1.from_city}-{l1.to_city} is duplicate \
|
||||
\nthe 1st duplicate link will be removed but you should check Links sheet input')
|
||||
duplicate_links.append(l1)
|
||||
#if duplicate_links != []:
|
||||
#time.sleep(3)
|
||||
for l in duplicate_links:
|
||||
links.remove(l)
|
||||
|
||||
try :
|
||||
test_nodes = [n for n in nodes_by_city if n not in links_by_city]
test_links = [n for n in links_by_city if n not in nodes_by_city]
test_eqpts = [n for n in eqpts_by_city if n not in nodes_by_city]
|
||||
assert (test_nodes == [] or test_nodes == [''])\
|
||||
and (test_links == [] or test_links ==[''])\
|
||||
and (test_eqpts == [] or test_eqpts ==[''])
|
||||
except AssertionError:
|
||||
print(f'CRITICAL error: \nNames in Nodes and Links sheets do not match, check:\
|
||||
\n{test_nodes} in Nodes sheet\
|
||||
\n{test_links} in Links sheet\
|
||||
\n{test_eqpts} in Eqpt sheet')
|
||||
exit(1)
|
||||
|
||||
for city,link in links_by_city.items():
|
||||
if nodes_by_city[city].node_type.lower()=='ila' and len(link) != 2:
|
||||
#wrong input: ILA sites can only be Degree 2
|
||||
# => correct to make it a ROADM and remove entry in links_by_city
|
||||
#TODO : put in log rather than print
|
||||
print(f'invalid node type ({nodes_by_city[city].node_type})\
|
||||
specified in {city}, replaced by ROADM')
|
||||
nodes_by_city[city].node_type = 'ROADM'
|
||||
for n in nodes:
|
||||
if n.city==city:
|
||||
n.node_type='ROADM'
|
||||
return nodes, links
|
||||
|
||||
def convert_file(input_filename, names_matching=False, filter_region=[]):
|
||||
nodes, links, eqpts = parse_excel(input_filename)
|
||||
|
||||
if filter_region:
|
||||
nodes = [n for n in nodes if n.region.lower() in filter_region]
|
||||
cities = {n.city for n in nodes}
|
||||
links = [lnk for lnk in links if lnk.from_city in cities and
|
||||
lnk.to_city in cities]
|
||||
cities = {lnk.from_city for lnk in links} | {lnk.to_city for lnk in links}
|
||||
nodes = [n for n in nodes if n.city in cities]
|
||||
|
||||
|
||||
global nodes_by_city
|
||||
nodes_by_city = {n.city: n for n in nodes}
|
||||
|
||||
#create matching dictionary for node name mismatch analysis
|
||||
|
||||
cities = {''.join(c.strip() for c in n.city.split('C+L')).lower(): n.city for n in nodes}
|
||||
cities_to_match = [k for k in cities]
|
||||
city_match_dic = defaultdict(list)
|
||||
for city in cities:
|
||||
if city in cities_to_match:
|
||||
cities_to_match.remove(city)
|
||||
matches = get_close_matches(city, cities_to_match, 4, 0.85)
|
||||
for m in matches:
|
||||
city_match_dic[cities[city]].append(cities[m])
|
||||
#check lower case/upper case
|
||||
for city in nodes_by_city:
|
||||
for match_city in nodes_by_city:
|
||||
if match_city.lower() == city.lower() and match_city != city:
|
||||
city_match_dic[city].append(match_city)
|
||||
|
||||
if names_matching:
|
||||
print('\ncity match dictionary:',city_match_dic)
|
||||
with open('name_match_dictionary.json', 'w', encoding='utf-8') as city_match_dic_file:
|
||||
city_match_dic_file.write(dumps(city_match_dic, indent=2, ensure_ascii=False))
|
||||
|
||||
global links_by_city
|
||||
links_by_city = defaultdict(list)
|
||||
for link in links:
|
||||
links_by_city[link.from_city].append(link)
|
||||
links_by_city[link.to_city].append(link)
|
||||
|
||||
global eqpts_by_city
|
||||
eqpts_by_city = defaultdict(list)
|
||||
for eqpt in eqpts:
|
||||
eqpts_by_city[eqpt.from_city].append(eqpt)
|
||||
|
||||
nodes, links = sanity_check(nodes, links, nodes_by_city, links_by_city, eqpts_by_city)
|
||||
|
||||
data = {
|
||||
'elements':
|
||||
[{'uid': f'trx {x.city}',
|
||||
'metadata': {'location': {'city': x.city,
|
||||
'region': x.region,
|
||||
'latitude': x.latitude,
|
||||
'longitude': x.longitude}},
|
||||
'type': 'Transceiver'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower() == 'roadm'] +
|
||||
[{'uid': f'roadm {x.city}',
|
||||
'metadata': {'location': {'city': x.city,
|
||||
'region': x.region,
|
||||
'latitude': x.latitude,
|
||||
'longitude': x.longitude}},
|
||||
'type': 'Roadm'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower() == 'roadm'] +
|
||||
[{'uid': f'west fused spans in {x.city}',
|
||||
'metadata': {'location': {'city': x.city,
|
||||
'region': x.region,
|
||||
'latitude': x.latitude,
|
||||
'longitude': x.longitude}},
|
||||
'type': 'Fused'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower() == 'fused'] +
|
||||
[{'uid': f'east fused spans in {x.city}',
|
||||
'metadata': {'location': {'city': x.city,
|
||||
'region': x.region,
|
||||
'latitude': x.latitude,
|
||||
'longitude': x.longitude}},
|
||||
'type': 'Fused'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower() == 'fused'] +
|
||||
[{'uid': f'fiber ({x.from_city} \u2192 {x.to_city})-{x.east_cable}',
|
||||
'metadata': {'location': midpoint(nodes_by_city[x.from_city],
|
||||
nodes_by_city[x.to_city])},
|
||||
'type': 'Fiber',
|
||||
'type_variety': x.east_fiber,
|
||||
'params': {'length': round(x.east_distance, 3),
|
||||
'length_units': x.distance_units,
|
||||
'loss_coef': x.east_lineic,
|
||||
'con_in':x.east_con_in,
|
||||
'con_out':x.east_con_out}
|
||||
}
|
||||
for x in links] +
|
||||
[{'uid': f'fiber ({x.to_city} \u2192 {x.from_city})-{x.west_cable}',
|
||||
'metadata': {'location': midpoint(nodes_by_city[x.from_city],
|
||||
nodes_by_city[x.to_city])},
|
||||
'type': 'Fiber',
|
||||
'type_variety': x.west_fiber,
|
||||
'params': {'length': round(x.west_distance, 3),
|
||||
'length_units': x.distance_units,
|
||||
'loss_coef': x.west_lineic,
|
||||
'con_in':x.west_con_in,
|
||||
'con_out':x.west_con_out}
|
||||
} # missing ILA construction
|
||||
for x in links] +
|
||||
[{'uid': f'east edfa in {e.from_city} to {e.to_city}',
|
||||
'metadata': {'location': {'city': nodes_by_city[e.from_city].city,
|
||||
'region': nodes_by_city[e.from_city].region,
|
||||
'latitude': nodes_by_city[e.from_city].latitude,
|
||||
'longitude': nodes_by_city[e.from_city].longitude}},
|
||||
'type': 'Edfa',
|
||||
'type_variety': e.east_amp_type,
|
||||
'operational': {'gain_target': e.east_amp_gain,
|
||||
'tilt_target': e.east_tilt,
|
||||
'out_voa' : e.east_att_out}
|
||||
}
|
||||
for e in eqpts if e.east_amp_type.lower() != ''] +
|
||||
[{'uid': f'west edfa in {e.from_city} to {e.to_city}',
|
||||
'metadata': {'location': {'city': nodes_by_city[e.from_city].city,
|
||||
'region': nodes_by_city[e.from_city].region,
|
||||
'latitude': nodes_by_city[e.from_city].latitude,
|
||||
'longitude': nodes_by_city[e.from_city].longitude}},
|
||||
'type': 'Edfa',
|
||||
'type_variety': e.west_amp_type,
|
||||
'operational': {'gain_target': e.west_amp_gain,
|
||||
'tilt_target': e.west_tilt,
|
||||
'out_voa' : e.west_att_out}
|
||||
}
|
||||
for e in eqpts if e.west_amp_type.lower() != ''],
|
||||
'connections':
|
||||
list(chain.from_iterable([eqpt_connection_by_city(n.city)
|
||||
for n in nodes]))
|
||||
+
|
||||
list(chain.from_iterable(zip(
|
||||
[{'from_node': f'trx {x.city}',
|
||||
'to_node': f'roadm {x.city}'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower()=='roadm'],
|
||||
[{'from_node': f'roadm {x.city}',
|
||||
'to_node': f'trx {x.city}'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower()=='roadm'])))
|
||||
}
|
||||
|
||||
suffix_filename = str(input_filename.suffixes[0])
|
||||
full_input_filename = str(input_filename)
|
||||
split_filename = [full_input_filename[0:len(full_input_filename)-len(suffix_filename)] , suffix_filename[1:]]
|
||||
output_json_file_name = split_filename[0]+'.json'
|
||||
with open(output_json_file_name, 'w', encoding='utf-8') as edfa_json_file:
|
||||
edfa_json_file.write(dumps(data, indent=2, ensure_ascii=False))
|
||||
return output_json_file_name
|
||||
|
||||
def parse_excel(input_filename):
|
||||
link_headers = \
|
||||
{ 'Node A': 'from_city',
|
||||
'Node Z': 'to_city',
|
||||
'east':{
|
||||
'Distance (km)': 'east_distance',
|
||||
'Fiber type': 'east_fiber',
|
||||
'lineic att': 'east_lineic',
|
||||
'Con_in': 'east_con_in',
|
||||
'Con_out': 'east_con_out',
|
||||
'PMD': 'east_pmd',
|
||||
'Cable id': 'east_cable'
|
||||
},
|
||||
'west':{
|
||||
'Distance (km)': 'west_distance',
|
||||
'Fiber type': 'west_fiber',
|
||||
'lineic att': 'west_lineic',
|
||||
'Con_in': 'west_con_in',
|
||||
'Con_out': 'west_con_out',
|
||||
'PMD': 'west_pmd',
|
||||
'Cable id': 'west_cable'
|
||||
}
|
||||
}
|
||||
node_headers = \
|
||||
{ 'City': 'city',
|
||||
'State': 'state',
|
||||
'Country': 'country',
|
||||
'Region': 'region',
|
||||
'Latitude': 'latitude',
|
||||
'Longitude': 'longitude',
|
||||
'Type': 'node_type'
|
||||
}
|
||||
eqpt_headers = \
|
||||
{ 'Node A': 'from_city',
|
||||
'Node Z': 'to_city',
|
||||
'east':{
|
||||
'amp type': 'east_amp_type',
|
||||
'att_in': 'east_att_in',
|
||||
'amp gain': 'east_amp_gain',
|
||||
'tilt': 'east_tilt',
|
||||
'att_out': 'east_att_out'
|
||||
},
|
||||
'west':{
|
||||
'amp type': 'west_amp_type',
|
||||
'att_in': 'west_att_in',
|
||||
'amp gain': 'west_amp_gain',
|
||||
'tilt': 'west_tilt',
|
||||
'att_out': 'west_att_out'
|
||||
}
|
||||
}
|
||||
|
||||
with open_workbook(input_filename) as wb:
|
||||
nodes_sheet = wb.sheet_by_name('Nodes')
|
||||
links_sheet = wb.sheet_by_name('Links')
|
||||
try:
|
||||
eqpt_sheet = wb.sheet_by_name('Eqpt')
|
||||
except Exception:
|
||||
#eqpt_sheet is optional
|
||||
eqpt_sheet = None
|
||||
|
||||
nodes = []
|
||||
for node in parse_sheet(nodes_sheet, node_headers, NODES_LINE, NODES_LINE+1, NODES_COLUMN):
|
||||
nodes.append(Node(**node))
|
||||
expected_node_types = ('ROADM', 'ILA', 'FUSED')
|
||||
for n in nodes:
|
||||
if n.node_type not in expected_node_types:
|
||||
n.node_type='ILA'
|
||||
|
||||
links = []
|
||||
for link in parse_sheet(links_sheet, link_headers, LINKS_LINE, LINKS_LINE+2, LINKS_COLUMN):
|
||||
links.append(Link(**link))
|
||||
#print('\n', [l.__dict__ for l in links])
|
||||
|
||||
eqpts = []
|
||||
if eqpt_sheet is not None:
|
||||
for eqpt in parse_sheet(eqpt_sheet, eqpt_headers, EQPTS_LINE, EQPTS_LINE+2, EQPTS_COLUMN):
|
||||
eqpts.append(Eqpt(**eqpt))
|
||||
|
||||
# sanity check
|
||||
all_cities = Counter(n.city for n in nodes)
|
||||
if len(all_cities) != len(nodes):
|
||||
raise ValueError(f'Duplicate city: {all_cities}')
|
||||
if any(ln.from_city not in all_cities or
|
||||
ln.to_city not in all_cities for ln in links):
|
||||
raise ValueError('Bad link: unknown from_city or to_city.')
|
||||
|
||||
return nodes, links, eqpts
|
||||
|
||||
|
||||
def eqpt_connection_by_city(city_name):
|
||||
other_cities = fiber_dest_from_source(city_name)
|
||||
subdata = []
|
||||
if nodes_by_city[city_name].node_type.lower() in ('ila', 'fused'):
|
||||
# Then len(other_cities) == 2
|
||||
direction = ['west', 'east']
|
||||
for i in range(2):
|
||||
from_ = fiber_link(other_cities[i], city_name)
|
||||
in_ = eqpt_in_city_to_city(city_name, other_cities[0],direction[i])
|
||||
to_ = fiber_link(city_name, other_cities[1-i])
|
||||
subdata += connect_eqpt(from_, in_, to_)
|
||||
elif nodes_by_city[city_name].node_type.lower() == 'roadm':
|
||||
for other_city in other_cities:
|
||||
from_ = f'roadm {city_name}'
|
||||
in_ = eqpt_in_city_to_city(city_name, other_city)
|
||||
to_ = fiber_link(city_name, other_city)
|
||||
subdata += connect_eqpt(from_, in_, to_)
|
||||
|
||||
from_ = fiber_link(other_city, city_name)
|
||||
in_ = eqpt_in_city_to_city(city_name, other_city, "west")
|
||||
to_ = f'roadm {city_name}'
|
||||
subdata += connect_eqpt(from_, in_, to_)
|
||||
return subdata
|
||||
|
||||
|
||||
def connect_eqpt(from_, in_, to_):
|
||||
connections = []
|
||||
if in_ !='':
|
||||
connections = [{'from_node': from_, 'to_node': in_},
|
||||
{'from_node': in_, 'to_node': to_}]
|
||||
else:
|
||||
connections = [{'from_node': from_, 'to_node': to_}]
|
||||
return connections
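# Illustrative example (node names are hypothetical): with an in-line element present,
#   connect_eqpt('roadm A', 'east edfa in A to B', 'fiber (A → B)-cable1')
# returns
#   [{'from_node': 'roadm A', 'to_node': 'east edfa in A to B'},
#    {'from_node': 'east edfa in A to B', 'to_node': 'fiber (A → B)-cable1'}]
# while an empty in_ collapses to a single direct from_ -> to_ connection.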
|
||||
|
||||
|
||||
def eqpt_in_city_to_city(in_city, to_city, direction='east'):
|
||||
rev_direction = 'west' if direction == 'east' else 'east'
|
||||
amp_direction = f'{direction}_amp_type'
|
||||
amp_rev_direction = f'{rev_direction}_amp_type'
|
||||
return_eqpt = ''
|
||||
if in_city in eqpts_by_city:
|
||||
for e in eqpts_by_city[in_city]:
|
||||
if nodes_by_city[in_city].node_type.lower() == 'roadm':
|
||||
if e.to_city == to_city and getattr(e, amp_direction) != '':
|
||||
return_eqpt = f'{direction} edfa in {e.from_city} to {e.to_city}'
|
||||
elif nodes_by_city[in_city].node_type.lower() == 'ila':
|
||||
if e.to_city != to_city:
|
||||
direction = rev_direction
|
||||
amp_direction = amp_rev_direction
|
||||
if getattr(e, amp_direction) != '':
|
||||
return_eqpt = f'{direction} edfa in {e.from_city} to {e.to_city}'
|
||||
if nodes_by_city[in_city].node_type.lower() == 'fused':
|
||||
return_eqpt = f'{direction} fused spans in {in_city}'
|
||||
return return_eqpt
|
||||
|
||||
|
||||
def fiber_dest_from_source(city_name):
|
||||
destinations = []
|
||||
links_from_city = links_by_city[city_name]
|
||||
for l in links_from_city:
|
||||
if l.from_city == city_name:
|
||||
destinations.append(l.to_city)
|
||||
else:
|
||||
destinations.append(l.from_city)
|
||||
return destinations
|
||||
|
||||
|
||||
def fiber_link(from_city, to_city):
|
||||
source_dest = (from_city, to_city)
|
||||
link = links_by_city[from_city]
|
||||
l = next(l for l in link if l.from_city in source_dest and l.to_city in source_dest)
|
||||
if l.from_city == from_city:
|
||||
fiber = f'fiber ({l.from_city} \u2192 {l.to_city})-{l.east_cable}'
|
||||
else:
|
||||
fiber = f'fiber ({l.to_city} \u2192 {l.from_city})-{l.west_cable}'
|
||||
return fiber
|
||||
|
||||
|
||||
def midpoint(city_a, city_b):
|
||||
lats = city_a.latitude, city_b.latitude
|
||||
longs = city_a.longitude, city_b.longitude
|
||||
try:
|
||||
result = {
|
||||
'latitude': sum(lats) / 2,
|
||||
'longitude': sum(longs) / 2
|
||||
}
|
||||
except :
|
||||
result = {
|
||||
'latitude': 0,
|
||||
'longitude': 0
|
||||
}
|
||||
return result
|
||||
|
||||
#output_json_file_name = 'coronet_conus_example.json'
|
||||
#TODO get column size automatically from tuple size
|
||||
|
||||
NODES_COLUMN = 7
|
||||
NODES_LINE = 4
|
||||
LINKS_COLUMN = 16
|
||||
LINKS_LINE = 3
|
||||
EQPTS_LINE = 3
|
||||
EQPTS_COLUMN = 12
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('workbook', nargs='?', type=Path , default='meshTopologyExampleV2.xls')
|
||||
parser.add_argument('-f', '--filter-region', action='append', default=[])
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = parser.parse_args()
|
||||
convert_file(args.workbook, filter_region=args.filter_region)
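# Typical invocation (region value is illustrative; the workbook path is the argparse default):
#   python <this script> meshTopologyExampleV2.xls -f europe
# This writes meshTopologyExampleV2.json next to the input workbook and returns its name.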
|
||||
800
gnpy/core/elements.py
Normal file
@@ -0,0 +1,800 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.elements
|
||||
==================
|
||||
|
||||
This module contains standard network elements.
|
||||
|
||||
A network element is a Python callable. It takes a .info.SpectralInformation
|
||||
object and returns a copy with appropriate fields affected. This structure
|
||||
represents spectral information that is "propagated" by this network element.
Network elements must have only a local "view" of the network and propagate
|
||||
SpectralInformation using only this information. They should be independent and
|
||||
self-contained.
|
||||
|
||||
Network elements MUST implement two attributes .uid and .name representing a
|
||||
unique identifier and a printable name.
|
||||
'''
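# A minimal usage sketch (illustrative, not part of this module): given a
# SpectralInformation object `si` built elsewhere (e.g. with gnpy.core.info),
# propagation along a computed path is just repeated calls of each element:
#
#     for element in path:    # Transceiver, Roadm, Fused, Fiber or Edfa instances
#         si = element(si)    # each element returns an updated copy of si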
|
||||
|
||||
from numpy import abs, arange, arcsinh, array, exp, divide, errstate
|
||||
from numpy import interp, log10, mean, pi, polyfit, polyval, sum
|
||||
from scipy.constants import c, h
|
||||
from collections import namedtuple
|
||||
|
||||
from gnpy.core.node import Node
|
||||
from gnpy.core.units import UNITS
|
||||
from gnpy.core.utils import lin2db, db2lin, itufs, snr_sum
|
||||
|
||||
class Transceiver(Node):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.osnr_ase_01nm = None
|
||||
self.osnr_ase = None
|
||||
self.osnr_nli = None
|
||||
self.snr = None
|
||||
self.passive = False
|
||||
self.baud_rate = None
|
||||
|
||||
def _calc_snr(self, spectral_info):
|
||||
with errstate(divide='ignore'):
|
||||
self.baud_rate = [c.baud_rate for c in spectral_info.carriers]
|
||||
ratio_01nm = [lin2db(12.5e9/b_rate) for b_rate in self.baud_rate]
|
||||
|
||||
#set raw values to record original calculation, before update_snr()
|
||||
self.raw_osnr_ase = [lin2db(divide(c.power.signal, c.power.ase))
|
||||
for c in spectral_info.carriers]
|
||||
self.raw_osnr_ase_01nm = [ase - ratio for ase, ratio
|
||||
in zip(self.raw_osnr_ase, ratio_01nm)]
|
||||
self.raw_osnr_nli = [lin2db(divide(c.power.signal, c.power.nli))
|
||||
for c in spectral_info.carriers]
|
||||
self.raw_snr = [lin2db(divide(c.power.signal, c.power.nli+c.power.ase))
|
||||
for c in spectral_info.carriers]
|
||||
|
||||
self.osnr_ase = self.raw_osnr_ase
|
||||
self.osnr_ase_01nm = self.raw_osnr_ase_01nm
|
||||
self.osnr_nli = self.raw_osnr_nli
|
||||
self.snr = self.raw_snr
|
||||
|
||||
def update_snr(self, *args):
|
||||
"""
|
||||
snr_added in 0.1nm
|
||||
compute SNR penalties such as transponder Tx_osnr or Roadm add_drop_osnr
|
||||
only applied in request.py / propagate on the last Transceiver node of the path
all penalties are added in a single call to avoid uncontrolled accumulation
|
||||
"""
|
||||
#use raw_values so that the added snr penalties are not cumulated
|
||||
snr_added = 0
|
||||
for s in args:
|
||||
snr_added += db2lin(-s)
|
||||
snr_added = -lin2db(snr_added)
|
||||
self.osnr_ase = list(map(lambda x,y:snr_sum(x,y,snr_added),
|
||||
self.raw_osnr_ase, self.baud_rate))
|
||||
self.snr = list(map(lambda x,y:snr_sum(x,y,snr_added),
|
||||
self.raw_snr, self.baud_rate))
|
||||
self.osnr_ase_01nm = list(map(lambda x:snr_sum(x,12.5e9,snr_added),
|
||||
self.raw_osnr_ase_01nm))
|
||||
|
||||
@property
|
||||
def to_json(self):
|
||||
return {'uid' : self.uid,
|
||||
'type' : type(self).__name__,
|
||||
'metadata' : {
|
||||
'location': self.metadata['location']._asdict()
|
||||
}
|
||||
}
|
||||
|
||||
def __repr__(self):
|
||||
return (f'{type(self).__name__}('
|
||||
f'uid={self.uid!r}, '
|
||||
f'osnr_ase_01nm={self.osnr_ase_01nm!r}, '
|
||||
f'osnr_ase={self.osnr_ase!r}, '
|
||||
f'osnr_nli={self.osnr_nli!r}, '
|
||||
f'snr={self.snr!r})')
|
||||
|
||||
def __str__(self):
|
||||
if self.snr is None or self.osnr_ase is None:
|
||||
return f'{type(self).__name__} {self.uid}'
|
||||
|
||||
snr = round(mean(self.snr),2)
|
||||
osnr_ase = round(mean(self.osnr_ase),2)
|
||||
osnr_ase_01nm = round(mean(self.osnr_ase_01nm), 2)
|
||||
|
||||
return '\n'.join([f'{type(self).__name__} {self.uid}',
|
||||
|
||||
f' OSNR ASE (0.1nm): {osnr_ase_01nm:.2f}',
|
||||
f' OSNR ASE (signal bw): {osnr_ase:.2f}',
|
||||
f' SNR total (signal bw): {snr:.2f}'])
|
||||
|
||||
|
||||
def __call__(self, spectral_info):
|
||||
self._calc_snr(spectral_info)
|
||||
return spectral_info
|
||||
|
||||
RoadmParams = namedtuple('RoadmParams', 'loss')
|
||||
|
||||
class Roadm(Node):
|
||||
def __init__(self, *args, params=None, **kwargs):
|
||||
if params is None:
|
||||
# default loss value if not mentioned in loaded network json
|
||||
params = {'loss':None}
|
||||
super().__init__(*args, params=RoadmParams(**params), **kwargs)
|
||||
self.loss = self.params.loss
|
||||
self.target_pch_out_db = None #set in Networks.py by def set_roadm_loss
|
||||
self.effective_pch_out_db = None
|
||||
self.effective_loss = None #set in self.propagate
|
||||
self.passive = True
|
||||
|
||||
@property
|
||||
def to_json(self):
|
||||
return {'uid' : self.uid,
|
||||
'type' : type(self).__name__,
|
||||
'params' : {'loss' : self.loss},
|
||||
'metadata' : {
|
||||
'location': self.metadata['location']._asdict()
|
||||
}
|
||||
}
|
||||
|
||||
def __repr__(self):
|
||||
return f'{type(self).__name__}(uid={self.uid!r}, loss={self.loss!r})'
|
||||
|
||||
def __str__(self):
|
||||
return '\n'.join([f'{type(self).__name__} {self.uid}',
|
||||
f' loss (dB): {self.effective_loss:.2f}',
|
||||
f' pch out (dBm): {self.effective_pch_out_db!r}'])
|
||||
|
||||
def propagate(self, pref, *carriers):
|
||||
#pin_target and loss are read from eqpt_config.json['Roadm']
|
||||
#all ingress channels in xpress are set to this power level
|
||||
#but add channels are not, so we define an effective loss
|
||||
#in the case of add channels
|
||||
if self.target_pch_out_db:
|
||||
self.effective_loss = pref.pi - self.target_pch_out_db
|
||||
else:
|
||||
self.effective_loss = self.loss
|
||||
self.effective_pch_out_db = pref.pi - self.effective_loss
|
||||
attenuation = db2lin(self.effective_loss)
|
||||
|
||||
for carrier in carriers:
|
||||
pwr = carrier.power
|
||||
pwr = pwr._replace(signal=pwr.signal/attenuation,
|
||||
nonlinear_interference=pwr.nli/attenuation,
|
||||
amplified_spontaneous_emission=pwr.ase/attenuation)
|
||||
yield carrier._replace(power=pwr)
|
||||
|
||||
def update_pref(self, pref):
|
||||
return pref._replace(p_span0=pref.p0, p_spani=self.effective_pch_out_db)
|
||||
|
||||
def __call__(self, spectral_info):
|
||||
carriers = tuple(self.propagate(spectral_info.pref, *spectral_info.carriers))
|
||||
pref = self.update_pref(spectral_info.pref)
|
||||
return spectral_info.update(carriers=carriers, pref=pref)
|
||||
|
||||
FusedParams = namedtuple('FusedParams', 'loss')
|
||||
|
||||
class Fused(Node):
|
||||
def __init__(self, *args, params=None, **kwargs):
|
||||
if params is None:
|
||||
# default loss value if not mentioned in loaded network json
|
||||
params = {'loss':1}
|
||||
super().__init__(*args, params=FusedParams(**params), **kwargs)
|
||||
self.loss = self.params.loss
|
||||
self.passive = True
|
||||
|
||||
@property
|
||||
def to_json(self):
|
||||
return {'uid' : self.uid,
|
||||
'type' : type(self).__name__,
|
||||
'metadata' : {
|
||||
'location': self.metadata['location']._asdict()
|
||||
}
|
||||
}
|
||||
|
||||
def __repr__(self):
|
||||
return f'{type(self).__name__}(uid={self.uid!r}, loss={self.loss!r})'
|
||||
|
||||
def __str__(self):
|
||||
return '\n'.join([f'{type(self).__name__} {self.uid}',
|
||||
f' loss (dB): {self.loss:.2f}'])
|
||||
|
||||
def propagate(self, *carriers):
|
||||
attenuation = db2lin(self.loss)
|
||||
|
||||
for carrier in carriers:
|
||||
pwr = carrier.power
|
||||
pwr = pwr._replace(signal=pwr.signal/attenuation,
|
||||
nonlinear_interference=pwr.nli/attenuation,
|
||||
amplified_spontaneous_emission=pwr.ase/attenuation)
|
||||
yield carrier._replace(power=pwr)
|
||||
|
||||
def update_pref(self, pref):
|
||||
return pref._replace(p_span0=pref.p0, p_spani=pref.pi - self.loss)
|
||||
|
||||
def __call__(self, spectral_info):
|
||||
carriers = tuple(self.propagate(*spectral_info.carriers))
|
||||
pref = self.update_pref(spectral_info.pref)
|
||||
return spectral_info.update(carriers=carriers, pref=pref)
|
||||
|
||||
FiberParams = namedtuple('FiberParams', 'type_variety length loss_coef length_units \
|
||||
att_in con_in con_out dispersion gamma')
|
||||
|
||||
class Fiber(Node):
|
||||
def __init__(self, *args, params=None, **kwargs):
|
||||
if params is None:
|
||||
params = {}
|
||||
if 'con_in' not in params:
|
||||
# if not defined in the network json connector loss in/out
|
||||
# the None value will be updated in network.py[build_network]
|
||||
# with default values from eqpt_config.json[Spans]
|
||||
params['con_in'] = None
|
||||
params['con_out'] = None
|
||||
if 'att_in' not in params:
|
||||
#fixed attenuator for padding
|
||||
params['att_in'] = 0
|
||||
|
||||
super().__init__(*args, params=FiberParams(**params), **kwargs)
|
||||
self.type_variety = self.params.type_variety
|
||||
self.length = self.params.length * UNITS[self.params.length_units] # in m
|
||||
self.loss_coef = self.params.loss_coef * 1e-3 # lineic loss dB/m
|
||||
self.lin_loss_coef = self.params.loss_coef / (20 * log10(exp(1)))
|
||||
self.att_in = self.params.att_in
|
||||
self.con_in = self.params.con_in
|
||||
self.con_out = self.params.con_out
|
||||
self.dispersion = self.params.dispersion # s/m/m
|
||||
self.gamma = self.params.gamma # 1/W/m
|
||||
self.pch_out_db = None
|
||||
self.carriers_in = None
|
||||
self.carriers_out = None
|
||||
# TODO|jla: discuss factor 2 in the linear lineic attenuation
|
||||
|
||||
@property
|
||||
def to_json(self):
|
||||
return {'uid' : self.uid,
|
||||
'type' : type(self).__name__,
|
||||
'type_variety' : self.type_variety,
|
||||
'params' : {
|
||||
#have to specify each because namedtuple cannot be updated :(
|
||||
'type_variety' : self.type_variety,
|
||||
'length' : self.length/UNITS[self.params.length_units],
|
||||
'loss_coef' : self.loss_coef*1e3,
|
||||
'length_units' : self.params.length_units,
|
||||
'att_in' : self.att_in,
|
||||
'con_in' : self.con_in,
|
||||
'con_out' : self.con_out
|
||||
},
|
||||
'metadata' : {
|
||||
'location': self.metadata['location']._asdict()
|
||||
}
|
||||
}
|
||||
|
||||
def __repr__(self):
|
||||
return f'{type(self).__name__}(uid={self.uid!r}, length={round(self.length*1e-3,1)!r}km, loss={round(self.loss,1)!r}dB)'
|
||||
|
||||
def __str__(self):
|
||||
return '\n'.join([f'{type(self).__name__} {self.uid}',
|
||||
f' type_variety: {self.type_variety}',
|
||||
f' length (km): {round(self.length*1e-3):.2f}',
|
||||
f' pad att_in (dB): {self.att_in:.2f}',
|
||||
f' total loss (dB): {self.loss:.2f}',
|
||||
f' (includes conn loss (dB) in: {self.con_in:.2f} out: {self.con_out:.2f})',
|
||||
f' (conn loss out includes EOL margin defined in eqpt_config.json)',
|
||||
f' pch out (dBm): {self.pch_out_db!r}'])
|
||||
|
||||
@property
|
||||
def fiber_loss(self):
|
||||
# dB fiber loss, not including padding attenuator
|
||||
return self.loss_coef * self.length + self.con_in + self.con_out
|
||||
|
||||
@property
|
||||
def loss(self):
|
||||
#total loss including padding att_in: useful for polymorphism with roadm loss
|
||||
return self.loss_coef * self.length + self.con_in + self.con_out + self.att_in
|
||||
|
||||
@property
|
||||
def passive(self):
|
||||
return True
|
||||
|
||||
@property
|
||||
def lin_attenuation(self):
|
||||
return db2lin(self.length * self.loss_coef)
|
||||
|
||||
@property
|
||||
def effective_length(self):
|
||||
_, alpha = self.dbkm_2_lin()
|
||||
leff = (1 - exp(-2 * alpha * self.length)) / (2 * alpha)
|
||||
return leff
|
||||
|
||||
@property
|
||||
def asymptotic_length(self):
|
||||
_, alpha = self.dbkm_2_lin()
|
||||
aleff = 1 / (2 * alpha)
|
||||
return aleff
|
||||
|
||||
def carriers(self, loc, attr):
|
||||
"""retrieve carriers information
|
||||
loc = (in, out) of the class element
|
||||
attr = (ase, nli, signal, total) power information"""
|
||||
if not (loc in ('in', 'out') and attr in ('nli', 'signal', 'total', 'ase')):
|
||||
yield None
|
||||
return
|
||||
power_dict = {
|
||||
'nli': 'nonlinear_interference',
|
||||
'ase': 'amplified_spontaneous_emission'
|
||||
}
|
||||
attr = power_dict.get(attr, attr)
|
||||
loc_attr = 'carriers_'+loc
|
||||
for c in getattr(self, loc_attr) :
|
||||
if attr == 'total':
|
||||
yield c.power.ase+c.power.nli+c.power.signal
|
||||
else:
|
||||
yield c.power._asdict().get(attr, None)
|
||||
|
||||
def beta2(self, ref_wavelength=None):
|
||||
""" Returns beta2 from dispersion parameter.
|
||||
Dispersion is entered in ps/nm/km.
|
||||
Dispersion can be a numpy array or a single value. If a
|
||||
value ref_wavelength is not entered 1550e-9m will be assumed.
|
||||
ref_wavelength can be a numpy array.
|
||||
"""
|
||||
# TODO|jla: discuss beta2 as method or attribute
|
||||
wl = 1550e-9 if ref_wavelength is None else ref_wavelength
|
||||
D = abs(self.dispersion)
|
||||
b2 = (wl ** 2) * D / (2 * pi * c) # 10^21 scales [ps^2/km]
|
||||
return b2 # s/Hz/m
|
||||
|
||||
def dbkm_2_lin(self):
|
||||
""" calculates the linear loss coefficient
|
||||
"""
|
||||
# alpha_pcoef is the power loss coefficient in dB per unit length (dB/m here, since loss_coef was scaled in __init__)
|
||||
# alpha_acoef is linear loss field amplitude coefficient in m^-1
|
||||
alpha_pcoef = self.loss_coef
|
||||
alpha_acoef = alpha_pcoef / (2 * 10 * log10(exp(1)))
|
||||
return alpha_pcoef, alpha_acoef
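# e.g. a fiber entered as 0.2 dB/km (stored here as 2e-4 dB/m) gives
# alpha_acoef = 2e-4 / 8.686 ≈ 2.3e-5 m^-1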
|
||||
|
||||
def _psi(self, carrier, interfering_carrier):
|
||||
""" Calculates eq. 123 from arXiv:1209.0394.
|
||||
"""
|
||||
if carrier.num_chan == interfering_carrier.num_chan: # SCI
|
||||
psi = arcsinh(0.5 * pi**2 * self.asymptotic_length
|
||||
* abs(self.beta2()) * carrier.baud_rate**2)
|
||||
else: # XCI
|
||||
delta_f = carrier.freq - interfering_carrier.freq
|
||||
psi = arcsinh(pi**2 * self.asymptotic_length * abs(self.beta2())
|
||||
* carrier.baud_rate * (delta_f + 0.5 * interfering_carrier.baud_rate))
|
||||
psi -= arcsinh(pi**2 * self.asymptotic_length * abs(self.beta2())
|
||||
* carrier.baud_rate * (delta_f - 0.5 * interfering_carrier.baud_rate))
|
||||
|
||||
return psi
|
||||
|
||||
def _gn_analytic(self, carrier, *carriers):
|
||||
""" Computes the nonlinear interference power on a single carrier.
|
||||
The method uses eq. 120 from arXiv:1209.0394.
|
||||
:param carrier: the signal under analysis
|
||||
:param carriers: the full WDM comb
|
||||
:return: carrier_nli: the amount of nonlinear interference in W on the carrier under analysis
|
||||
"""
|
||||
|
||||
g_nli = 0
|
||||
for interfering_carrier in carriers:
|
||||
psi = self._psi(carrier, interfering_carrier)
|
||||
g_nli += (interfering_carrier.power.signal/interfering_carrier.baud_rate)**2 \
|
||||
* (carrier.power.signal/carrier.baud_rate) * psi
|
||||
|
||||
g_nli *= (16 / 27) * (self.gamma * self.effective_length)**2 \
|
||||
/ (2 * pi * abs(self.beta2()) * self.asymptotic_length)
|
||||
|
||||
carrier_nli = carrier.baud_rate * g_nli
|
||||
return carrier_nli
|
||||
|
||||
def propagate(self, *carriers):
|
||||
|
||||
# apply connector_att_in on all carriers before computing gn analytics (TODO: first part not right)
|
||||
attenuation = db2lin(self.con_in + self.att_in)
|
||||
|
||||
chan = []
|
||||
for carrier in carriers:
|
||||
pwr = carrier.power
|
||||
pwr = pwr._replace(signal=pwr.signal/attenuation,
|
||||
nonlinear_interference=pwr.nli/attenuation,
|
||||
amplified_spontaneous_emission=pwr.ase/attenuation)
|
||||
carrier = carrier._replace(power=pwr)
|
||||
chan.append(carrier)
|
||||
|
||||
carriers = tuple(chan)
|
||||
|
||||
# propagate in the fiber and apply attenuation out
|
||||
attenuation = db2lin(self.con_out)
|
||||
for carrier in carriers:
|
||||
pwr = carrier.power
|
||||
carrier_nli = self._gn_analytic(carrier, *carriers)
|
||||
pwr = pwr._replace(signal=pwr.signal/self.lin_attenuation/attenuation,
|
||||
nonlinear_interference=(pwr.nli+carrier_nli)/self.lin_attenuation/attenuation,
|
||||
amplified_spontaneous_emission=pwr.ase/self.lin_attenuation/attenuation)
|
||||
yield carrier._replace(power=pwr)
|
||||
|
||||
def update_pref(self, pref):
|
||||
self.pch_out_db = round(pref.pi - self.loss, 2)
|
||||
return pref._replace(p_span0=pref.p0, p_spani=self.pch_out_db)
|
||||
|
||||
def __call__(self, spectral_info):
|
||||
self.carriers_in = spectral_info.carriers
|
||||
carriers = tuple(self.propagate(*spectral_info.carriers))
|
||||
pref = self.update_pref(spectral_info.pref)
|
||||
self.carriers_out = carriers
|
||||
return spectral_info.update(carriers=carriers, pref=pref)
|
||||
|
||||
class EdfaParams:
|
||||
def __init__(self, **params):
|
||||
self.update_params(params)
|
||||
if params == {}:
|
||||
self.type_variety = ''
|
||||
self.type_def = ''
|
||||
self.gain_flatmax = 0
|
||||
self.gain_min = 0
|
||||
self.p_max = 0
|
||||
self.nf_model = None
|
||||
self.nf_fit_coeff = None
|
||||
self.nf_ripple = None
|
||||
self.dgt = None
|
||||
self.gain_ripple = None
|
||||
self.out_voa_auto = False
|
||||
self.allowed_for_design = None
|
||||
|
||||
def update_params(self, kwargs):
for k, v in kwargs.items():
# nested dict values (e.g. advanced config sections) are stored as plain attributes
setattr(self, k, v)
|
||||
|
||||
class EdfaOperational:
|
||||
def __init__(self, gain_target, tilt_target, out_voa=None):
|
||||
self.gain_target = gain_target
|
||||
self.tilt_target = tilt_target
|
||||
self.out_voa = out_voa
|
||||
def __repr__(self):
|
||||
return (f'{type(self).__name__}('
|
||||
f'gain_target={self.gain_target!r}, '
|
||||
f'tilt_target={self.tilt_target!r})')
|
||||
|
||||
class Edfa(Node):
|
||||
def __init__(self, *args, params={}, operational={}, **kwargs):
|
||||
#TBC is this useful? put in comment for now:
|
||||
#if params is None:
|
||||
# params = {}
|
||||
#if operational is None:
|
||||
# operational = {}
|
||||
super().__init__(
|
||||
*args,
|
||||
params=EdfaParams(**params),
|
||||
operational=EdfaOperational(**operational),
|
||||
**kwargs
|
||||
)
|
||||
self.interpol_dgt = None # interpolated dynamic gain tilt
|
||||
self.interpol_gain_ripple = None # gain ripple
|
||||
self.interpol_nf_ripple = None # nf_ripple
|
||||
self.channel_freq = None # SI channel frequencies
|
||||
# nf, gprofile, pin and pout attributes are set by interpol_params
|
||||
self.nf = None # dB edfa nf at operational.gain_target
|
||||
self.gprofile = None
|
||||
self.pin_db = None
|
||||
self.nch = None
|
||||
self.pout_db = None
|
||||
self.dp_db = None #delta P with Pref (power sweep) in power mode
|
||||
self.target_pch_out_db = None
|
||||
self.effective_pch_out_db = None
|
||||
self.passive = False
|
||||
self.effective_gain = self.operational.gain_target
|
||||
self.att_in = None
|
||||
self.carriers_in = None
|
||||
self.carriers_out = None
|
||||
|
||||
@property
|
||||
def to_json(self):
|
||||
return {'uid' : self.uid,
|
||||
'type' : type(self).__name__,
|
||||
'type_variety' : self.params.type_variety,
|
||||
'operational' : {
|
||||
'gain_target' : self.operational.gain_target,
|
||||
'tilt_target' : self.operational.tilt_target,
|
||||
'out_voa' : self.operational.out_voa
|
||||
},
|
||||
'metadata' : {
|
||||
'location': self.metadata['location']._asdict()
|
||||
}
|
||||
}
|
||||
|
||||
def __repr__(self):
|
||||
return (f'{type(self).__name__}(uid={self.uid!r}, '
|
||||
f'type_variety={self.params.type_variety!r}'
|
||||
f'interpol_dgt={self.interpol_dgt!r}, '
|
||||
f'interpol_gain_ripple={self.interpol_gain_ripple!r}, '
|
||||
f'interpol_nf_ripple={self.interpol_nf_ripple!r}, '
|
||||
f'channel_freq={self.channel_freq!r}, '
|
||||
f'nf={self.nf!r}, '
|
||||
f'gprofile={self.gprofile!r}, '
|
||||
f'pin_db={self.pin_db!r}, '
|
||||
f'pout_db={self.pout_db!r})')
|
||||
|
||||
def __str__(self):
|
||||
if self.pin_db is None or self.pout_db is None:
|
||||
return f'{type(self).__name__} {self.uid}'
|
||||
nf = mean(self.nf)
|
||||
return '\n'.join([f'{type(self).__name__} {self.uid}',
|
||||
f' type_variety: {self.params.type_variety}',
|
||||
f' effective gain(dB): {self.effective_gain:.2f}',
|
||||
f' (before att_in and before output VOA)',
|
||||
f' noise figure (dB): {nf:.2f}',
|
||||
f' (including att_in)',
|
||||
f' pad att_in (dB): {self.att_in:.2f}',
|
||||
f' Power In (dBm): {self.pin_db:.2f}',
|
||||
f' Power Out (dBm): {self.pout_db:.2f}',
|
||||
f' Delta_P (dB): {self.dp_db!r}',
|
||||
f' target pch (dBm): {self.target_pch_out_db!r}',
|
||||
f' effective pch (dBm): {self.effective_pch_out_db!r}',
|
||||
f' output VOA (dB): {self.operational.out_voa:.2f}'])
|
||||
|
||||
def carriers(self, loc, attr):
|
||||
"""retrieve carriers information
|
||||
loc = (in, out) of the class element
|
||||
attr = (ase, nli, signal, total) power information"""
|
||||
if not (loc in ('in', 'out') and attr in ('nli', 'signal', 'total', 'ase')):
|
||||
yield None
|
||||
return
|
||||
power_dict = {
|
||||
'nli': 'nonlinear_interference',
|
||||
'ase': 'amplified_spontaneous_emission'
|
||||
}
|
||||
attr = power_dict.get(attr, attr)
|
||||
loc_attr = 'carriers_'+loc
|
||||
for c in getattr(self, loc_attr) :
|
||||
if attr == 'total':
|
||||
yield c.power.ase+c.power.nli+c.power.signal
|
||||
else:
|
||||
yield c.power._asdict().get(attr, None)
|
||||
|
||||
def interpol_params(self, frequencies, pin, baud_rates, pref):
|
||||
"""interpolate SI channel frequencies with the edfa dgt and gain_ripple frquencies from json
|
||||
set the edfa class __init__ None parameters :
|
||||
self.channel_freq, self.nf, self.interpol_dgt and self.interpol_gain_ripple
|
||||
"""
|
||||
# TODO|jla: read amplifier actual frequencies from additional params in json
|
||||
amplifier_freq = itufs(0.05) * 1e12 # Hz
|
||||
self.channel_freq = frequencies
|
||||
self.interpol_dgt = interp(self.channel_freq, amplifier_freq, self.params.dgt)
|
||||
self.interpol_gain_ripple = interp(self.channel_freq, amplifier_freq, self.params.gain_ripple)
|
||||
self.interpol_nf_ripple = interp(self.channel_freq, amplifier_freq, self.params.nf_ripple)
|
||||
|
||||
self.nch = frequencies.size
|
||||
self.pin_db = lin2db(sum(pin*1e3))
|
||||
|
||||
"""in power mode: dp_db is defined and can be used to calculate the power target
|
||||
This power target is used to calculate the amplifier gain"""
|
||||
if self.dp_db is not None:
|
||||
self.target_pch_out_db = round(self.dp_db + pref.p0, 2)
|
||||
self.effective_gain = self.target_pch_out_db - pref.pi
|
||||
else:
|
||||
self.effective_gain = self.operational.gain_target
|
||||
|
||||
"""check power saturation and correct target_gain accordingly:"""
|
||||
self.effective_gain = min(self.effective_gain, self.params.p_max - self.pin_db)
|
||||
self.effective_pch_out_db = round(pref.pi + self.effective_gain, 2)
|
||||
|
||||
self.nf = self._calc_nf()
|
||||
self.gprofile = self._gain_profile(pin)
|
||||
|
||||
pout = (pin + self.noise_profile(baud_rates))*db2lin(self.gprofile)
|
||||
self.pout_db = lin2db(sum(pout*1e3))
|
||||
self.operational.gain_target = self.effective_gain
|
||||
# ase & nli are only calculated in signal bandwidth
|
||||
# pout_db is not the absolute full output power (negligible if sufficient channels)
|
||||
|
||||
def _calc_nf(self, avg = False):
|
||||
"""nf calculation based on 2 models: self.params.nf_model.enabled from json import:
|
||||
True => 2 stages amp modelling based on precalculated nf1, nf2 and delta_p in build_OA_json
|
||||
False => polynomial fit based on self.params.nf_fit_coeff"""
|
||||
# TODO|jla: TBD alarm rising or input VOA padding in case
|
||||
# gain_min > gain_target TBD:
|
||||
pad = max(self.params.gain_min - self.effective_gain, 0)
|
||||
self.att_in = pad
|
||||
gain_target = self.effective_gain + pad
|
||||
dg = max(self.params.gain_flatmax - gain_target, 0)
|
||||
if self.params.type_def == 'variable_gain':
|
||||
g1a = gain_target - self.params.nf_model.delta_p - dg
|
||||
nf_avg = lin2db(db2lin(self.params.nf_model.nf1) + db2lin(self.params.nf_model.nf2)/db2lin(g1a))
|
||||
elif self.params.type_def == 'fixed_gain':
|
||||
nf_avg = self.params.nf_model.nf0
|
||||
elif self.params.type_def == 'openroadm':
|
||||
pin_ch = self.pin_db - lin2db(self.nch)
|
||||
# model OSNR = f(Pin)
|
||||
nf_avg = pin_ch - polyval(self.params.nf_model.nf_coef, pin_ch) + 58
|
||||
else:
|
||||
nf_avg = polyval(self.params.nf_fit_coeff, -dg)
|
||||
if avg:
|
||||
return nf_avg + pad
|
||||
else:
|
||||
return self.interpol_nf_ripple + nf_avg + pad # input VOA = 1 for 1 NF degradation
|
||||
|
||||
def noise_profile(self, df):
|
||||
""" noise_profile(bw) computes amplifier ase (W) in signal bw (Hz)
|
||||
noise is calculated at amplifier input
|
||||
|
||||
:bw: signal bandwidth = baud rate in Hz
|
||||
:type bw: float
|
||||
|
||||
:return: the ase power in W in the signal bandwidth bw for 96 channels
|
||||
:return type: numpy array of float
|
||||
|
||||
ASE POWER USING PER CHANNEL GAIN PROFILE
|
||||
INPUTS:
|
||||
NF_dB - Noise figure in dB, vector of length number of channels or
|
||||
spectral slices
|
||||
G_dB - Actual gain calculated for the EDFA, vector of length number of
|
||||
channels or spectral slices
|
||||
ffs - Center frequency grid of the channels or spectral slices in
|
||||
THz, vector of length number of channels or spectral slices
|
||||
dF - width of each channel or spectral slice in THz,
|
||||
vector of length number of channels or spectral slices
|
||||
OUTPUT:
|
||||
ase_dBm - ase in dBm per channel or spectral slice
|
||||
NOTE: the output is the total ASE in the channel or spectral slice. For
|
||||
50GHz channels the ASE BW is effectively 0.4nm. To get to noise power
|
||||
in 0.1nm, subtract 6dB.
|
||||
|
||||
ONSR is usually quoted as channel power divided by
|
||||
the ASE power in 0.1nm RBW, regardless of the width of the actual
|
||||
channel. This is a historical convention from the days when optical
|
||||
signals were much smaller (155Mbps, 2.5Gbps, ... 10Gbps) than the
|
||||
resolution of the OSAs that were used to measure spectral power which
|
||||
were set to 0.1nm resolution for convenience. Moving forward into
|
||||
flexible grid and high baud rate signals, it may be convenient to begin
|
||||
quoting power spectral density in the same BW for both signal and ASE,
|
||||
e.g. 12.5GHz."""
|
||||
|
||||
ase = h * df * self.channel_freq * db2lin(self.nf) # W
|
||||
return ase # in W at amplifier input
|
||||
|
||||
def _gain_profile(self, pin, err_tolerance=1.0e-11, simple_opt=True):
|
||||
"""
|
||||
Pin : input power / channel in W
|
||||
|
||||
:param gain_ripple: design flat gain
|
||||
:param dgt: design gain tilt
|
||||
:param Pin: total input power in W
|
||||
:param gp: Average gain setpoint in dB units
|
||||
:param gtp: gain tilt setting
|
||||
:type gain_ripple: numpy.ndarray
|
||||
:type dgt: numpy.ndarray
|
||||
:type Pin: numpy.ndarray
|
||||
:type gp: float
|
||||
:type gtp: float
|
||||
:return: gain profile in dB
|
||||
:rtype: numpy.ndarray
|
||||
|
||||
AMPLIFICATION USING INPUT PROFILE
|
||||
INPUTS:
|
||||
gain_ripple - vector of length number of channels or spectral slices
|
||||
DGT - vector of length number of channels or spectral slices
|
||||
Pin - input powers vector of length number of channels or
|
||||
spectral slices
|
||||
Gp - provisioned gain length 1
|
||||
GTp - provisioned tilt length 1
|
||||
|
||||
OUTPUT:
|
||||
amp gain per channel or spectral slice
|
||||
NOTE: there is no checking done for violations of the total output
|
||||
power capability of the amp.
|
||||
EDIT OF PREVIOUS NOTE: power violation now added in interpol_params
|
||||
Ported from Matlab version written by David Boerges at Ciena.
|
||||
Based on:
|
||||
R. di Muro, "The Er3+ fiber gain coefficient derived from a dynamic
|
||||
gain
|
||||
tilt technique", Journal of Lightwave Technology, Vol. 18, Iss. 3,
|
||||
Pp. 343-347, 2000.
|
||||
"""
|
||||
|
||||
# TODO|jla: check what param should be used (currently length(dgt))
|
||||
nb_channel = arange(len(self.interpol_dgt))
|
||||
|
||||
# TODO|jla: find a way to use these or lose them. Primarily we should have
|
||||
# a way to determine if exceeding the gain or output power of the amp
|
||||
tot_in_power_db = self.pin_db  # total input power in dBm
|
||||
|
||||
# linear fit to get the average dgt slope
|
||||
p = polyfit(nb_channel, self.interpol_dgt, 1)
|
||||
dgt_slope = p[0]
|
||||
|
||||
# Calculate the target slope - currently assumes equal spaced channels
|
||||
# TODO|jla: support arbitrary channel spacing
|
||||
targ_slope = self.operational.tilt_target / (len(nb_channel) - 1)
|
||||
|
||||
# first estimate of DGT scaling
|
||||
if abs(dgt_slope) > 0.001: # check for zero value due to flat dgt
|
||||
dgts1 = targ_slope / dgt_slope
|
||||
else:
|
||||
dgts1 = 0
|
||||
|
||||
# when simple_opt is true, make 2 attempts to compute gain and
|
||||
# the internal voa value. This is currently here to provide direct
|
||||
# comparison with original Matlab code. Will be removed.
|
||||
# TODO|jla: replace with loop
|
||||
|
||||
if not simple_opt:
|
||||
return
|
||||
|
||||
# first estimate of Er gain & VOA loss
|
||||
g1st = array(self.interpol_gain_ripple) + self.params.gain_flatmax \
|
||||
+ array(self.interpol_dgt) * dgts1
|
||||
voa = lin2db(mean(db2lin(g1st))) - self.effective_gain
|
||||
|
||||
# second estimate of amp ch gain using the channel input profile
|
||||
g2nd = g1st - voa
|
||||
|
||||
pout_db = lin2db(sum(pin*1e3*db2lin(g2nd)))
|
||||
dgts2 = self.effective_gain - (pout_db - tot_in_power_db)
|
||||
|
||||
# center estimate of amp ch gain
|
||||
xcent = dgts2
|
||||
gcent = g1st - voa + array(self.interpol_dgt) * xcent
|
||||
pout_db = lin2db(sum(pin*1e3*db2lin(gcent)))
|
||||
gavg_cent = pout_db - tot_in_power_db
|
||||
|
||||
# Lower estimate of amp ch gain
|
||||
deltax = max(g1st) - min(g1st)
|
||||
# if no ripple deltax = 0 and xlow = xcent: div 0
|
||||
# TODO|jla: add check for flat gain response
|
||||
if abs(deltax) <= 0.05: # not enough ripple to consider calculation
|
||||
return g1st - voa
|
||||
|
||||
xlow = dgts2 - deltax
|
||||
glow = g1st - voa + array(self.interpol_dgt) * xlow
|
||||
pout_db = lin2db(sum(pin * 1e3 * db2lin(glow)))
|
||||
gavg_low = pout_db - tot_in_power_db
|
||||
|
||||
# upper gain estimate
|
||||
xhigh = dgts2 + deltax
|
||||
ghigh = g1st - voa + array(self.interpol_dgt) * xhigh
|
||||
pout_db = lin2db(sum(pin * 1e3 * db2lin(ghigh)))
|
||||
gavg_high = pout_db - tot_in_power_db
|
||||
|
||||
# compute slope
|
||||
slope1 = (gavg_low - gavg_cent) / (xlow - xcent)
|
||||
slope2 = (gavg_cent - gavg_high) / (xcent - xhigh)
|
||||
|
||||
if abs(self.effective_gain - gavg_cent) <= err_tolerance:
|
||||
dgts3 = xcent
|
||||
elif self.effective_gain < gavg_cent:
|
||||
dgts3 = xcent - (gavg_cent - self.effective_gain) / slope1
|
||||
else:
|
||||
dgts3 = xcent + (-gavg_cent + self.effective_gain) / slope2
|
||||
|
||||
return g1st - voa + array(self.interpol_dgt) * dgts3
|
||||
|
||||
def propagate(self, pref, *carriers):
|
||||
"""add ase noise to the propagating carriers of SpectralInformation"""
|
||||
pin = array([c.power.signal+c.power.nli+c.power.ase for c in carriers]) # pin in W
|
||||
freq = array([c.frequency for c in carriers])
|
||||
brate = array([c.baud_rate for c in carriers])
|
||||
# interpolate the amplifier vectors with the carriers freq, calculate nf & gain profile
|
||||
self.interpol_params(freq, pin, brate, pref)
|
||||
|
||||
gains = db2lin(self.gprofile)
|
||||
carrier_ases = self.noise_profile(brate)
|
||||
att = db2lin(self.operational.out_voa)
|
||||
|
||||
for gain, carrier_ase, carrier in zip(gains, carrier_ases, carriers):
|
||||
pwr = carrier.power
|
||||
pwr = pwr._replace(signal=pwr.signal*gain/att,
|
||||
nonlinear_interference=pwr.nli*gain/att,
|
||||
amplified_spontaneous_emission=(pwr.ase+carrier_ase)*gain/att)
|
||||
yield carrier._replace(power=pwr)
|
||||
|
||||
def update_pref(self, pref):
|
||||
return pref._replace(p_span0=pref.p0,
|
||||
p_spani=pref.pi + self.effective_gain - self.operational.out_voa)
|
||||
|
||||
def __call__(self, spectral_info):
|
||||
self.carriers_in = spectral_info.carriers
|
||||
carriers = tuple(self.propagate(spectral_info.pref, *spectral_info.carriers))
|
||||
pref = self.update_pref(spectral_info.pref)
|
||||
self.carriers_out = carriers
|
||||
return spectral_info.update(carriers=carriers, pref=pref)
|
||||
271
gnpy/core/equipment.py
Normal file
@@ -0,0 +1,271 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.equipment
|
||||
===================
|
||||
|
||||
This module contains functionality for specifying equipment.
|
||||
'''
|
||||
|
||||
from numpy import clip, polyval
|
||||
from sys import exit
|
||||
from operator import itemgetter
|
||||
from math import isclose
|
||||
from pathlib import Path
|
||||
from json import load
|
||||
from gnpy.core.utils import lin2db, db2lin, load_json
|
||||
from collections import namedtuple
|
||||
from gnpy.core.elements import Edfa
|
||||
|
||||
Model_vg = namedtuple('Model_vg', 'nf1 nf2 delta_p')
|
||||
Model_fg = namedtuple('Model_fg', 'nf0')
|
||||
Model_openroadm = namedtuple('Model_openroadm', 'nf_coef')
|
||||
Fiber = namedtuple('Fiber', 'type_variety dispersion gamma')
|
||||
Spans = namedtuple('Spans', 'power_mode delta_power_range_db max_length length_units \
|
||||
max_loss padding EOL con_in con_out')
|
||||
Transceiver = namedtuple('Transceiver', 'type_variety frequency mode')
|
||||
Roadms = namedtuple('Roadms', 'gain_mode_default_loss power_mode_pout_target add_drop_osnr')
|
||||
SI = namedtuple('SI', 'f_min f_max baud_rate spacing roll_off \
|
||||
power_dbm power_range_db tx_osnr sys_margins')
|
||||
AmpBase = namedtuple(
|
||||
'AmpBase',
|
||||
'type_variety type_def gain_flatmax gain_min p_max'
|
||||
' nf_model nf_fit_coeff nf_ripple dgt gain_ripple out_voa_auto allowed_for_design')
|
||||
class Amp(AmpBase):
|
||||
def __new__(cls,
|
||||
type_variety, type_def, gain_flatmax, gain_min, p_max, nf_model=None,
|
||||
nf_fit_coeff=None, nf_ripple=None, dgt=None, gain_ripple=None,
|
||||
out_voa_auto=False, allowed_for_design=True):
|
||||
return super().__new__(cls,
|
||||
type_variety, type_def, gain_flatmax, gain_min, p_max,
|
||||
nf_model, nf_fit_coeff, nf_ripple, dgt, gain_ripple,
|
||||
out_voa_auto, allowed_for_design)
|
||||
|
||||
@classmethod
|
||||
def from_advanced_json(cls, filename, **kwargs):
|
||||
with open(filename, encoding='utf-8') as f:
|
||||
json_data = load(f)
|
||||
return cls(**{**kwargs, **json_data, 'type_def':None, 'nf_model':None})
|
||||
|
||||
@classmethod
|
||||
def from_default_json(cls, filename, **kwargs):
|
||||
with open(filename, encoding='utf-8') as f:
|
||||
json_data = load(f)
|
||||
type_variety = kwargs['type_variety']
|
||||
type_def = kwargs.get('type_def', 'variable_gain') #default compatibility with older json eqpt files
|
||||
nf_def = None
|
||||
|
||||
if type_def == 'fixed_gain':
|
||||
try:
|
||||
nf0 = kwargs.pop('nf0')
|
||||
except KeyError: #nf0 is expected for a fixed gain amp
|
||||
print(f'missing nf0 value input for amplifier: {type_variety} in eqpt_config.json')
|
||||
exit()
|
||||
try: #remove all remaining nf inputs
|
||||
del kwargs['nf_min']
|
||||
del kwargs['nf_max']
|
||||
except KeyError: pass #nf_min and nf_max are not needed for fixed gain amp
|
||||
nf_def = Model_fg(nf0)
|
||||
elif type_def == 'variable_gain':
|
||||
gain_min, gain_max = kwargs['gain_min'], kwargs['gain_flatmax']
|
||||
try: #nf_min and nf_max are expected for a variable gain amp
|
||||
nf_min = kwargs.pop('nf_min')
|
||||
nf_max = kwargs.pop('nf_max')
|
||||
except KeyError:
|
||||
print(f'missing nf_min/max value input for amplifier: {type_variety} in eqpt_config.json')
|
||||
exit()
|
||||
try: #remove all remaining nf inputs
|
||||
del kwargs['nf0']
|
||||
except KeyError: pass #nf0 is not needed for variable gain amp
|
||||
nf1, nf2, delta_p = nf_model(type_variety, gain_min, gain_max, nf_min, nf_max)
|
||||
nf_def = Model_vg(nf1, nf2, delta_p)
|
||||
elif type_def == 'openroadm':
|
||||
try:
|
||||
nf_coef = kwargs.pop('nf_coef')
|
||||
except KeyError: #nf_coef is expected for openroadm amp
|
||||
print(f'missing nf_coef input for amplifier: {type_variety} in eqpt_config.json')
|
||||
exit()
|
||||
nf_def = Model_openroadm(nf_coef)
|
||||
return cls(**{**kwargs, **json_data, 'nf_model': nf_def})
|
||||
|
||||
|
||||
def nf_model(type_variety, gain_min, gain_max, nf_min, nf_max):
|
||||
if nf_min < -10:
|
||||
print(f'Invalid nf_min value {nf_min!r} for amplifier {type_variety}')
|
||||
exit()
|
||||
if nf_max < -10:
|
||||
print(f'Invalid nf_max value {nf_max!r} for amplifier {type_variety}')
|
||||
exit()
|
||||
|
||||
# NF estimation model based on nf_min and nf_max
|
||||
# delta_p: max power dB difference between first and second stage coils
|
||||
# dB g1a: first stage gain - internal VOA attenuation
|
||||
# nf1, nf2: first and second stage coils
|
||||
# calculated by solving nf_{min,max} = nf1 + nf2 / g1a{min,max}
|
||||
delta_p = 5
|
||||
g1a_min = gain_min - (gain_max - gain_min) - delta_p
|
||||
g1a_max = gain_max - delta_p
|
||||
nf2 = lin2db((db2lin(nf_min) - db2lin(nf_max)) /
|
||||
(1/db2lin(g1a_max) - 1/db2lin(g1a_min)))
|
||||
nf1 = lin2db(db2lin(nf_min) - db2lin(nf2)/db2lin(g1a_max))
|
||||
|
||||
if nf1 < 4:
|
||||
print(f'First coil value too low {nf1} for amplifier {type_variety}')
|
||||
exit()
|
||||
|
||||
# Check 1 dB < delta_p < 6 dB to ensure nf_min and nf_max values make sense.
|
||||
# There shouldn't be high nf differences between the two coils:
|
||||
# nf2 should be nf1 + 0.3 < nf2 < nf1 + 2
|
||||
# If not, recompute and check delta_p
|
||||
if not nf1 + 0.3 < nf2 < nf1 + 2:
|
||||
nf2 = clip(nf2, nf1 + 0.3, nf1 + 2)
|
||||
g1a_max = lin2db(db2lin(nf2) / (db2lin(nf_min) - db2lin(nf1)))
|
||||
delta_p = gain_max - g1a_max
|
||||
g1a_min = gain_min - (gain_max-gain_min) - delta_p
|
||||
if not 1 < delta_p < 6:
|
||||
print(f'Computed \N{greek capital letter delta}P invalid \
|
||||
\n 1st coil vs 2nd coil calculated DeltaP {delta_p:.2f} for \
|
||||
\n amplifier {type_variety} is not valid: revise inputs \
|
||||
\n calculated 1st coil NF = {nf1:.2f}, 2nd coil NF = {nf2:.2f}')
|
||||
exit()
|
||||
# Check calculated values for nf1 and nf2
|
||||
calc_nf_min = lin2db(db2lin(nf1) + db2lin(nf2)/db2lin(g1a_max))
|
||||
if not isclose(nf_min, calc_nf_min, abs_tol=0.01):
|
||||
print(f'nf_min does not match calc_nf_min, {nf_min} vs {calc_nf_min} for amp {type_variety}')
|
||||
exit()
|
||||
calc_nf_max = lin2db(db2lin(nf1) + db2lin(nf2)/db2lin(g1a_min))
|
||||
if not isclose(nf_max, calc_nf_max, abs_tol=0.01):
|
||||
print(f'nf_max does not match calc_nf_max, {nf_max} vs {calc_nf_max} for amp {type_variety}')
|
||||
exit()
|
||||
|
||||
return nf1, nf2, delta_p
|
||||
|
||||
def edfa_nf(gain_target, variety_type, equipment):
|
||||
amp_params = equipment['Edfa'][variety_type]
|
||||
amp = Edfa(
|
||||
uid = f'calc_NF',
|
||||
params = amp_params._asdict(),
|
||||
operational = {
|
||||
'gain_target': gain_target,
|
||||
'tilt_target': 0
|
||||
}
|
||||
)
|
||||
amp.pin_db = 0
|
||||
amp.nch = 88
|
||||
return amp._calc_nf(True)
|
||||
|
||||
def trx_mode_params(equipment, trx_type_variety='', trx_mode='', error_message=False):
|
||||
"""return the trx and SI parameters from eqpt_config for a given type_variety and mode (ie format)"""
|
||||
trx_params = {}
|
||||
default_si_data = equipment['SI']['default']
|
||||
|
||||
try:
|
||||
trxs = equipment['Transceiver']
|
||||
#if called from path_requests_run.py, trx_mode is filled with None when not specified by user
|
||||
#if called from transmission_main.py, trx_mode is ''
|
||||
if trx_mode is not None:
|
||||
mode_params = next(mode for trx in trxs \
|
||||
if trx == trx_type_variety \
|
||||
for mode in trxs[trx].mode \
|
||||
if mode['format'] == trx_mode)
|
||||
trx_params = {**mode_params}
|
||||
# sanity check: baud rate must be smaller than min_spacing
|
||||
if trx_params['baud_rate'] > trx_params['min_spacing'] :
|
||||
msg = f'Inconsistency in equipment library:\n Transponder "{trx_type_variety}" mode "{trx_params["format"]}" '+\
f'has baud rate {trx_params["baud_rate"]*1e-9} GBaud greater than min_spacing {trx_params["min_spacing"]*1e-9} GHz.'
|
||||
print(msg)
|
||||
exit()
|
||||
else:
|
||||
mode_params = {"format": "undetermined",
|
||||
"baud_rate": None,
|
||||
"OSNR": None,
|
||||
"bit_rate": None,
|
||||
"roll_off": None,
|
||||
"tx_osnr":None,
|
||||
"min_spacing":None,
|
||||
"cost":None}
|
||||
trx_params = {**mode_params}
|
||||
trx_params['f_min'] = equipment['Transceiver'][trx_type_variety].frequency['min']
|
||||
trx_params['f_max'] = equipment['Transceiver'][trx_type_variety].frequency['max']
|
||||
|
||||
# TODO: novel automatic feature maybe unwanted if spacing is specified
|
||||
# trx_params['spacing'] = automatic_spacing(trx_params['baud_rate'])
|
||||
# temp = trx_params['spacing']
|
||||
# print(f'spacing {temp}')
|
||||
except StopIteration :
|
||||
if error_message:
|
||||
print(f'could not find tsp : {trx_type_variety} with mode: {trx_mode} in eqpt library')
|
||||
print('Computation stopped.')
|
||||
exit()
|
||||
else:
|
||||
# default transponder characteristics
|
||||
# mainly used with transmission_main_example.py
|
||||
trx_params['f_min'] = default_si_data.f_min
|
||||
trx_params['f_max'] = default_si_data.f_max
|
||||
trx_params['baud_rate'] = default_si_data.baud_rate
|
||||
trx_params['spacing'] = default_si_data.spacing
|
||||
trx_params['OSNR'] = None
|
||||
trx_params['bit_rate'] = None
|
||||
trx_params['cost'] = None
|
||||
trx_params['roll_off'] = default_si_data.roll_off
|
||||
trx_params['tx_osnr'] = default_si_data.tx_osnr
|
||||
trx_params['min_spacing'] = None
|
||||
nch = automatic_nch(trx_params['f_min'], trx_params['f_max'], trx_params['spacing'])
|
||||
trx_params['nb_channel'] = nch
|
||||
print(f'There are {nch} channels propagating')
|
||||
|
||||
trx_params['power'] = db2lin(default_si_data.power_dbm)*1e-3
|
||||
|
||||
return trx_params
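# Illustrative sketch (hypothetical helper, not in gnpy): the two calling conventions handled
# above. 'vendor_A_trx' and 'mode 1' are hypothetical names that must exist in the library.
def _example_trx_mode_params(equipment):
    explicit = trx_mode_params(equipment, 'vendor_A_trx', 'mode 1', error_message=True)
    defaults = trx_mode_params(equipment)   # trx_mode='' falls back to equipment['SI']['default']
    return explicit, defaults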
|
||||
|
||||
def automatic_spacing(baud_rate):
|
||||
"""return the min possible channel spacing for a given baud rate"""
|
||||
# TODO: this should be parametrized in a config file
|
||||
spacing_list = [(33e9,37.5e9), (38e9,50e9), (50e9,62.5e9), (67e9,75e9), (92e9,100e9)] #list of possible tuples
|
||||
#[(max_baud_rate, spacing_for_this_baud_rate)]
|
||||
return min((s[1] for s in spacing_list if s[0] > baud_rate), default=baud_rate*1.2)
|
||||
|
||||
def automatic_nch(f_min, f_max, spacing):
|
||||
return int((f_max - f_min)//spacing)
|
||||
|
||||
def automatic_fmax(f_min, spacing, nch):
|
||||
return f_min + spacing * nch
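# Illustrative sketch (hypothetical helper, not in gnpy): how the three grid helpers above
# combine for a 32 Gbaud comb over a C-band-like window (the frequencies are example values).
def _example_grid_helpers():
    spacing = automatic_spacing(32e9)                    # 37.5e9: smallest grid wider than the baud rate
    nch = automatic_nch(191.35e12, 196.1e12, spacing)    # 126 channels fit in the window
    f_max = automatic_fmax(191.35e12, spacing, nch)      # upper edge consistent with nch
    return spacing, nch, f_max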
|
||||
|
||||
def load_equipment(filename):
|
||||
json_data = load_json(filename)
|
||||
return equipment_from_json(json_data, filename)
|
||||
|
||||
def update_trx_osnr(equipment):
|
||||
"""add sys_margins to all Transceivers OSNR values"""
|
||||
for trx in equipment['Transceiver'].values():
|
||||
for m in trx.mode:
|
||||
m['OSNR'] = m['OSNR'] + equipment['SI']['default'].sys_margins
|
||||
return equipment
|
||||
|
||||
def equipment_from_json(json_data, filename):
|
||||
"""build global dictionnary eqpt_library that stores all eqpt characteristics:
|
||||
edfa type type_variety, fiber type_variety
|
||||
from the eqpt_config.json (filename parameter)
|
||||
also read advanced_config_from_json file parameters for edfa if they are available:
|
||||
typically nf_ripple, dfg gain ripple, dgt and nf polynomial nf_fit_coeff
|
||||
if advanced_config_from_json file parameter is not present: use nf_model:
|
||||
requires nf_min and nf_max values boundaries of the edfa gain range
|
||||
"""
|
||||
equipment = {}
|
||||
for key, entries in json_data.items():
|
||||
equipment[key] = {}
|
||||
typ = globals()[key]
|
||||
for entry in entries:
|
||||
subkey = entry.get('type_variety', 'default')
|
||||
if key == 'Edfa':
|
||||
if 'advanced_config_from_json' in entry:
|
||||
config = Path(filename).parent / entry.pop('advanced_config_from_json')
|
||||
equipment[key][subkey] = Amp.from_advanced_json(config, **entry)
|
||||
else:
|
||||
config = Path(filename).parent / 'default_edfa_config.json'
|
||||
equipment[key][subkey] = Amp.from_default_json(config, **entry)
|
||||
else:
|
||||
equipment[key][subkey] = typ(**entry)
|
||||
equipment = update_trx_osnr(equipment)
|
||||
return equipment
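# Illustrative sketch (hypothetical helper, not in gnpy): typical entry point; the returned
# dict is keyed by section ('Edfa', 'Fiber', 'Transceiver', 'SI', ...) then by type_variety.
# 'eqpt_config.json' is the example file name mentioned in the docstring above.
def _example_list_edfa_varieties(path='eqpt_config.json'):
    equipment = load_equipment(path)
    return sorted(equipment['Edfa'].keys())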
|
||||
10
gnpy/core/execute.py
Normal file
@@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.execute
|
||||
=================
|
||||
|
||||
This module contains functions for executing the propagation of
|
||||
spectral information on a `gnpy` network.
|
||||
'''
|
||||
91
gnpy/core/info.py
Normal file
@@ -0,0 +1,91 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.info
|
||||
==============
|
||||
|
||||
This module contains classes for modelling SpectralInformation.
|
||||
'''
|
||||
|
||||
|
||||
from collections import namedtuple
|
||||
from numpy import array
|
||||
from gnpy.core.utils import lin2db, db2lin
|
||||
from json import loads
|
||||
from gnpy.core.utils import load_json
|
||||
from gnpy.core.equipment import automatic_nch, automatic_spacing
|
||||
|
||||
class ConvenienceAccess:
|
||||
|
||||
def __init_subclass__(cls):
|
||||
for abbrev, field in getattr(cls, '_ABBREVS', {}).items():
|
||||
setattr(cls, abbrev, property(lambda self, f=field: getattr(self, f)))
|
||||
|
||||
def update(self, **kwargs):
|
||||
for abbrev, field in getattr(self, '_ABBREVS', {}).items():
|
||||
if abbrev in kwargs:
|
||||
kwargs[field] = kwargs.pop(abbrev)
|
||||
return self._replace(**kwargs)
|
||||
|
||||
|
||||
class Power(namedtuple('Power', 'signal nonlinear_interference amplified_spontaneous_emission'), ConvenienceAccess):
|
||||
|
||||
_ABBREVS = {'nli': 'nonlinear_interference',
|
||||
'ase': 'amplified_spontaneous_emission',}
|
||||
|
||||
|
||||
class Channel(namedtuple('Channel', 'channel_number frequency baud_rate roll_off power'), ConvenienceAccess):
|
||||
|
||||
_ABBREVS = {'channel': 'channel_number',
|
||||
'num_chan': 'channel_number',
|
||||
'ffs': 'frequency',
|
||||
'freq': 'frequency',}
|
||||
|
||||
class Pref(namedtuple('Pref', 'p_span0, p_spani'), ConvenienceAccess):
|
||||
|
||||
_ABBREVS = {'p0' : 'p_span0',
|
||||
'pi' : 'p_spani'}
|
||||
|
||||
class SpectralInformation(namedtuple('SpectralInformation', 'pref carriers'), ConvenienceAccess):
|
||||
|
||||
def __new__(cls, pref=Pref(0, 0), *carriers):
|
||||
return super().__new__(cls, pref, carriers)
|
||||
|
||||
def merge_input_spectral_information(*si):
|
||||
"""mix channel combs of different baud rates and power"""
|
||||
#TODO
|
||||
pass
|
||||
|
||||
def create_input_spectral_information(f_min, f_max, roll_off, baud_rate, power, spacing):
|
||||
# pref in dB : convert power lin into power in dB
|
||||
pref = lin2db(power * 1e3)
|
||||
si = SpectralInformation(pref=Pref(pref, pref))
|
||||
nb_channel = automatic_nch(f_min, f_max, spacing)
|
||||
si = si.update(carriers=[
|
||||
Channel(f, (f_min+spacing*f),
|
||||
baud_rate, roll_off, Power(power, 0, 0)) for f in range(1,nb_channel+1)
|
||||
])
|
||||
return si
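# Illustrative sketch (hypothetical helper, not in gnpy): build a small comb with the
# constructor above and read it back through the ConvenienceAccess abbreviations ('freq', 'nli').
def _example_spectral_information():
    si = create_input_spectral_information(191.35e12, 196.1e12, 0.15, 32e9, 1e-3, 50e9)
    first = si.carriers[0]
    return first.freq, first.power.nli, len(si.carriers)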
|
||||
|
||||
if __name__ == '__main__':
|
||||
power = 1e-3  # demo value in W; 'power' was otherwise undefined in this __main__ block
pref = lin2db(power * 1e3)
|
||||
si = SpectralInformation(
|
||||
Pref(pref, pref),
|
||||
Channel(1, 193.95e12, 32e9, 0.15, # 193.95 THz, 32 Gbaud
|
||||
Power(1e-3, 1e-6, 1e-6)), # 1 mW, 1uW, 1uW
|
||||
Channel(1, 195.95e12, 32e9, 0.15, # 195.95 THz, 32 Gbaud
|
||||
Power(1.2e-3, 1e-6, 1e-6)), # 1.2 mW, 1uW, 1uW
|
||||
)
|
||||
|
||||
si = SpectralInformation()
|
||||
spacing = 0.05 # THz
|
||||
|
||||
si = si.update(carriers=tuple(Channel(f+1, 191.3+spacing*(f+1), 32e9, 0.15, Power(1e-3, f, 1)) for f in range(96)))
|
||||
|
||||
print(f'si = {si}')
|
||||
print(f'si = {si.carriers[0].power.nli}')
|
||||
print(f'si = {si.carriers[20].power.nli}')
|
||||
si2 = si.update(carriers=tuple(c.update(power = c.power.update(nli = c.power.nli * 1e5))
|
||||
for c in si.carriers))
|
||||
print(f'si2 = {si2}')
|
||||
438
gnpy/core/network.py
Normal file
@@ -0,0 +1,438 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.network
|
||||
=================
|
||||
|
||||
This module contains functions for constructing networks of network elements.
|
||||
'''
|
||||
|
||||
from gnpy.core.convert import convert_file
|
||||
from networkx import DiGraph
|
||||
from numpy import arange
|
||||
from logging import getLogger
|
||||
from os import path
|
||||
from operator import itemgetter
|
||||
from gnpy.core import elements
|
||||
from gnpy.core.elements import Fiber, Edfa, Transceiver, Roadm, Fused
|
||||
from gnpy.core.equipment import edfa_nf
|
||||
from gnpy.core.units import UNITS
|
||||
from gnpy.core.utils import load_json, save_json, round2float, db2lin, lin2db
|
||||
from sys import exit
|
||||
from collections import namedtuple
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
def load_network(filename, equipment, name_matching = False):
|
||||
json_filename = ''
|
||||
if filename.suffix.lower() == '.xls':
|
||||
logger.info('Automatically generating topology JSON file')
|
||||
json_filename = convert_file(filename, name_matching)
|
||||
elif filename.suffix.lower() == '.json':
|
||||
json_filename = filename
|
||||
else:
|
||||
raise ValueError(f'unsupported topology filename extension {filename.suffix.lower()}')
|
||||
json_data = load_json(json_filename)
|
||||
return network_from_json(json_data, equipment)
|
||||
|
||||
def save_network(filename, network):
|
||||
filename_output = path.splitext(filename)[0] + '_auto_design.json'
|
||||
json_data = network_to_json(network)
|
||||
save_json(json_data, filename_output)
|
||||
|
||||
def network_from_json(json_data, equipment):
|
||||
# NOTE|dutc: we could use the following, but it would tie our data format
|
||||
# too closely to the graph library
|
||||
# from networkx import node_link_graph
|
||||
g = DiGraph()
|
||||
for el_config in json_data['elements']:
|
||||
typ = el_config.pop('type')
|
||||
variety = el_config.pop('type_variety', 'default')
|
||||
if typ in equipment and variety in equipment[typ]:
|
||||
extra_params = equipment[typ][variety]
|
||||
el_config.setdefault('params', {}).update(extra_params._asdict())
|
||||
elif typ in ['Edfa', 'Fiber']: #catch it now because the code will crash later!
|
||||
print( f'The {typ} of variety type {variety} was not recognized:'
|
||||
'\nplease check it is properly defined in the eqpt_config json file')
|
||||
exit()
|
||||
cls = getattr(elements, typ)
|
||||
el = cls(**el_config)
|
||||
g.add_node(el)
|
||||
|
||||
nodes = {k.uid: k for k in g.nodes()}
|
||||
|
||||
for cx in json_data['connections']:
|
||||
from_node, to_node = cx['from_node'], cx['to_node']
|
||||
try:
|
||||
g.add_edge(nodes[from_node], nodes[to_node])
|
||||
except KeyError:
|
||||
msg = f'In {__name__} network_from_json function:\n\tcan not find {from_node} or {to_node} defined in {cx}'
|
||||
print(msg)
|
||||
exit(1)
|
||||
|
||||
return g
|
||||
|
||||
def network_to_json(network):
|
||||
data = {
|
||||
'elements': [n.to_json for n in network]
|
||||
}
|
||||
connections = {
|
||||
'connections': [{"from_node": n.uid,
|
||||
"to_node": next_n.uid}
|
||||
for n in network
|
||||
for next_n in network.successors(n) if next_n is not None]
|
||||
}
|
||||
data.update(connections)
|
||||
return data
|
||||
|
||||
def select_edfa(gain_target, power_target, equipment):
|
||||
"""amplifer selection algorithm
|
||||
@Orange Jean-Luc Augé
|
||||
"""
|
||||
Edfa_list = namedtuple('Edfa_list', 'variety power gain nf')
|
||||
TARGET_EXTENDED_GAIN = 2.1
|
||||
#MAX_EXTENDED_GAIN = 5
|
||||
edfa_dict = equipment['Edfa']
|
||||
pin = power_target - gain_target
|
||||
|
||||
edfa_list = [Edfa_list(
|
||||
variety=edfa_variety,
|
||||
power=min(
|
||||
pin
|
||||
+edfa.gain_flatmax
|
||||
+TARGET_EXTENDED_GAIN,
|
||||
edfa.p_max
|
||||
)
|
||||
-power_target,
|
||||
gain=edfa.gain_flatmax-gain_target,
|
||||
nf=edfa_nf(gain_target, edfa_variety, equipment)) \
|
||||
for edfa_variety, edfa in edfa_dict.items()
|
||||
if edfa.allowed_for_design]
|
||||
|
||||
acceptable_gain_list = \
|
||||
list(filter(lambda x : x.gain>-TARGET_EXTENDED_GAIN, edfa_list))
|
||||
if len(acceptable_gain_list) < 1:
|
||||
#no amplifier satisfies the required gain, so pick the highest gain:
|
||||
gain_max = max(edfa_list, key=itemgetter(2)).gain
|
||||
#pick up all amplifiers that share this max gain:
|
||||
acceptable_gain_list = \
|
||||
list(filter(lambda x : x.gain-gain_max>-0.1, edfa_list))
|
||||
acceptable_power_list = \
|
||||
list(filter(lambda x : x.power>=0, acceptable_gain_list))
|
||||
if len(acceptable_power_list) < 1:
|
||||
#no amplifier satisfies the required power, so pick the highest power:
|
||||
power_max = \
|
||||
max(acceptable_gain_list, key=itemgetter(1)).power
|
||||
#pick up all amplifiers that share this max power:
|
||||
acceptable_power_list = \
|
||||
list(filter(lambda x : x.power-power_max>-0.1, acceptable_gain_list))
|
||||
# gain and power requirements are resolved,
|
||||
# =>chose the amp with the best NF among the acceptable ones:
|
||||
return min(acceptable_power_list, key=itemgetter(3)).variety #filter on NF
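# Illustrative sketch (hypothetical helper, not in gnpy): select_edfa() is called during
# autodesign with the gain needed to compensate the upstream span and the total power target
# at the amplifier output; the numbers below are example values only.
def _example_select_edfa(equipment):
    return select_edfa(gain_target=22, power_target=2, equipment=equipment)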
|
||||
|
||||
|
||||
def set_roadm_loss(network, equipment, pref_ch_db):
|
||||
roadms = [roadm for roadm in network if isinstance(roadm, Roadm)]
|
||||
power_mode = equipment['Spans']['default'].power_mode
|
||||
default_roadm_loss = equipment['Roadms']['default'].gain_mode_default_loss
|
||||
pout_target = equipment['Roadms']['default'].power_mode_pout_target
|
||||
roadm_loss = pref_ch_db - pout_target
|
||||
|
||||
for roadm in roadms:
|
||||
if power_mode:
|
||||
roadm.loss = roadm_loss
|
||||
roadm.target_pch_out_db = pout_target
|
||||
elif roadm.loss is None:
|
||||
roadm.loss = default_roadm_loss
|
||||
|
||||
def target_power(dp_from_gain, network, node, equipment): #get_fiber_dp
|
||||
SPAN_LOSS_REF = 20
|
||||
POWER_SLOPE = 0.3
|
||||
power_mode = equipment['Spans']['default'].power_mode
|
||||
dp_range = list(equipment['Spans']['default'].delta_power_range_db)
|
||||
node_loss = span_loss(network, node)
|
||||
|
||||
dp_gain_mode = 0
|
||||
try:
|
||||
dp_power_mode = round2float((node_loss - SPAN_LOSS_REF) * POWER_SLOPE, dp_range[2])
|
||||
dp_power_mode = max(dp_range[0], dp_power_mode)
|
||||
dp_power_mode = min(dp_range[1], dp_power_mode)
|
||||
except KeyError:
|
||||
print(f'invalid delta_power_range_db definition in eqpt_config[Spans]\n'
|
||||
f'delta_power_range_db: [lower_bound, upper_bound, step]')
|
||||
exit()
|
||||
|
||||
if dp_from_gain:
|
||||
dp_power_mode = dp_from_gain
|
||||
dp_gain_mode = dp_from_gain
|
||||
if isinstance(node, Roadm):
|
||||
dp_power_mode = 0
|
||||
|
||||
dp = dp_power_mode if power_mode else dp_gain_mode
|
||||
#print(f'{repr(node)} delta power in:\n{dp}dB')
|
||||
|
||||
return dp
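# Illustrative sketch (hypothetical helper, not in gnpy): the delta-power heuristic above adds
# POWER_SLOPE dB per dB of span loss beyond SPAN_LOSS_REF, clipped to the configured range and
# rounded to its step; the constants are copied from target_power() purely for illustration.
def _example_power_slope(node_loss=26, step=0.5):
    span_loss_ref, power_slope = 20, 0.3
    return round2float((node_loss - span_loss_ref) * power_slope, step)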
|
||||
|
||||
|
||||
def prev_node_generator(network, node):
|
||||
"""fused spans interest:
|
||||
iterate over all predecessors while they are Fused or Fiber type"""
|
||||
try:
|
||||
prev_node = next(n for n in network.predecessors(node))
|
||||
except StopIteration:
|
||||
msg = f'In {__name__} prev_node_generator function:\n\t{node.uid} is not properly connected, please check network topology'
|
||||
print(msg)
|
||||
logger.critical(msg)
|
||||
exit(1)
|
||||
# yield and re-iterate
|
||||
if isinstance(prev_node, Fused) or isinstance(node, Fused):
|
||||
yield prev_node
|
||||
yield from prev_node_generator(network, prev_node)
|
||||
else:
|
||||
return  # stop the generator: neither node is a Fused element
|
||||
|
||||
def next_node_generator(network, node):
|
||||
"""fused spans interest:
|
||||
iterate over all successors while they are Fused or Fiber type"""
|
||||
try:
|
||||
next_node = next(n for n in network.successors(node))
|
||||
except StopIteration:
|
||||
print(f'In {__name__} next_node_generator function:\n\t{node.uid} is not properly connected, please check network topology')
|
||||
exit(1)
|
||||
# yield and re-iterate
|
||||
if isinstance(next_node, Fused) or isinstance(node, Fused):
|
||||
yield next_node
|
||||
yield from next_node_generator(network, next_node)
|
||||
else:
|
||||
return  # stop the generator: neither node is a Fused element
|
||||
|
||||
def span_loss(network, node):
|
||||
"""Fused span interest:
|
||||
return the total span loss of all the fibers spliced by a Fused node"""
|
||||
loss = node.loss if node.passive else 0
|
||||
try:
|
||||
prev_node = next(n for n in network.predecessors(node))
|
||||
if isinstance(prev_node, Fused):
|
||||
loss += sum(n.loss for n in prev_node_generator(network, node))
|
||||
except StopIteration:
|
||||
pass
|
||||
try:
|
||||
next_node = next(n for n in network.successors(node))
|
||||
if isinstance(next_node, Fused):
|
||||
loss += sum(n.loss for n in next_node_generator(network, node))
|
||||
except StopIteration:
|
||||
pass
|
||||
return loss
|
||||
|
||||
def find_first_node(network, node):
|
||||
"""Fused node interest:
|
||||
returns the 1st node at the origin of a succession of fused nodes
|
||||
(aka no amp in between)"""
|
||||
this_node = node
|
||||
for this_node in prev_node_generator(network, node):
|
||||
pass
|
||||
return this_node
|
||||
|
||||
def find_last_node(network, node):
|
||||
"""Fused node interest:
|
||||
returns the last node in a succession of fused nodes
|
||||
(aka no amp in between)"""
|
||||
this_node = node
|
||||
for this_node in next_node_generator(network, node):
|
||||
pass
|
||||
return this_node
|
||||
|
||||
def set_amplifier_voa(amp, pref_total_db, power_mode):
|
||||
VOA_MARGIN = 0
|
||||
if amp.operational.out_voa is None:
|
||||
if power_mode:
|
||||
gain_target = amp.operational.gain_target
|
||||
pout = pref_total_db + amp.dp_db
|
||||
voa = min(amp.params.p_max-pout,
|
||||
amp.params.gain_flatmax-amp.operational.gain_target)
|
||||
voa = round2float(max(voa, 0), 0.5) - VOA_MARGIN if amp.params.out_voa_auto else 0
|
||||
amp.dp_db = amp.dp_db + voa
|
||||
amp.operational.gain_target = amp.operational.gain_target + voa
|
||||
else:
|
||||
voa = 0 # no output voa optimization in gain mode
|
||||
amp.operational.out_voa = voa
|
||||
|
||||
def set_egress_amplifier(network, roadm, equipment, pref_total_db):
|
||||
power_mode = equipment['Spans']['default'].power_mode
|
||||
next_oms = (n for n in network.successors(roadm) if not isinstance(n, Transceiver))
|
||||
for oms in next_oms:
|
||||
#go through all the OMS departing from the Roadm
|
||||
node = roadm
|
||||
prev_node = roadm
|
||||
next_node = oms
|
||||
# if isinstance(next_node, Fused): #support ROADM wo egress amp for metro applications
|
||||
# node = find_last_node(next_node)
|
||||
# next_node = next(n for n in network.successors(node))
|
||||
# next_node = find_last_node(next_node)
|
||||
prev_dp = 0
|
||||
dp = 0
|
||||
while True:
|
||||
#go through all nodes in the OMS (loop until next Roadm instance)
|
||||
if isinstance(node, Edfa):
|
||||
node_loss = span_loss(network, prev_node)
|
||||
dp_from_gain = prev_dp + node.operational.gain_target - node_loss \
|
||||
if node.operational.gain_target > 0 else None
|
||||
dp = target_power(dp_from_gain, network, next_node, equipment)
|
||||
gain_target = node_loss + dp - prev_dp
|
||||
|
||||
if power_mode:
|
||||
node.dp_db = dp
|
||||
node.operational.gain_target = gain_target
|
||||
|
||||
if node.params.type_variety == '':
|
||||
power_target = pref_total_db + dp
|
||||
edfa_variety = select_edfa(gain_target, power_target, equipment)
|
||||
extra_params = equipment['Edfa'][edfa_variety]
|
||||
node.params.update_params(extra_params._asdict())
|
||||
set_amplifier_voa(node, pref_total_db, power_mode)
|
||||
if isinstance(next_node, Roadm) or isinstance(next_node, Transceiver):
|
||||
break
|
||||
prev_dp = dp
|
||||
prev_node = node
|
||||
node = next_node
|
||||
# print(f'{node.uid}')
|
||||
next_node = next(n for n in network.successors(node))
|
||||
|
||||
|
||||
def add_egress_amplifier(network, node):
|
||||
next_nodes = [n for n in network.successors(node)
|
||||
if not (isinstance(n, Transceiver) or isinstance(n, Fused) or isinstance(n, Edfa))]
|
||||
#no amplification for fused spans or TRX
|
||||
for i, next_node in enumerate(next_nodes):
|
||||
network.remove_edge(node, next_node)
|
||||
amp = Edfa(
|
||||
uid = f'Edfa{i}_{node.uid}',
|
||||
params = {},
|
||||
operational = {
|
||||
'gain_target': 0,
|
||||
'tilt_target': 0,
|
||||
})
|
||||
network.add_node(amp)
|
||||
network.add_edge(node, amp)
|
||||
network.add_edge(amp, next_node)
|
||||
|
||||
|
||||
def calculate_new_length(fiber_length, bounds, target_length):
|
||||
if fiber_length < bounds.stop:
|
||||
return fiber_length, 1
|
||||
|
||||
n_spans = int(fiber_length // target_length)
|
||||
|
||||
length1 = fiber_length / (n_spans+1)
|
||||
delta1 = target_length-length1
|
||||
result1 = (length1, n_spans+1)
|
||||
|
||||
length2 = fiber_length / n_spans
|
||||
delta2 = length2-target_length
|
||||
result2 = (length2, n_spans)
|
||||
|
||||
if (bounds.start<=length1<=bounds.stop) and not(bounds.start<=length2<=bounds.stop):
|
||||
result = result1
|
||||
elif (bounds.start<=length2<=bounds.stop) and not(bounds.start<=length1<=bounds.stop):
|
||||
result = result2
|
||||
else:
|
||||
result = result1 if delta1 < delta2 else result2
|
||||
|
||||
return result
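# Illustrative sketch (hypothetical helper, not in gnpy): a worked example of the span-cutting
# rule above -- a 220 km fiber with a 90 km target and bounds of range(50_000, 150_000) is cut
# into 3 spans of roughly 73.3 km, because that count lands closer to the target length.
def _example_calculate_new_length():
    return calculate_new_length(220_000, range(50_000, 150_000), 90_000)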
|
||||
|
||||
|
||||
def split_fiber(network, fiber, bounds, target_length, equipment):
|
||||
new_length, n_spans = calculate_new_length(fiber.length, bounds, target_length)
|
||||
if n_spans == 1:
|
||||
return
|
||||
|
||||
try:
|
||||
next_node = next(network.successors(fiber))
|
||||
prev_node = next(network.predecessors(fiber))
|
||||
except StopIteration:
|
||||
|
||||
print(f'In {__name__} split_fiber function:\n\t{fiber.uid} is not properly connected, please check network topology')
|
||||
exit()
|
||||
|
||||
network.remove_node(fiber)
|
||||
|
||||
fiber_params = fiber.params._asdict()
|
||||
fiber_params['length'] = new_length / UNITS[fiber.params.length_units]
|
||||
fiber_params['con_in'] = fiber.con_in
|
||||
fiber_params['con_out'] = fiber.con_out
|
||||
|
||||
for span in range(n_spans):
|
||||
new_span = Fiber(uid = f'{fiber.uid}_({span+1}/{n_spans})',
|
||||
metadata = fiber.metadata,
|
||||
params = fiber_params)
|
||||
network.add_edge(prev_node, new_span)
|
||||
prev_node = new_span
|
||||
network.add_edge(prev_node, next_node)
|
||||
|
||||
def add_connector_loss(fibers, con_in, con_out, EOL):
|
||||
for fiber in fibers:
|
||||
if fiber.con_in is None: fiber.con_in = con_in
|
||||
if fiber.con_out is None:
|
||||
fiber.con_out = con_out #con_out includes EOL
|
||||
else:
|
||||
fiber.con_out = fiber.con_out+EOL
|
||||
|
||||
def add_fiber_padding(network, fibers, padding):
|
||||
"""last_fibers = (fiber for n in network.nodes()
|
||||
if not (isinstance(n, Fiber) or isinstance(n, Fused))
|
||||
for fiber in network.predecessors(n)
|
||||
if isinstance(fiber, Fiber))"""
|
||||
for fiber in fibers:
|
||||
this_span_loss = span_loss(network, fiber)
|
||||
try:
|
||||
next_node = next(network.successors(fiber))
|
||||
except StopIteration:
|
||||
msg = f'In {__name__} add_fiber_padding function:\n\t{fiber.uid} is not properly connected, please check network topology'
|
||||
print(msg)
|
||||
logger.critical(msg)
|
||||
exit(1)
|
||||
if this_span_loss < padding and not (isinstance(next_node, Fused)):
|
||||
#add a padding att_in at the input of the 1st fiber:
|
||||
#address the case when several fibers are spliced together
|
||||
first_fiber = find_first_node(network, fiber)
|
||||
if first_fiber.att_in is None:
|
||||
first_fiber.att_in = padding - this_span_loss
|
||||
else :
|
||||
first_fiber.att_in = first_fiber.att_in + padding - this_span_loss
|
||||
|
||||
def build_network(network, equipment, pref_ch_db, pref_total_db):
|
||||
default_span_data = equipment['Spans']['default']
|
||||
max_length = int(default_span_data.max_length * UNITS[default_span_data.length_units])
|
||||
min_length = max(int(default_span_data.padding/0.2*1e3),50_000)
|
||||
bounds = range(min_length, max_length)
|
||||
target_length = max(min_length, 90_000)
|
||||
con_in = default_span_data.con_in
|
||||
con_out = default_span_data.con_out + default_span_data.EOL
|
||||
padding = default_span_data.padding
|
||||
|
||||
# set roadm loss for gain_mode before building the network
|
||||
set_roadm_loss(network, equipment, pref_ch_db)
|
||||
fibers = [f for f in network.nodes() if isinstance(f, Fiber)]
|
||||
add_connector_loss(fibers, con_in, con_out, default_span_data.EOL)
|
||||
add_fiber_padding(network, fibers, padding)
|
||||
# don't group split fiber and add amp in the same loop
|
||||
# =>for code clarity (at the expense of speed):
|
||||
for fiber in fibers:
|
||||
split_fiber(network, fiber, bounds, target_length, equipment)
|
||||
|
||||
amplified_nodes = [n for n in network.nodes()
|
||||
if isinstance(n, Fiber) or isinstance(n, Roadm)]
|
||||
for node in amplified_nodes:
|
||||
add_egress_amplifier(network, node)
|
||||
|
||||
roadms = [r for r in network.nodes() if isinstance(r, Roadm)]
|
||||
for roadm in roadms:
|
||||
set_egress_amplifier(network, roadm, equipment, pref_total_db)
|
||||
|
||||
#support older json input topology wo Roadms:
|
||||
if len(roadms) == 0:
|
||||
trx = [t for t in network.nodes() if isinstance(t, Transceiver)]
|
||||
for t in trx:
|
||||
set_egress_amplifier(network, t, equipment, pref_total_db)
|
||||
|
||||
54
gnpy/core/node.py
Normal file
@@ -0,0 +1,54 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.node
|
||||
==============
|
||||
|
||||
This module contains the base class for a network element.
|
||||
|
||||
Strictly, a network element is any callable which accepts an immutable
|
||||
.info.SpectralInformation object and returns a .info.SpectralInformation object
|
||||
(a copy).
|
||||
|
||||
Network elements MUST implement two attributes .uid and .name representing a
|
||||
unique identifier and a printable name.
|
||||
|
||||
This base class provides a more convenient way to define a network element
|
||||
via subclassing.
|
||||
'''
|
||||
|
||||
from uuid import uuid4
|
||||
from collections import namedtuple
|
||||
|
||||
class Location(namedtuple('Location', 'latitude longitude city region')):
|
||||
def __new__(cls, latitude=0, longitude=0, city=None, region=None):
|
||||
return super().__new__(cls, latitude, longitude, city, region)
|
||||
|
||||
class Node:
|
||||
def __init__(self, uid, name=None, params=None, metadata={'location':{}}, operational=None):
|
||||
if name is None:
|
||||
name = uid
|
||||
self.uid, self.name = uid, name
|
||||
if metadata and not isinstance(metadata.get('location'), Location):
|
||||
metadata['location'] = Location(**metadata.pop('location', {}))
|
||||
self.params, self.metadata, self.operational = params, metadata, operational
|
||||
|
||||
@property
|
||||
def coords(self):
|
||||
return self.lng, self.lat
|
||||
|
||||
@property
|
||||
def location(self):
|
||||
return self.metadata['location']
|
||||
loc = location
|
||||
|
||||
@property
|
||||
def longitude(self):
|
||||
return self.location.longitude
|
||||
lng = longitude
|
||||
|
||||
@property
|
||||
def latitude(self):
|
||||
return self.location.latitude
|
||||
lat = latitude
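# Illustrative sketch (hypothetical class, not in gnpy): the smallest possible network element
# that honours the callable contract described in the module docstring (accept a
# SpectralInformation, return one); a real element would return a modified copy.
class _ExampleTransparentElement(Node):
    def __call__(self, spectral_info):
        return spectral_info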
|
||||
907
gnpy/core/request.py
Normal file
@@ -0,0 +1,907 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
gnpy.core.request
|
||||
=================
|
||||
|
||||
This module contains path request functionality.
|
||||
|
||||
This functionality allows the user to provide a JSON request
|
||||
file in accordance with a Yang model for requesting path
|
||||
computations and returns path results in terms of path
|
||||
and feasibility
|
||||
|
||||
See: draft-ietf-teas-yang-path-computation-01.txt
|
||||
"""
|
||||
|
||||
from sys import exit
|
||||
from collections import namedtuple
|
||||
from logging import getLogger, basicConfig, CRITICAL, DEBUG, INFO
|
||||
from networkx import (dijkstra_path, NetworkXNoPath, all_simple_paths)
|
||||
from networkx.utils import pairwise
|
||||
from numpy import mean
|
||||
from gnpy.core.service_sheet import convert_service_sheet, Request_element, Element
|
||||
from gnpy.core.elements import Transceiver, Roadm, Edfa, Fused
|
||||
from gnpy.core.network import set_roadm_loss
|
||||
from gnpy.core.utils import db2lin, lin2db
|
||||
from gnpy.core.info import create_input_spectral_information, SpectralInformation, Channel, Power
|
||||
from copy import copy, deepcopy
|
||||
from csv import writer
|
||||
from math import ceil
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
|
||||
RequestParams = namedtuple('RequestParams','request_id source destination trx_type'+
|
||||
' trx_mode nodes_list loose_list spacing power nb_channel f_min f_max format baud_rate OSNR bit_rate roll_off tx_osnr min_spacing cost path_bandwidth')
|
||||
DisjunctionParams = namedtuple('DisjunctionParams','disjunction_id relaxable link_diverse node_diverse disjunctions_req')
|
||||
|
||||
class Path_request:
|
||||
def __init__(self, *args, **params):
|
||||
params = RequestParams(**params)
|
||||
self.request_id = params.request_id
|
||||
self.source = params.source
|
||||
self.destination = params.destination
|
||||
self.tsp = params.trx_type
|
||||
self.tsp_mode = params.trx_mode
|
||||
self.baud_rate = params.baud_rate
|
||||
self.nodes_list = params.nodes_list
|
||||
self.loose_list = params.loose_list
|
||||
self.spacing = params.spacing
|
||||
self.power = params.power
|
||||
self.nb_channel = params.nb_channel
|
||||
self.f_min = params.f_min
|
||||
self.f_max = params.f_max
|
||||
self.format = params.format
|
||||
self.OSNR = params.OSNR
|
||||
self.bit_rate = params.bit_rate
|
||||
self.roll_off = params.roll_off
|
||||
self.tx_osnr = params.tx_osnr
|
||||
self.min_spacing = params.min_spacing
|
||||
self.cost = params.cost
|
||||
self.path_bandwidth = params.path_bandwidth
|
||||
|
||||
def __str__(self):
|
||||
return '\n\t'.join([ f'{type(self).__name__} {self.request_id}',
|
||||
f'source: {self.source}',
|
||||
f'destination: {self.destination}'])
|
||||
def __repr__(self):
|
||||
if self.baud_rate is not None:
|
||||
temp = self.baud_rate * 1e-9
|
||||
temp2 = self.bit_rate * 1e-9
|
||||
else:
|
||||
temp = self.baud_rate
|
||||
temp2 = self.bit_rate
|
||||
|
||||
return '\n\t'.join([ f'{type(self).__name__} {self.request_id}',
|
||||
f'source: \t{self.source}',
|
||||
f'destination:\t{self.destination}',
|
||||
f'trx type:\t{self.tsp}',
|
||||
f'trx mode:\t{self.tsp_mode}',
|
||||
f'baud_rate:\t{temp} Gbaud',
|
||||
f'bit_rate:\t{temp2} Gb/s',
|
||||
f'spacing:\t{self.spacing * 1e-9} GHz',
|
||||
f'power: \t{round(lin2db(self.power)+30,2)} dBm',
|
||||
f'nb channels: \t{self.nb_channel}',
|
||||
f'path_bandwidth: \t{round(self.path_bandwidth * 1e-9,2)} Gbit/s',
|
||||
f'nodes-list:\t{self.nodes_list}',
|
||||
f'loose-list:\t{self.loose_list}'
|
||||
'\n'])
|
||||
class Disjunction:
|
||||
def __init__(self, *args, **params):
|
||||
params = DisjunctionParams(**params)
|
||||
self.disjunction_id = params.disjunction_id
|
||||
self.relaxable = params.relaxable
|
||||
self.link_diverse = params.link_diverse
|
||||
self.node_diverse = params.node_diverse
|
||||
self.disjunctions_req = params.disjunctions_req
|
||||
|
||||
def __str__(self):
|
||||
return '\n\t'.join([f'relaxable: {self.relaxable}',
|
||||
f'link-diverse: {self.link_diverse}',
|
||||
f'node-diverse: {self.node_diverse}',
|
||||
f'request-id-numbers: {self.disjunctions_req}']
|
||||
)
|
||||
def __repr__(self):
|
||||
return '\n\t'.join([ f'{type(self).__name__} {self.disjunction_id}',
|
||||
f'relaxable: {self.relaxable}',
|
||||
f'link-diverse: {self.link_diverse}',
|
||||
f'node-diverse: {self.node_diverse}',
|
||||
f'request-id-numbers: {self.disjunctions_req}'
|
||||
'\n'])
|
||||
|
||||
class Result_element(Element):
|
||||
def __init__(self,path_request,computed_path):
|
||||
self.path_id = path_request.request_id
|
||||
self.path_request = path_request
|
||||
self.computed_path = computed_path
|
||||
hop_type = []
|
||||
if len(computed_path)>0 :
|
||||
for e in computed_path :
|
||||
if isinstance(e, Transceiver) :
|
||||
hop_type.append(' - '.join([path_request.tsp,path_request.tsp_mode]))
|
||||
else:
|
||||
hop_type.append('not recorded')
|
||||
else:
|
||||
# TODO differentiate empty path in case not feasible because of tsp or not feasible because
|
||||
# there is no path connecting the nodes (whatever the tsp)
|
||||
mode = 'not feasible with this transponder'
|
||||
hop_type = ' - '.join([path_request.tsp,mode])
|
||||
self.hop_type = hop_type
|
||||
uid = property(lambda self: repr(self))
|
||||
@property
|
||||
def pathresult(self):
|
||||
if not self.computed_path:
|
||||
return {
|
||||
'path-id': self.path_id,
|
||||
'path-properties':{
|
||||
'path-metric': [
|
||||
{
|
||||
'metric-type': 'SNR@bandwidth',
|
||||
'accumulative-value': 'None'
|
||||
},
|
||||
{
|
||||
'metric-type': 'SNR@0.1nm',
|
||||
'accumulative-value': 'None'
|
||||
},
|
||||
{
|
||||
'metric-type': 'OSNR@bandwidth',
|
||||
'accumulative-value': 'None'
|
||||
},
|
||||
{
|
||||
'metric-type': 'OSNR@0.1nm',
|
||||
'accumulative-value': 'None'
|
||||
},
|
||||
{
|
||||
'metric-type': 'reference_power',
|
||||
'accumulative-value': self.path_request.power
|
||||
},
|
||||
{
|
||||
'metric-type': 'path_bandwidth',
|
||||
'accumulative-value': self.path_request.path_bandwidth
|
||||
}
|
||||
],
|
||||
'path-srlgs': {
|
||||
'usage': 'not used yet',
|
||||
'values': 'not used yet'
|
||||
},
|
||||
'path-route-objects': [
|
||||
{
|
||||
'path-route-object': {
|
||||
'index': 0,
|
||||
'unnumbered-hop': {
|
||||
'node-id': self.path_request.source,
|
||||
'link-tp-id': self.path_request.source,
|
||||
'hop-type': self.hop_type,
|
||||
'direction': 'not used'
|
||||
},
|
||||
'label-hop': {
|
||||
'te-label': {
|
||||
'generic': 'not used yet',
|
||||
'direction': 'not used yet'
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
'path-route-object': {
|
||||
'index': 1,
|
||||
'unnumbered-hop': {
|
||||
'node-id': self.path_request.destination,
|
||||
'link-tp-id': self.path_request.destination,
|
||||
'hop-type': self.hop_type,
|
||||
'direction': 'not used'
|
||||
},
|
||||
'label-hop': {
|
||||
'te-label': {
|
||||
'generic': 'not used yet',
|
||||
'direction': 'not used yet'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
else:
|
||||
return {
|
||||
'path-id': self.path_id,
|
||||
'path-properties':{
|
||||
'path-metric': [
|
||||
{
|
||||
'metric-type': 'SNR@bandwidth',
|
||||
'accumulative-value': round(mean(self.computed_path[-1].snr),2)
|
||||
},
|
||||
{
|
||||
'metric-type': 'SNR@0.1nm',
|
||||
'accumulative-value': round(mean(self.computed_path[-1].snr+lin2db(self.path_request.baud_rate/12.5e9)),2)
|
||||
},
|
||||
{
|
||||
'metric-type': 'OSNR@bandwidth',
|
||||
'accumulative-value': round(mean(self.computed_path[-1].osnr_ase),2)
|
||||
},
|
||||
{
|
||||
'metric-type': 'OSNR@0.1nm',
|
||||
'accumulative-value': round(mean(self.computed_path[-1].osnr_ase_01nm),2)
|
||||
},
|
||||
{
|
||||
'metric-type': 'reference_power',
|
||||
'accumulative-value': self.path_request.power
|
||||
},
|
||||
{
|
||||
'metric-type': 'path_bandwidth',
|
||||
'accumulative-value': self.path_request.path_bandwidth
|
||||
}
|
||||
],
|
||||
'path-srlgs': {
|
||||
'usage': 'not used yet',
|
||||
'values': 'not used yet'
|
||||
},
|
||||
'path-route-objects': [
|
||||
{
|
||||
'path-route-object': {
|
||||
'index': self.computed_path.index(n),
|
||||
'unnumbered-hop': {
|
||||
'node-id': n.uid,
|
||||
'link-tp-id': n.uid,
|
||||
'hop-type': self.hop_type[self.computed_path.index(n)],
|
||||
'direction': 'not used'
|
||||
},
|
||||
'label-hop': {
|
||||
'te-label': {
|
||||
'generic': 'not used yet',
|
||||
'direction': 'not used yet'
|
||||
}
|
||||
}
|
||||
}
|
||||
} for n in self.computed_path
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@property
|
||||
def json(self):
|
||||
return self.pathresult
|
||||
|
||||
def compute_constrained_path(network, req):
|
||||
trx = [n for n in network.nodes() if isinstance(n, Transceiver)]
|
||||
roadm = [n for n in network.nodes() if isinstance(n, Roadm)]
|
||||
edfa = [n for n in network.nodes() if isinstance(n, Edfa)]
|
||||
anytypenode = [n for n in network.nodes()]
|
||||
|
||||
source = next(el for el in trx if el.uid == req.source)
|
||||
|
||||
# This method ensures that the constraint can be satisfied without loops
|
||||
# except when it is not possible: e.g. if the constraints themselves force a loop
|
||||
# It requires that the source, dest and nodes are correct (no error in the names)
|
||||
destination = next(el for el in trx if el.uid == req.destination)
|
||||
nodes_list = []
|
||||
for n in req.nodes_list :
|
||||
# for debug excel print(n)
|
||||
nodes_list.append(next(el for el in anytypenode if el.uid == n))
|
||||
# nodes_list contains at least the destination
|
||||
if nodes_list is None :
|
||||
msg = f'Request {req.request_id} problem in the constitution of nodes_list: should at least include destination'
|
||||
logger.critical(msg)
|
||||
exit()
|
||||
if req.nodes_list[-1] != req.destination:
|
||||
msg = f'Request {req.request_id} malformed list of nodes: last node should be destination trx'
|
||||
logger.critical(msg)
|
||||
exit()
|
||||
|
||||
if len(nodes_list) == 1 :
|
||||
try :
|
||||
total_path = dijkstra_path(network, source, destination)
|
||||
except NetworkXNoPath:
|
||||
msg = f'\x1b[1;33;40m'+f'Request {req.request_id} could not find a path from {source.uid} to node : {destination.uid} in network topology'+ '\x1b[0m'
|
||||
logger.critical(msg)
|
||||
print(msg)
|
||||
total_path = []
|
||||
else :
|
||||
all_simp_pths = list(all_simple_paths(network,source=source,\
|
||||
target=destination, cutoff=120))
|
||||
candidate = []
|
||||
for p in all_simp_pths :
|
||||
if ispart(nodes_list, p) :
|
||||
# print(f'selection{[el.uid for el in p if el in roadm]}')
|
||||
candidate.append(p)
|
||||
# select the shortest path (in nb of hops)
|
||||
if len(candidate)>0 :
|
||||
candidate.sort(key=lambda x: len(x))
|
||||
total_path = candidate[0]
|
||||
else:
|
||||
if req.loose_list[req.nodes_list.index(n)] == 'loose':
|
||||
print(f'\x1b[1;33;40m'+f'Request {req.request_id} could not find a path crossing {nodes_list} in network topology'+ '\x1b[0m')
|
||||
print(f'constraint ignored')
|
||||
total_path = dijkstra_path(network, source, destination)
|
||||
else:
|
||||
msg = f'\x1b[1;33;40m'+f'Request {req.request_id} could not find a path crossing {nodes_list}.\nNo path computed'+ '\x1b[0m'
|
||||
logger.critical(msg)
|
||||
print(msg)
|
||||
total_path = []
|
||||
|
||||
# obsolete method: this does not guarantee loop avoidance or correct results
|
||||
# Here is the demonstration :
|
||||
# 1 1
|
||||
# eg a----b-----c
|
||||
# |1 |0.5 |1
|
||||
# e----f--h--g
|
||||
# 1 0.5 0.5
|
||||
# if I have to compute a to g with constraint f-c
|
||||
# result will be a concatenation of: a-b-f and f-b-c and c-g
|
||||
# which means a loop.
|
||||
# if, to avoid loops, I iteratively suppress the edges of the segments in the topology
|
||||
# segment 1 = a-b-f
|
||||
# 1
|
||||
# eg a b-----c
|
||||
# |1 |1
|
||||
# e----f--h--g
|
||||
# 1 0.5 0.5
|
||||
# then
|
||||
# segment 2 = f-h-g-c
|
||||
# 1
|
||||
# eg a b-----c
|
||||
# |1
|
||||
# e----f h g
|
||||
# 1
|
||||
# then there is no more path to g destination
|
||||
#
|
||||
#
|
||||
# total_path = [source]
|
||||
|
||||
# for n in req.nodes_list:
|
||||
# try :
|
||||
# node = next(el for el in trx if el.uid == n)
|
||||
# except StopIteration:
|
||||
# try:
|
||||
# node = next(el for el in anytypenode if el.uid == n)
|
||||
# except StopIteration:
|
||||
# try:
|
||||
# # TODO this test is not giving good results: full name of the
|
||||
# # amp is required to avoid ambiguity on the direction
|
||||
# node = next(el for el in anytypenode
|
||||
# if n in el.uid)
|
||||
# except StopIteration:
|
||||
# msg = f'could not find node : {n} in network topology: \
|
||||
# not a trx, roadm, edfa, fiber or fused element'
|
||||
# logger.critical(msg)
|
||||
# raise ValueError(msg)
|
||||
# # extend path list without repeating source -> skip first element in the list
|
||||
# try:
|
||||
# # to avoid looping back: use an alternate graph were current path edges and vertex are suppressed
|
||||
|
||||
# total_path.extend(dijkstra_path(network, source, node)[1:])
|
||||
# source = node
|
||||
# except NetworkXNoPath:
|
||||
# if req.loose_list[req.nodes_list.index(n)] == 'loose':
|
||||
# print(f'could not find a path from {source.uid} to loose node : {n} in network topology')
|
||||
# print(f'node {n} is skipped')
|
||||
# else:
|
||||
# msg = f'could not find a path from {source.uid} to node : {n} in network topology'
|
||||
# logger.critical(msg)
|
||||
# print(msg)
|
||||
# total_path = []
|
||||
|
||||
return total_path
|
||||
|
||||
def propagate(path, req, equipment, show=False):
|
||||
#update roadm loss in case of power sweep (power mode only)
|
||||
set_roadm_loss(path, equipment, lin2db(req.power*1e3))
|
||||
si = create_input_spectral_information(
|
||||
req.f_min, req.f_max, req.roll_off, req.baud_rate,
|
||||
req.power, req.spacing)
|
||||
for el in path:
|
||||
si = el(si)
|
||||
if show :
|
||||
print(el)
|
||||
path[-1].update_snr(req.tx_osnr, equipment['Roadms']['default'].add_drop_osnr)
|
||||
return path
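# Illustrative sketch (hypothetical helper, not in gnpy): the usual call sequence around
# propagate() -- compute a constrained path for a request, push its spectral information
# through every element, then read the SNR accumulated at the receiving transceiver.
def _example_propagate_request(network, req, equipment):
    path = compute_constrained_path(network, req)
    if not path:
        return None
    propagate(path, req, equipment, show=False)
    return path[-1].snr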
|
||||
|
||||
def propagate_and_optimize_mode(path, req, equipment):
|
||||
#update roadm loss in case of power sweep (power mode only)
|
||||
set_roadm_loss(path, equipment, lin2db(req.power*1e3))
|
||||
# if mode is unknown: loop on the modes, starting from the highest baud rate fitting within the spacing
|
||||
# step 1: create an ordered list of modes based on baudrate
|
||||
baudrate_to_explore = list(set([m['baud_rate'] for m in equipment['Transceiver'][req.tsp].mode
|
||||
if float(m['min_spacing'])<= req.spacing]))
|
||||
# TODO: be careful with limit cases where the mode spacing is very close to the requested spacing, e.g. 50.001 vs 50.000
|
||||
baudrate_to_explore = sorted(baudrate_to_explore, reverse=True)
|
||||
if baudrate_to_explore :
|
||||
# at least 1 baudrate can be tested wrt spacing
|
||||
for b in baudrate_to_explore :
|
||||
modes_to_explore = [m for m in equipment['Transceiver'][req.tsp].mode
|
||||
if m['baud_rate'] == b]
|
||||
modes_to_explore = sorted(modes_to_explore,
|
||||
key = lambda x: x['bit_rate'], reverse=True)
|
||||
# print(modes_to_explore)
|
||||
# step2 : computes propagation for each baudrate: stop and select the first that passes
|
||||
found_a_feasible_mode = False
|
||||
# TODO: the roll-off case is not included: for now use the SI one
|
||||
# TODO: if the mode-optimization loop finds no feasible path, this code misbehaves
|
||||
si = create_input_spectral_information(
|
||||
req.f_min, req.f_max, equipment['SI']['default'].roll_off,
|
||||
b, req.power, req.spacing)
|
||||
for el in path:
|
||||
si = el(si)
|
||||
for m in modes_to_explore :
|
||||
if path[-1].snr is not None:
|
||||
path[-1].update_snr(m['tx_osnr'], equipment['Roadms']['default'].add_drop_osnr)
|
||||
if round(min(path[-1].snr+lin2db(b/(12.5e9))),2) > m['OSNR'] :
|
||||
found_a_feasible_mode = True
|
||||
return path, m
|
||||
else:
|
||||
return [], None
|
||||
# only get to this point if no baudrate/mode satisfies OSNR requirement
|
||||
|
||||
# no mode satisfies the path SNR requirement: return an empty path and no mode
|
||||
msg = f'\tWarning! Request {req.request_id}: no mode satisfies path SNR requirement.\n'
|
||||
print(msg)
|
||||
logger.info(msg)
|
||||
return [],None
|
||||
else :
|
||||
# no baudrate satisfying spacing
|
||||
msg = f'\tWarning! Request {req.request_id}: no baudrate satisfies spacing requirement.\n'
|
||||
print(msg)
|
||||
logger.info(msg)
|
||||
return [], None
|
||||
|
||||
|
||||
def jsontocsv(json_data,equipment,fileout):
|
||||
# read json path result file in accordance with:
|
||||
# Yang model for requesting Path Computation
|
||||
# draft-ietf-teas-yang-path-computation-01.txt.
|
||||
# and write the results into a CSV file
|
||||
|
||||
mywriter = writer(fileout)
|
||||
mywriter.writerow(('path-id','source','destination','path_bandwidth','Pass?',\
|
||||
'nb of tsp pairs','total cost','transponder-type','transponder-mode',\
|
||||
'OSNR@0.1nm','SNR@0.1nm','SNR@bandwidth','baud rate (Gbaud)',\
|
||||
'input power (dBm)','path'))
|
||||
tspjsondata = equipment['Transceiver']
|
||||
#print(tspjsondata)
|
||||
for p in json_data['path']:
|
||||
path_id = p['path-id']
|
||||
source = p['path-properties']['path-route-objects'][0]\
|
||||
['path-route-object']['unnumbered-hop']['node-id']
|
||||
destination = p['path-properties']['path-route-objects'][-1]\
|
||||
['path-route-object']['unnumbered-hop']['node-id']
|
||||
# selects only roadm nodes
|
||||
pth = ' | '.join([ e['path-route-object']['unnumbered-hop']['node-id']
|
||||
for e in p['path-properties']['path-route-objects']
|
||||
if e['path-route-object']['unnumbered-hop']['node-id'].startswith('roadm') or e['path-route-object']['unnumbered-hop']['node-id'].startswith('Edfa')])
|
||||
|
||||
[tsp,mode] = p['path-properties']['path-route-objects'][0]\
|
||||
['path-route-object']['unnumbered-hop']['hop-type'].split(' - ')
|
||||
|
||||
# find the min acceptable OSNR and baud rate from the eqpt library based on tsp (type) and mode (format)
|
||||
# loading equipment already tests the existence of tsp type and mode:
|
||||
if mode !='not feasible with this transponder' :
|
||||
[minosnr, baud_rate, bit_rate, cost] = next([m['OSNR'] , m['baud_rate'] , m['bit_rate'], m['cost']]
|
||||
for m in equipment['Transceiver'][tsp].mode if m['format']==mode)
|
||||
# else:
|
||||
# [minosnr, baud_rate, bit_rate] = ['','','','']
|
||||
output_snr = next(e['accumulative-value']
|
||||
for e in p['path-properties']['path-metric'] if e['metric-type'] == 'SNR@0.1nm')
|
||||
output_snrbandwidth = next(e['accumulative-value']
|
||||
for e in p['path-properties']['path-metric'] if e['metric-type'] == 'SNR@bandwidth')
|
||||
output_osnr = next(e['accumulative-value']
|
||||
for e in p['path-properties']['path-metric'] if e['metric-type'] == 'OSNR@0.1nm')
|
||||
output_osnrbandwidth = next(e['accumulative-value']
|
||||
for e in p['path-properties']['path-metric'] if e['metric-type'] == 'OSNR@bandwidth')
|
||||
power = next(e['accumulative-value']
|
||||
for e in p['path-properties']['path-metric'] if e['metric-type'] == 'reference_power')
|
||||
path_bandwidth = next(e['accumulative-value']
|
||||
for e in p['path-properties']['path-metric'] if e['metric-type'] == 'path_bandwidth')
|
||||
if isinstance(output_snr, str):
|
||||
isok = False
|
||||
nb_tsp = 0
|
||||
pthbdbw = round(path_bandwidth*1e-9,2)
|
||||
rosnr = ''
|
||||
rsnr = ''
|
||||
rsnrb = ''
|
||||
br = ''
|
||||
pw = ''
|
||||
total_cost = ''
|
||||
else:
|
||||
isok = output_snr >= minosnr
|
||||
nb_tsp = ceil(path_bandwidth / bit_rate)
|
||||
pthbdbw = round(path_bandwidth*1e-9,2)
|
||||
rosnr = round(output_osnr,2)
|
||||
rsnr = round(output_snr,2)
|
||||
rsnrb = round(output_snrbandwidth,2)
|
||||
br = round(baud_rate*1e-9,2)
|
||||
pw = round(lin2db(power)+30,2)
|
||||
total_cost = nb_tsp * cost
|
||||
mywriter.writerow((path_id,
|
||||
source,
|
||||
destination,
|
||||
pthbdbw,
|
||||
isok,
|
||||
nb_tsp,
|
||||
total_cost,
|
||||
tsp,
|
||||
mode,
|
||||
rosnr,
|
||||
rsnr,
|
||||
rsnrb,
|
||||
br,
|
||||
pw,
|
||||
pth
|
||||
))
|
||||
|
||||
|
||||
def compute_path_dsjctn(network, equipment, pathreqlist, disjunctions_list):
|
||||
# pathreqlist is a list of Path_request objects
|
||||
# disjunctions_list a list of Disjunction objects
|
||||
|
||||
# given a network, a list of requests with the set of disjunction features between
|
||||
# request, the function computes the set of path satisfying : first the disjunction
|
||||
# constraint and second the routing constraint if the request include an explicit
|
||||
# set of elements to pass through.
|
||||
# the algorithm used allows specifying disjunction for demands not sharing a source or
|
||||
# destination.
|
||||
# a request might be declared as disjoint from several requests
|
||||
# it is an iterative process:
|
||||
# first computes a list of all shortest path (this may add computation time)
|
||||
# second elaborate the set of path solution for each synchronization vector
|
||||
# third select only the candidates that satisfy all synchronization vectors they belong to
|
||||
# fourth apply route constraints : remove candidate path that do not satisfy the constraint
|
||||
# fifth select the first candidate among the set of candidates.
|
||||
# the example network used in comments has been added to the set of data tests files
|
||||
|
||||
# define the list to be returned
|
||||
path_res_list = []
|
||||
|
||||
# all disjctn must be computed at once together to avoid blocking
|
||||
# 1 1
|
||||
# eg a----b-----c
|
||||
# |1 |0.5 |1
|
||||
# e----f--h--g
|
||||
# 1 0.5 0.5
|
||||
# if I have to compute a to g and a to h
|
||||
# I must not compute a-b-f-h-g, otherwise there is no disjoint path remaining for a to h
|
||||
# instead I should list all the most-disjoint paths and select the one that has the fewest
|
||||
# number of commonalities
|
||||
# \ path abfh aefh abcgh
|
||||
# \___cost 2 2.5 3.5
|
||||
# path| cost
|
||||
# abfhg| 2.5 x x x
|
||||
# abcg | 3 x x
|
||||
# aefhg| 3 x x x
|
||||
# from this table abcg and aefh have no common links and should be preferred
|
||||
# even they are not the shortest paths
|
||||
|
||||
# build the list of pathreqlist elements not concerned by disjunction
|
||||
global_disjunctions_list = [e for d in disjunctions_list for e in d.disjunctions_req ]
|
||||
pathreqlist_simple = [e for e in pathreqlist if e.request_id not in global_disjunctions_list]
|
||||
pathreqlist_disjt = [e for e in pathreqlist if e.request_id in global_disjunctions_list]
|
||||
|
||||
# use a mirror class to record path and the corresponding requests
|
||||
class Pth:
|
||||
def __init__(self, req, pth, simplepth):
|
||||
self.req = req
|
||||
self.pth = pth
|
||||
self.simplepth = simplepth
|
||||
|
||||
# step 1
|
||||
# for each remaining request compute a set of simple path
|
||||
allpaths = {}
|
||||
rqs = {}
|
||||
simple_rqs = {}
|
||||
simple_rqs_reversed = {}
|
||||
for pathreq in pathreqlist_disjt :
|
||||
all_simp_pths = list(all_simple_paths(network,\
|
||||
source=next(el for el in network.nodes() if el.uid == pathreq.source),\
|
||||
target=next(el for el in network.nodes() if el.uid == pathreq.destination),\
|
||||
cutoff=80))
|
||||
# sort them
|
||||
all_simp_pths = sorted(all_simp_pths, key=lambda path: len(path))
|
||||
# reversed-direction paths are required to check disjunction in both directions
|
||||
all_simp_pths_reversed = []
|
||||
for pth in all_simp_pths:
|
||||
all_simp_pths_reversed.append(find_reversed_path(pth,network))
|
||||
rqs[pathreq.request_id] = all_simp_pths
|
||||
temp =[]
|
||||
for p in all_simp_pths :
|
||||
# build a short list representing each roadm+direction with the first item
|
||||
# start enumeration at 1 to avoid Trx in the list
|
||||
s = [e.uid for i,e in enumerate(p[1:-1]) \
|
||||
if (isinstance(e,Roadm) | (isinstance(p[i],Roadm) ))]
|
||||
temp.append(s)
|
||||
# id(s) is unique even if path is the same: two objects with same
|
||||
# path have two different ids
|
||||
allpaths[id(s)] = Pth(pathreq,p,s)
|
||||
simple_rqs[pathreq.request_id] = temp
|
||||
temp =[]
|
||||
for p in all_simp_pths_reversed :
|
||||
# build a short list representing each roadm+direction with the first item
|
||||
# start enumeration at 1 to avoid Trx in the list
|
||||
temp.append([e.uid for i,e in enumerate(p[1:-1]) \
|
||||
if (isinstance(e,Roadm) | (isinstance(p[i],Roadm) ))] )
|
||||
simple_rqs_reversed[pathreq.request_id] = temp
|
||||
# step 2
|
||||
# for each set of requests that need to be disjoint
|
||||
# select the disjoint path combination
|
||||
|
||||
candidates = {}
|
||||
for d in disjunctions_list :
|
||||
dlist = d.disjunctions_req.copy()
|
||||
# each line of dpath is one combination of path that satisfies disjunction
|
||||
dpath = []
|
||||
for i,p in enumerate(simple_rqs[dlist[0]]):
|
||||
dpath.append([p])
|
||||
# allpaths[id(p)].d_id = d.disjunction_id
|
||||
# in each loop, dpath is updated with a path for rq that satisfies
|
||||
# disjunction with each path in dpath
|
||||
# for example, assume set of requests in the vector (disjunction_list) is {rq1,rq2, rq3}
|
||||
# rq1 p1: abfhg
|
||||
# p2: aefhg
|
||||
# p3: abcg
|
||||
# rq2 p8: bf
|
||||
# rq3 p4: abcgh
|
||||
# p6: aefh
|
||||
# p7: abfh
|
||||
# initiate with rq1
|
||||
# dpath = [[p1]
|
||||
# [p2]
|
||||
# [p3]]
|
||||
# after first loop:
|
||||
# dpath = [[p1 p8]
|
||||
# [p3 p8]]
|
||||
# since p2 and p8 are not disjoint
|
||||
# after second loop:
|
||||
# dpath = [ p3 p8 p6 ]
|
||||
# since p1 and p4 are not disjoint
|
||||
# p1 and p7 are not disjoint
|
||||
# p3 and p4 are not disjoint
|
||||
# p3 and p7 are not disjoint
|
||||
|
||||
for e1 in dlist[1:] :
|
||||
temp = []
|
||||
for j,p1 in enumerate(simple_rqs[e1]):
|
||||
# allpaths[id(p1)].d_id = d.disjunction_id
|
||||
# can use index j in simple_rqs_reversed because index
|
||||
# of direct and reversed paths have been kept identical
|
||||
p1_reversed = simple_rqs_reversed[e1][j]
|
||||
# print(p1_reversed)
|
||||
# print('\n\n')
|
||||
for k,c in enumerate(dpath) :
|
||||
# print(f' c: \t{c}')
|
||||
temp2 = c.copy()
|
||||
all_disjoint = 0
|
||||
for p in c :
|
||||
all_disjoint += isdisjoint(p1,p)+ isdisjoint(p1_reversed,p)
|
||||
if all_disjoint ==0:
|
||||
temp2.append(p1)
|
||||
temp.append(temp2)
|
||||
# print(f' coucou {e1}: \t{temp}')
|
||||
dpath = temp
|
||||
# print(dpath)
|
||||
candidates[d.disjunction_id] = dpath
|
||||
|
||||
# for i in disjunctions_list :
|
||||
# print(f'\n{candidates[i.disjunction_id]}')
|
||||
|
||||
# step 3
|
||||
# now for each request, select the path that satisfies all disjunctions
|
||||
# path must be in candidates[id] for all concerned ids
|
||||
# for example, assume set of sync vectors (disjunction groups) is
|
||||
# s1 = {rq1 rq2} s2 = {rq1 rq3}
|
||||
# candidate[s1] = [[p1 p8]
|
||||
# [p3 p8]]
|
||||
# candidate[s2] = [[p3 p6]]
|
||||
# for rq1 p3 should be preferred
|
||||
|
||||
|
||||
for pathreq in pathreqlist_disjt:
|
||||
concerned_d_id = [d.disjunction_id for d in disjunctions_list if pathreq.request_id in d.disjunctions_req]
|
||||
# for each set of solution, verify that the same path is used for the same request
|
||||
candidate_paths = simple_rqs[pathreq.request_id]
|
||||
# print('coucou')
|
||||
# print(pathreq.request_id)
|
||||
for p in candidate_paths :
|
||||
iscandidate = 0
|
||||
for sol in concerned_d_id :
|
||||
test = 1
|
||||
# for each solution test if p is part of the solution
|
||||
# if yes, then p can remain a candidate
|
||||
for i,m in enumerate(candidates[sol]) :
|
||||
if p in m:
|
||||
if allpaths[id(m[m.index(p)])].req.request_id == pathreq.request_id :
|
||||
test = 0
|
||||
break
|
||||
iscandidate += test
|
||||
if iscandidate != 0:
|
||||
for l in concerned_d_id :
|
||||
for m in candidates[l] :
|
||||
if p in m :
|
||||
candidates[l].remove(m)
|
||||
|
||||
# for i in disjunctions_list :
|
||||
# print(i.disjunction_id)
|
||||
# print(f'\n{candidates[i.disjunction_id]}')
|
||||
|
||||
# step 4 apply route constraints : remove candidate path that do not satisfy the constraint
|
||||
# only in the case of disjunction: the simple path is processed in request.compute_constrained_path
|
||||
# TODO : keep a version without the loose constraint
|
||||
for d in disjunctions_list :
|
||||
temp = []
|
||||
for j,sol in enumerate(candidates[d.disjunction_id]) :
|
||||
testispartok = True
|
||||
for i,p in enumerate(sol) :
|
||||
# print(f'test {allpaths[id(p)].req.request_id}')
|
||||
# print(f'length of route {len(allpaths[id(p)].req.nodes_list)}')
|
||||
if allpaths[id(p)].req.nodes_list :
|
||||
# if p does not contain the ordered node list, remove sol from the candidates
|
||||
# except if this was the last solution: then check if the constraint is loose or not
|
||||
if not ispart(allpaths[id(p)].req.nodes_list, p) :
|
||||
# print(f'nb of solutions {len(temp)}')
|
||||
if j < len(candidates[d.disjunction_id])-1 :
|
||||
msg = f'removing {sol}'
|
||||
logger.info(msg)
|
||||
testispartok = False
|
||||
#break
|
||||
else:
|
||||
if 'loose' in allpaths[id(p)].req.loose_list:
|
||||
logger.info(f'Could not apply route constraint'+
|
||||
f'{allpaths[id(p)].req.nodes_list} on request {allpaths[id(p)].req.request_id}')
|
||||
else :
|
||||
logger.info(f'removing last solution from candidate paths\n{sol}')
|
||||
testispartok = False
|
||||
if testispartok :
|
||||
temp.append(sol)
|
||||
candidates[d.disjunction_id] = temp
|
||||
|
||||
# step 5 select the first combination that works
|
||||
pathreslist_disjoint = {}
|
||||
for d in disjunctions_list :
|
||||
test_sol = True
|
||||
while test_sol:
|
||||
# print('coucou')
|
||||
if candidates[d.disjunction_id] :
|
||||
for p in candidates[d.disjunction_id][0]:
|
||||
if allpaths[id(p)].req in pathreqlist_disjt:
|
||||
# print(f'selected path :{p} for req {allpaths[id(p)].req.request_id}')
|
||||
pathreslist_disjoint[allpaths[id(p)].req] = allpaths[id(p)].pth
|
||||
pathreqlist_disjt.remove(allpaths[id(p)].req)
|
||||
candidates = remove_candidate(candidates, allpaths, allpaths[id(p)].req, p)
|
||||
test_sol = False
|
||||
else:
|
||||
msg = f'No disjoint path found with added constraint'
|
||||
logger.critical(msg)
|
||||
print(f'{msg}\nComputation stopped.')
|
||||
# TODO in this case: replay step 5 with the candidate without constraints
|
||||
exit()
|
||||
|
||||
# for i in disjunctions_list :
|
||||
# print(i.disjunction_id)
|
||||
# print(f'\n{candidates[i.disjunction_id]}')
|
||||
|
||||
# list the results in the same order as initial pathreqlist
|
||||
for req in pathreqlist :
|
||||
req.nodes_list.append(req.destination)
|
||||
# we assume that the destination is a strict constraint
|
||||
req.loose_list.append('strict')
|
||||
if req in pathreqlist_simple:
|
||||
path_res_list.append(compute_constrained_path(network, req))
|
||||
else:
|
||||
path_res_list.append(pathreslist_disjoint[req])
|
||||
return path_res_list
|
||||
|
||||
def isdisjoint(p1,p2) :
|
||||
# returns 0 if disjoint
|
||||
edge1 = list(pairwise(p1))
|
||||
edge2 = list(pairwise(p2))
|
||||
for e in edge1 :
|
||||
if e in edge2 :
|
||||
return 1
|
||||
return 0
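# Illustrative sketch (not part of the original code; node labels are made up):
# isdisjoint() compares edge lists built with pairwise(), so two paths that only
# share nodes, but no edge, are still reported as disjoint (return value 0).
assert isdisjoint(['A', 'B', 'C'], ['A', 'D', 'C']) == 0   # common nodes, no common edge
assert isdisjoint(['A', 'B', 'C'], ['A', 'B', 'D']) == 1   # shares the edge ('A', 'B')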
|
||||
|
||||
def find_reversed_path(p,network) :
|
||||
# select the intermediate ROADMs and find the path between them
|
||||
# note that this function may not give an exact result in case of multiple
|
||||
# links between two adjacent nodes.
|
||||
# TODO add some indication on elements to indicate from which other they
|
||||
# are the reversed direction
|
||||
reversed_roadm_path = list(reversed([e for e in p if isinstance (e,Roadm)]))
|
||||
source = p[-1]
|
||||
destination = p[0]
|
||||
total_path = [source]
|
||||
for node in reversed_roadm_path :
|
||||
total_path.extend(dijkstra_path(network, source, node)[1:])
|
||||
source = node
|
||||
total_path.append(destination)
|
||||
return total_path
|
||||
|
||||
def ispart(a,b) :
|
||||
# the function takes two paths a and b and returns True
|
||||
# if all a elements are part of b and in the same order
|
||||
j = 0
|
||||
for i, el in enumerate(a):
|
||||
if el in b :
|
||||
if b.index(el) >= j :
|
||||
j = b.index(el)
|
||||
else:
|
||||
return False
|
||||
else:
|
||||
return False
|
||||
return True
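# Illustrative sketch (not part of the original code): ispart() checks that all
# elements of a appear in b in the same relative order, which is how a request's
# nodes_list constraint is matched against a candidate path above.
assert ispart(['A', 'C'], ['A', 'B', 'C', 'D'])        # ordered sub-sequence
assert not ispart(['C', 'A'], ['A', 'B', 'C', 'D'])    # order violated
assert not ispart(['A', 'E'], ['A', 'B', 'C', 'D'])    # 'E' is missing from b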
|
||||
|
||||
def remove_candidate(candidates, allpaths, rq, pth) :
|
||||
# print(f'coucou {rq.request_id}')
|
||||
for key, candidate in candidates.items() :
|
||||
temp = candidate.copy()
|
||||
for i,sol in enumerate(candidate) :
|
||||
for p in sol :
|
||||
if allpaths[id(p)].req.request_id == rq.request_id :
|
||||
if id(p) != id(pth) :
|
||||
temp.remove(sol)
|
||||
break
|
||||
candidates[key] = temp
|
||||
return candidates
|
||||
|
||||
def compare_reqs(req1,req2,disjlist) :
|
||||
dis1 = [d for d in disjlist if req1.request_id in d.disjunctions_req]
|
||||
dis2 = [d for d in disjlist if req2.request_id in d.disjunctions_req]
|
||||
same_disj = False
|
||||
if dis1 and dis2 :
|
||||
temp1 = []
|
||||
for d in dis1:
|
||||
temp1.extend(d.disjunctions_req)
|
||||
temp1.remove(req1.request_id)
|
||||
temp2 = []
|
||||
for d in dis2:
|
||||
temp2.extend(d.disjunctions_req)
|
||||
temp2.remove(req2.request_id)
|
||||
if set(temp1) == set(temp2) :
|
||||
same_disj = True
|
||||
elif not dis2 and not dis1:
|
||||
same_disj = True
|
||||
|
||||
if req1.source == req2.source and \
|
||||
req1.destination == req2.destination and \
|
||||
req1.tsp == req2.tsp and \
|
||||
req1.tsp_mode == req2.tsp_mode and \
|
||||
req1.baud_rate == req2.baud_rate and \
|
||||
req1.nodes_list == req2.nodes_list and \
|
||||
req1.loose_list == req2.loose_list and \
|
||||
req1.spacing == req2.spacing and \
|
||||
req1.power == req2.power and \
|
||||
req1.nb_channel == req2.nb_channel and \
|
||||
req1.f_min == req2.f_min and \
|
||||
req1.f_max == req2.f_max and \
|
||||
req1.format == req2.format and \
|
||||
req1.OSNR == req2.OSNR and \
|
||||
req1.roll_off == req2.roll_off and \
|
||||
same_disj :
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def requests_aggregation(pathreqlist,disjlist) :
|
||||
# this function aggregates requests so that if several requests
|
||||
# exist between the same source and destination with the same transponder type, they are merged into one
|
||||
# todo maybe add conditions on mode ??, spacing ...
|
||||
# currently if undefined takes the default values
|
||||
local_list = pathreqlist.copy()
|
||||
for req in pathreqlist:
|
||||
for r in local_list :
|
||||
if req.request_id != r.request_id and compare_reqs(req, r, disjlist):
|
||||
# aggregate
|
||||
r.path_bandwidth += req.path_bandwidth
|
||||
temp_r_id = r.request_id
|
||||
r.request_id = ' | '.join((r.request_id,req.request_id))
|
||||
# remove request from list
|
||||
local_list.remove(req)
|
||||
# todo change also disjunction req with new demand
|
||||
|
||||
for d in disjlist :
|
||||
if req.request_id in d.disjunctions_req :
|
||||
d.disjunctions_req.remove(req.request_id)
|
||||
d.disjunctions_req.append(r.request_id)
|
||||
for d in disjlist :
|
||||
if temp_r_id in d.disjunctions_req :
|
||||
disjlist.remove(d)
|
||||
break
|
||||
return local_list, disjlist
|
||||
256
gnpy/core/service_sheet.py
Normal file
@@ -0,0 +1,256 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
gnpy.core.service_sheet
|
||||
========================
|
||||
|
||||
XLS parser that can be called to create a JSON request file in accordance with
|
||||
Yang model for requesting path computation.
|
||||
|
||||
See: draft-ietf-teas-yang-path-computation-01.txt
|
||||
"""
|
||||
|
||||
from sys import exit
|
||||
try:
|
||||
from xlrd import open_workbook, XL_CELL_EMPTY
|
||||
except ModuleNotFoundError:
|
||||
exit('Required: `pip install xlrd`')
|
||||
from collections import namedtuple
|
||||
from logging import getLogger, basicConfig, CRITICAL, DEBUG, INFO
|
||||
from json import dumps
|
||||
from pathlib import Path
|
||||
from gnpy.core.equipment import load_equipment
|
||||
from gnpy.core.utils import db2lin, lin2db
|
||||
|
||||
SERVICES_COLUMN = 12
|
||||
#EQPT_LIBRARY_FILENAME = Path(__file__).parent / 'eqpt_config.json'
|
||||
|
||||
all_rows = lambda sheet, start=0: (sheet.row(x) for x in range(start, sheet.nrows))
|
||||
logger = getLogger(__name__)
|
||||
|
||||
# Type for input data
|
||||
class Request(namedtuple('Request', 'request_id source destination trx_type mode \
|
||||
spacing power nb_channel disjoint_from nodes_list is_loose path_bandwidth')):
|
||||
def __new__(cls, request_id, source, destination, trx_type, mode=None , spacing= None , power = None, nb_channel = None , disjoint_from ='' , nodes_list = None, is_loose = '', path_bandwidth = None):
|
||||
return super().__new__(cls, request_id, source, destination, trx_type, mode, spacing, power, nb_channel, disjoint_from, nodes_list, is_loose, path_bandwidth)
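# Hypothetical example (all values are placeholders, not from any real sheet):
# a row with only the mandatory columns filled keeps the namedtuple defaults
# (mode=None, spacing=None, power=None, ...) for everything else.
_example_row = Request(request_id='0', source='A', destination='B', trx_type='vendorA_trx')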
|
||||
|
||||
# Type for output data: // from dutc
|
||||
class Element:
|
||||
def __eq__(self, other):
|
||||
return type(self) == type(other) and self.uid == other.uid
|
||||
def __hash__(self):
|
||||
return hash((type(self), self.uid))
|
||||
|
||||
class Request_element(Element):
|
||||
def __init__(self,Request,eqpt_filename):
|
||||
# request_id is str
|
||||
# excel has automatic number formatting that adds .0 on integer values
|
||||
# the next lines recover the pure int value, assuming this .0 is unwanted
|
||||
self.request_id = correct_xlrd_int_to_str_reading(Request.request_id)
|
||||
self.source = Request.source
|
||||
self.destination = Request.destination
|
||||
# TODO: the automatic naming generated by excel parser requires that source and dest name
|
||||
# be a string starting with 'trx' : this is manually added here.
|
||||
self.srctpid = f'trx {Request.source}'
|
||||
self.dsttpid = f'trx {Request.destination}'
|
||||
# test that trx_type belongs to eqpt_config.json
|
||||
# if not replace it with a default
|
||||
equipment = load_equipment(eqpt_filename)
|
||||
try :
|
||||
if equipment['Transceiver'][Request.trx_type]:
|
||||
self.trx_type = correct_xlrd_int_to_str_reading(Request.trx_type)
|
||||
if Request.mode is not None :
|
||||
Requestmode = correct_xlrd_int_to_str_reading(Request.mode)
|
||||
if [mode for mode in equipment['Transceiver'][Request.trx_type].mode if mode['format'] == Requestmode]:
|
||||
self.mode = Requestmode
|
||||
else :
|
||||
msg = f'Request Id: {self.request_id} - could not find tsp : \'{Request.trx_type}\' with mode: \'{Requestmode}\' in eqpt library \nComputation stopped.'
|
||||
#print(msg)
|
||||
logger.critical(msg)
|
||||
exit(1)
|
||||
else:
|
||||
Requestmode = None
|
||||
self.mode = Request.mode
|
||||
except KeyError:
|
||||
msg = f'Request Id: {self.request_id} - could not find tsp : \'{Request.trx_type}\' with mode: \'{Requestmode}\' in eqpt library \nComputation stopped.'
|
||||
#print(msg)
|
||||
logger.critical(msg)
|
||||
exit()
|
||||
# excel input are in GHz and dBm
|
||||
if Request.spacing is not None:
|
||||
self.spacing = Request.spacing * 1e9
|
||||
else:
|
||||
msg = f'Request {self.request_id} missing spacing: spacing is mandatory.\ncomputation stopped'
|
||||
logger.critical(msg)
|
||||
exit()
|
||||
if Request.power is not None:
|
||||
self.power = db2lin(Request.power) * 1e-3
|
||||
else:
|
||||
self.power = None
|
||||
if Request.nb_channel is not None :
|
||||
self.nb_channel = int(Request.nb_channel)
|
||||
else:
|
||||
self.nb_channel = None
|
||||
|
||||
value = correct_xlrd_int_to_str_reading(Request.disjoint_from)
|
||||
self.disjoint_from = [n for n in value.split(' | ') if value]
|
||||
self.nodes_list = []
|
||||
if Request.nodes_list :
|
||||
self.nodes_list = Request.nodes_list.split(' | ')
|
||||
|
||||
# cleaning the list of nodes to remove source and destination
|
||||
# (because the remaining of the program assumes that the nodes list are nodes
|
||||
# on the path and should not include source and destination)
|
||||
try :
|
||||
self.nodes_list.remove(self.source)
|
||||
msg = f'{self.source} removed from explicit path node-list'
|
||||
logger.info(msg)
|
||||
except ValueError:
|
||||
msg = f'{self.source} already removed from explicit path node-list'
|
||||
logger.info(msg)
|
||||
|
||||
try :
|
||||
self.nodes_list.remove(self.destination)
|
||||
msg = f'{self.destination} removed from explicit path node-list'
|
||||
logger.info(msg)
|
||||
except ValueError:
|
||||
msg = f'{self.destination} already removed from explicit path node-list'
|
||||
logger.info(msg)
|
||||
|
||||
# the excel parser applies the same hop-type to all nodes in the route nodes_list.
|
||||
# user can change this per node in the generated json
|
||||
self.loose = 'loose'
|
||||
if Request.is_loose == 'no' :
|
||||
self.loose = 'strict'
|
||||
self.path_bandwidth = None
|
||||
if Request.path_bandwidth is not None:
|
||||
self.path_bandwidth = Request.path_bandwidth * 1e9
|
||||
else:
|
||||
self.path_bandwidth = 0
|
||||
|
||||
uid = property(lambda self: repr(self))
|
||||
@property
|
||||
def pathrequest(self):
|
||||
req_dictionnary = {
|
||||
'request-id':self.request_id,
|
||||
'source': self.source,
|
||||
'destination': self.destination,
|
||||
'src-tp-id': self.srctpid,
|
||||
'dst-tp-id': self.dsttpid,
|
||||
'path-constraints':{
|
||||
'te-bandwidth': {
|
||||
'technology': 'flexi-grid',
|
||||
'trx_type' : self.trx_type,
|
||||
'trx_mode' : self.mode,
|
||||
'effective-freq-slot':[{'n': 'null','m': 'null'}] ,
|
||||
'spacing' : self.spacing,
|
||||
'max-nb-of-channel' : self.nb_channel,
|
||||
'output-power' : self.power
|
||||
# 'path_bandwidth' : self.path_bandwidth
|
||||
}
|
||||
},
|
||||
'optimizations': {
|
||||
'explicit-route-include-objects': [
|
||||
{
|
||||
'index': self.nodes_list.index(node),
|
||||
'unnumbered-hop':{
|
||||
'node-id': f'{node}',
|
||||
'link-tp-id': 'link-tp-id is not used',
|
||||
'hop-type': f'{self.loose}',
|
||||
'direction': 'direction is not used'
|
||||
},
|
||||
'label-hop':{
|
||||
'te-label': {
|
||||
'generic': 'generic is not used',
|
||||
'direction': 'direction is not used'
|
||||
}
|
||||
}
|
||||
}
|
||||
for node in self.nodes_list
|
||||
]
|
||||
|
||||
}
|
||||
}
|
||||
if self.path_bandwidth is not None:
|
||||
req_dictionnary['path-constraints']['te-bandwidth']['path_bandwidth'] = self.path_bandwidth
|
||||
|
||||
return req_dictionnary
|
||||
@property
|
||||
def pathsync(self):
|
||||
if self.disjoint_from :
|
||||
return {'synchronization-id':self.request_id,
|
||||
'svec': {
|
||||
'relaxable' : 'False',
|
||||
'link-diverse': 'True',
|
||||
'node-diverse': 'True',
|
||||
'request-id-number': [self.request_id]+ [n for n in self.disjoint_from]
|
||||
}
|
||||
}
|
||||
# TO-DO: avoid multiple entries with same synchronisation vectors
|
||||
@property
|
||||
def json(self):
|
||||
return self.pathrequest , self.pathsync
|
||||
|
||||
def convert_service_sheet(input_filename, eqpt_filename, output_filename='', filter_region=[]):
|
||||
service = parse_excel(input_filename)
|
||||
req = [Request_element(n,eqpt_filename) for n in service]
|
||||
# dump the output into a json file whose name is derived from the input file name
|
||||
# split_filename = [input_filename[0:len(input_filename)-len(suffix_filename)] , suffix_filename[1:]]
|
||||
if output_filename=='':
|
||||
output_filename = f'{str(input_filename)[0:len(str(input_filename))-len(str(input_filename.suffixes[0]))]}_services.json'
|
||||
# for debug
|
||||
# print(json_filename)
|
||||
data = {
|
||||
'path-request': [n.json[0] for n in req],
|
||||
'synchronization': [n.json[1] for n in req
|
||||
if n.json[1] is not None]
|
||||
}
|
||||
with open(output_filename, 'w', encoding='utf-8') as f:
|
||||
f.write(dumps(data, indent=2, ensure_ascii=False))
|
||||
return data
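# Hypothetical invocation (file names are placeholders): the workbook must hold a
# 'Service' sheet; with output_filename left empty the JSON lands next to the
# input file with a '_services.json' suffix, e.g.
#     convert_service_sheet(Path('mesh_topology_with_services.xls'), 'eqpt_config.json')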
|
||||
|
||||
def correct_xlrd_int_to_str_reading(v) :
|
||||
if not isinstance(v,str):
|
||||
value = str(int(v))
|
||||
if value.endswith('.0'):
|
||||
value = value[:-2]
|
||||
else:
|
||||
value = v
|
||||
return value
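# Behaviour sketch: xlrd hands numeric cells back as floats, so an id typed as 3
# in Excel arrives as 3.0; the helper restores the string '3', while genuine
# strings pass through untouched.
assert correct_xlrd_int_to_str_reading(3.0) == '3'
assert correct_xlrd_int_to_str_reading('route 3') == 'route 3'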
|
||||
|
||||
# to be used from dutc
|
||||
def parse_row(row, fieldnames):
|
||||
return {f: r.value for f, r in zip(fieldnames, row[0:SERVICES_COLUMN])
|
||||
if r.ctype != XL_CELL_EMPTY}
|
||||
#
|
||||
|
||||
def parse_excel(input_filename):
|
||||
with open_workbook(input_filename) as wb:
|
||||
service_sheet = wb.sheet_by_name('Service')
|
||||
services = list(parse_service_sheet(service_sheet))
|
||||
return services
|
||||
|
||||
def parse_service_sheet(service_sheet):
|
||||
logger.info(f'Validating headers on {service_sheet.name!r}')
|
||||
# add a test on field to enable the '' field case that arises when columns on the
|
||||
# right hand side are used as comments or drawing in the excel sheet
|
||||
header = [x.value.strip() for x in service_sheet.row(4)[0:SERVICES_COLUMN] if len(x.value.strip())>0]
|
||||
|
||||
# create a service_fieldname independent from the excel column order
|
||||
# to be compatible with any version of the sheet
|
||||
# the following dictionary records the excel field names and the corresponding parameter names
|
||||
|
||||
authorized_fieldnames = {'route id':'request_id', 'Source':'source', 'Destination':'destination', \
|
||||
'TRX type':'trx_type', 'Mode' : 'mode', 'System: spacing':'spacing', \
|
||||
'System: input power (dBm)':'power', 'System: nb of channels':'nb_channel',\
|
||||
'routing: disjoint from': 'disjoint_from', 'routing: path':'nodes_list',\
|
||||
'routing: is loose?':'is_loose', 'path bandwidth':'path_bandwidth'}
|
||||
try :
|
||||
service_fieldnames = [authorized_fieldnames[e] for e in header]
|
||||
except KeyError:
|
||||
msg = f'Malformed header on Service sheet: {header} field not in {authorized_fieldnames}'
|
||||
logger.critical(msg)
|
||||
raise ValueError(msg)
|
||||
for row in all_rows(service_sheet, start=5):
|
||||
yield Request(**parse_row(row[0:SERVICES_COLUMN], service_fieldnames))
|
||||
5
gnpy/core/units.py
Normal file
@@ -0,0 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
UNITS = {'m': 1,
|
||||
'km': 1E3}
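# Minimal usage sketch: UNITS maps a length-unit string to its factor in metres,
# so a span entered as 80 km becomes 80 * UNITS['km'] == 80000.0 m.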
|
||||
187
gnpy/core/utils.py
Normal file
@@ -0,0 +1,187 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.utils
|
||||
===============
|
||||
|
||||
This module contains utility functions that are used with gnpy.
|
||||
'''
|
||||
|
||||
|
||||
import json
|
||||
|
||||
import numpy as np
|
||||
from csv import writer
|
||||
from numpy import pi, cos, sqrt, log10
|
||||
from scipy import constants
|
||||
|
||||
|
||||
def load_json(filename):
|
||||
with open(filename, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
return data
|
||||
|
||||
|
||||
def save_json(obj, filename):
|
||||
with open(filename, 'w', encoding='utf-8') as f:
|
||||
json.dump(obj, f, indent=2, ensure_ascii=False)
|
||||
|
||||
def write_csv(obj, filename):
|
||||
"""
|
||||
convert dictionary items to a csv file
|
||||
the dictionary format :
|
||||
|
||||
{'result category 1':
|
||||
[
|
||||
# 1st line of results
|
||||
{'header 1' : value_xxx,
|
||||
'header 2' : value_yyy},
|
||||
# 2nd line of results: same headers, different results
|
||||
{'header 1' : value_www,
|
||||
'header 2' : value_zzz}
|
||||
],
|
||||
'result_category 2':
|
||||
[
|
||||
{},{}
|
||||
]
|
||||
}
|
||||
|
||||
the generated csv file will be:
|
||||
result_category 1
|
||||
header 1 header 2
|
||||
value_xxx value_yyy
|
||||
value_www value_zzz
|
||||
result_category 2
|
||||
...
|
||||
"""
|
||||
with open(filename, 'w', encoding='utf-8') as f:
|
||||
w = writer(f)
|
||||
for data_key, data_list in obj.items():
|
||||
#main header
|
||||
w.writerow([data_key])
|
||||
#sub headers:
|
||||
headers = [_ for _ in data_list[0].keys()]
|
||||
w.writerow(headers)
|
||||
for data_dict in data_list:
|
||||
w.writerow([_ for _ in data_dict.values()])
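# Usage sketch matching the dictionary layout described in the docstring above
# (category and header names are purely illustrative):
#     write_csv({'simulation results': [{'OSNR (dB)': 22.1, 'SNR (dB)': 19.3},
#                                       {'OSNR (dB)': 21.7, 'SNR (dB)': 18.9}]},
#               'results.csv')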
|
||||
|
||||
def c():
|
||||
"""
|
||||
Returns the speed of light in meters per second
|
||||
"""
|
||||
return constants.c
|
||||
|
||||
|
||||
def itufs(spacing, startf=191.35, stopf=196.10):
|
||||
"""Creates an array of frequencies whose default range is
|
||||
191.35-196.10 THz
|
||||
|
||||
:param spacing: Frequency spacing in THz
|
||||
:param startf: Start frequency in THz
|
||||
:param stopf: Stop frequency in THz
|
||||
:type spacing: float
|
||||
:type startf: float
|
||||
:type stopf: float
|
||||
:return: an array of frequencies determined by the spacing parameter
|
||||
:rtype: numpy.ndarray
|
||||
"""
|
||||
return np.arange(startf, stopf + spacing / 2, spacing)
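# Example with the default band limits above (the variable name is illustrative):
# a 50 GHz (0.05 THz) spacing yields 96 channel frequencies, 191.35 ... 196.10 THz.
fifty_ghz_grid = itufs(0.05)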
|
||||
|
||||
|
||||
def h():
|
||||
"""
|
||||
Returns Planck's constant in J*s
|
||||
"""
|
||||
return constants.h
|
||||
|
||||
|
||||
def lin2db(value):
|
||||
return 10 * log10(value)
|
||||
|
||||
|
||||
def db2lin(value):
|
||||
return 10**(value / 10)
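# Quick sanity sketch: lin2db() and db2lin() are inverses, and 3 dB is close to
# a factor of two in linear units (10**0.3 ~= 1.995).
assert abs(lin2db(db2lin(3.0)) - 3.0) < 1e-9
assert abs(db2lin(3.0) - 2.0) < 0.01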
|
||||
|
||||
def round2float(number, step):
|
||||
step = round(step, 1)
|
||||
if step >= 0.01:
|
||||
number = round(number / step, 0)
|
||||
number = round(number * step, 1)
|
||||
else:
|
||||
number = round(number, 2)
|
||||
return number
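# Behaviour sketch: the value is snapped to the nearest multiple of `step`
# (itself rounded to one decimal); steps below 0.01 fall back to a plain
# two-decimal rounding.
assert round2float(32.17, 0.5) == 32.0      # snapped onto the 0.5 grid
assert round2float(1.234, 0.001) == 1.23    # step too small -> round to 2 decimals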
|
||||
|
||||
wavelength2freq = constants.lambda2nu
|
||||
freq2wavelength = constants.nu2lambda
|
||||
|
||||
def freq2wavelength(value):
|
||||
""" Converts frequency units to wavelength units.
|
||||
"""
|
||||
return c() / value
|
||||
|
||||
def snr_sum(snr, bw, snr_added, bw_added=12.5e9):
|
||||
snr_added = snr_added - lin2db(bw/bw_added)
|
||||
snr = -lin2db(db2lin(-snr)+db2lin(-snr_added))
|
||||
return snr
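# Worked sketch (assumed values): adding a 25 dB contribution measured in the
# 12.5 GHz reference bandwidth to a 20 dB SNR defined over a 32 GHz bandwidth.
# The added term is first rescaled by -lin2db(32e9 / 12.5e9) ~= -4.1 dB, then
# both terms are summed in linear units, giving roughly 17.4 dB.
combined_snr = snr_sum(20.0, 32e9, 25.0)    # ~17.4 dB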
|
||||
|
||||
def deltawl2deltaf(delta_wl, wavelength):
|
||||
""" deltawl2deltaf(delta_wl, wavelength):
|
||||
delta_wl is BW in wavelength units
|
||||
wavelength is the center wl
|
||||
units for delta_wl and wavelength must be the same
|
||||
|
||||
:param delta_wl: delta wavelength BW in same units as wavelength
|
||||
:param wavelength: wavelength BW is relevant for
|
||||
:type delta_wl: float or numpy.ndarray
|
||||
:type wavelength: float
|
||||
:return: The BW in frequency units
|
||||
:rtype: float or ndarray
|
||||
|
||||
"""
|
||||
f = wavelength2freq(wavelength)
|
||||
return delta_wl * f / wavelength
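# Numeric sketch (assumed values): a 0.1 nm bandwidth around 1550 nm corresponds
# to roughly 12.5 GHz, the usual OSNR reference bandwidth.
delta_f_ref = deltawl2deltaf(0.1e-9, 1550e-9)   # ~1.25e10 Hz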
|
||||
|
||||
|
||||
def deltaf2deltawl(delta_f, frequency):
|
||||
""" deltawl2deltaf(delta_f, frequency):
|
||||
converts delta frequency to delta wavelength
|
||||
units for delta_f and frequency must be the same
|
||||
|
||||
:param delta_f: delta frequency in same units as frequency
|
||||
:param frequency: frequency BW is relevant for
|
||||
:type delta_f: float or numpy.ndarray
|
||||
:type frequency: float
|
||||
:return: The BW in wavelength units
|
||||
:rtype: float or ndarray
|
||||
|
||||
"""
|
||||
wl = freq2wavelength(frequency)
|
||||
return delta_f * wl / frequency
|
||||
|
||||
|
||||
def rrc(ffs, baud_rate, alpha):
|
||||
""" rrc(ffs, baud_rate, alpha): computes the root-raised cosine filter
|
||||
function.
|
||||
|
||||
:param ffs: A numpy array of frequencies
|
||||
:param baud_rate: The Baud Rate of the System
|
||||
:param alpha: The roll-off factor of the filter
|
||||
:type ffs: numpy.ndarray
|
||||
:type baud_rate: float
|
||||
:type alpha: float
|
||||
:return: hf a numpy array of the filter shape
|
||||
:rtype: numpy.ndarray
|
||||
|
||||
"""
|
||||
Ts = 1 / baud_rate
|
||||
l_lim = (1 - alpha) / (2 * Ts)
|
||||
r_lim = (1 + alpha) / (2 * Ts)
|
||||
hf = np.zeros(np.shape(ffs))
|
||||
slope_inds = np.where(
|
||||
np.logical_and(np.abs(ffs) > l_lim, np.abs(ffs) < r_lim))
|
||||
hf[slope_inds] = 0.5 * (1 + cos((pi * Ts / alpha) *
|
||||
(np.abs(ffs[slope_inds]) - l_lim)))
|
||||
p_inds = np.where(np.logical_and(np.abs(ffs) > 0, np.abs(ffs) < l_lim))
|
||||
hf[p_inds] = 1
|
||||
return sqrt(hf)
|
||||
904
gnpy/gnpy.py
@@ -1,904 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""Top-level package for gnpy."""
|
||||
|
||||
__author__ = """<TBD>"""
|
||||
__email__ = '<TBD>@<TBD>.com'
|
||||
__version__ = '0.1.0'
|
||||
|
||||
import numpy as np
|
||||
import multiprocessing as mp
|
||||
import scipy.interpolate as interp
|
||||
|
||||
"""
|
||||
GNPy: a Python 3 implementation of the Gaussian Noise (GN) Model of nonlinear
|
||||
propagation, developed by the OptCom group, Department of Electronics and
|
||||
Telecommunications, Politecnico di Torino, Italy
|
||||
"""
|
||||
|
||||
__credits__ = ["Mattia Cantono", "Vittorio Curri", "Alessio Ferrari"]
|
||||
|
||||
|
||||
def raised_cosine_comb(f, rs, roll_off, center_freq, power):
|
||||
""" Returns an array storing the PSD of a WDM comb of raised cosine shaped
|
||||
channels at the input frequencies defined in array f
|
||||
|
||||
:param f: Array of frequencies in THz
|
||||
:param rs: Array of Symbol Rates in TBaud. One Symbol rate for each channel
|
||||
:param roll_off: Array of roll-off factors [0,1). One per channel
|
||||
:param center_freq: Array of channels central frequencies in THz. One per channel
|
||||
:param power: Array of channel powers in W. One per channel
|
||||
:return: PSD of the WDM comb evaluated over f
|
||||
"""
|
||||
ts_arr = 1.0 / rs
|
||||
passband_arr = (1.0 - roll_off) / (2.0 * ts_arr)
|
||||
stopband_arr = (1.0 + roll_off) / (2.0 * ts_arr)
|
||||
g = power / rs
|
||||
psd = np.zeros(np.shape(f))
|
||||
for ind in range(np.size(center_freq)):
|
||||
f_nch = center_freq[ind]
|
||||
g_ch = g[ind]
|
||||
ts = ts_arr[ind]
|
||||
passband = passband_arr[ind]
|
||||
stopband = stopband_arr[ind]
|
||||
ff = np.abs(f - f_nch)
|
||||
tf = ff - passband
|
||||
if roll_off[ind] == 0:
|
||||
psd = np.where(tf <= 0, g_ch, 0.) + psd
|
||||
else:
|
||||
psd = g_ch * (np.where(tf <= 0, 1., 0.) + 1.0 / 2.0 * (1 + np.cos(np.pi * ts / roll_off[ind] *
|
||||
tf)) * np.where(tf > 0, 1., 0.) *
|
||||
np.where(np.abs(ff) <= stopband, 1., 0.)) + psd
|
||||
|
||||
return psd
|
||||
|
||||
|
||||
def fwm_eff(a, Lspan, b2, ff):
|
||||
""" Computes the four-wave mixing efficiency given the fiber characteristics
|
||||
over a given frequency set ff
|
||||
:param a: Fiber loss coefficient in 1/km
|
||||
:param Lspan: Fiber length in km
|
||||
:param b2: Fiber Dispersion coefficient in ps/THz/km
|
||||
:param ff: Array of Frequency points in THz
|
||||
:return: FWM efficiency rho
|
||||
"""
|
||||
rho = np.power(np.abs((1.0 - np.exp(-2.0 * a * Lspan + 1j * 4.0 * np.pi * np.pi * b2 * Lspan * ff)) / (
|
||||
2.0 * a - 1j * 4.0 * np.pi * np.pi * b2 * ff)), 2)
|
||||
return rho
|
||||
|
||||
|
||||
def get_freqarray(f, Bopt, fmax, max_step, f_dense_low, f_dense_up, df_dense):
|
||||
""" Returns a non-uniformly spaced frequency array useful for fast GN-model.
|
||||
integration. The frequency array is made of a denser area, sided by two
|
||||
log-spaced arrays
|
||||
:param f: Central frequency at which NLI is evaluated in THz
|
||||
:param Bopt: Total optical bandwidth of the system in THz
|
||||
:param fmax: Upper limit of the integration domain in THz
|
||||
:param max_step: Maximum step size for frequency array definition in THz
|
||||
:param f_dense_low: Lower limit of denser frequency region in THz
|
||||
:param f_dense_up: Upper limit of denser frequency region in THz
|
||||
:param df_dense: Step size to be used in the denser frequency region in THz
|
||||
:return: Non uniformly defined frequency array
|
||||
"""
|
||||
f_dense = np.arange(f_dense_low, f_dense_up, df_dense)
|
||||
k = Bopt / 2.0 / (Bopt / 2.0 - max_step) # Compute Step ratio for log-spaced array definition
|
||||
if f < 0:
|
||||
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_low)) / np.log(k) + 1.0)
|
||||
f1_short = -(np.abs(f_dense_low) * np.power(k, np.arange(Nlog_short, 0.0, -1.0) - 1.0))
|
||||
k = (Bopt / 2 + (np.abs(f_dense_up) - f_dense_low)) / (Bopt / 2.0 - max_step + (np.abs(f_dense_up) - f_dense_up))
|
||||
Nlog_long = np.ceil(np.log((fmax + (np.abs(f_dense_up) - f_dense_up)) / abs(f_dense_up)) * 1.0 / np.log(k) + 1.0)
|
||||
f1_long = np.abs(f_dense_up) * np.power(k, (np.arange(1, Nlog_long + 1) - 1.0)) - (
|
||||
np.abs(f_dense_up) - f_dense_up)
|
||||
f1_array = np.concatenate([f1_short, f_dense[1:], f1_long])
|
||||
else:
|
||||
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_up)) / np.log(k) + 1.0)
|
||||
f1_short = f_dense_up * np.power(k, np.arange(1, Nlog_short + 1.0) - 1.0)
|
||||
k = (Bopt / 2.0 + (abs(f_dense_low) + f_dense_low)) / (Bopt / 2.0 - max_step + (abs(f_dense_low) + f_dense_low))
|
||||
Nlog_long = np.ceil(np.log((fmax + (np.abs(f_dense_low) + f_dense_low)) / np.abs(f_dense_low)) / np.log(k) + 1)
|
||||
f1_long = -(np.abs(f_dense_low) * np.power(k, np.arange(Nlog_long, 0, -1) - 1.0)) + (
|
||||
abs(f_dense_low) + f_dense_low)
|
||||
f1_array = np.concatenate([f1_long, f_dense[1:], f1_short])
|
||||
return f1_array
|
||||
|
||||
|
||||
def GN_integral(b2, Lspan, a_db, gam, f_ch, b_ch, roll_off, power, Nch, model_param):
|
||||
""" GN_integral computes the GN reference formula via smart brute force integration. The Gaussian Noise model is
|
||||
applied in its incoherent form (phased-array factor =1). The function computes the integral by columns: for each f1,
|
||||
a non-uniformly spaced f2 array is generated, and the integrand function is computed there. At the end of the loop
|
||||
on f1, the overall GNLI is computed. Accuracy can be tuned by operating on model_param argument.
|
||||
|
||||
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
:param Lspan: Fiber Span length in km. Scalar
|
||||
:param a_db: Fiber loss coefficient in dB/km. Scalar
|
||||
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param b_ch: Channels' -3 dB bandwidth. Array of size 1xNch
|
||||
:param roll_off: Channels' Roll-off factors [0,1). Array of size 1xNch
|
||||
:param power: Channels' power values in W. Array of size 1xNch
|
||||
:param Nch: Number of channels. Scalar
|
||||
:param model_param: Dictionary with model parameters for accuracy tuning
|
||||
model_param['min_FWM_inv']: Minimum FWM efficiency value to be considered for high density
|
||||
integration in dB
|
||||
model_param['n_grid']: Maximum Number of integration points to be used in each frequency slot of
|
||||
the spectrum
|
||||
model_param['n_grid_min']: Minimum Number of integration points to be used in each frequency
|
||||
slot of the spectrum
|
||||
model_param['f_array']: Frequencies at which evaluate GNLI, expressed in THz
|
||||
:return: GNLI: power spectral density in W/THz of the nonlinear interference at frequencies model_param['f_array']
|
||||
"""
|
||||
alpha_lin = a_db / 20.0 / np.log10(np.e) # Conversion in linear units 1/km
|
||||
min_FWM_inv = np.power(10, model_param['min_FWM_inv'] / 10) # Conversion in linear units
|
||||
n_grid = model_param['n_grid']
|
||||
n_grid_min = model_param['n_grid_min']
|
||||
f_array = model_param['f_array']
|
||||
fmax = (f_ch[-1] - (b_ch[-1] / 2.0)) - (f_ch[0] - (b_ch[0] / 2.0)) # Get frequency limit
|
||||
f2eval = np.max(np.diff(f_ch))
|
||||
Bopt = f2eval * Nch # Overall optical bandwidth [THz]
|
||||
min_step = f2eval / n_grid # Minimum integration step
|
||||
max_step = f2eval / n_grid_min # Maximum integration step
|
||||
f_dense_start = np.abs(
|
||||
np.sqrt(np.power(alpha_lin, 2) / (4.0 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1.0)) / f2eval)
|
||||
f_ind_eval = 0
|
||||
GNLI = np.full(f_array.size, np.nan) # Pre-allocate results
|
||||
for f in f_array: # Loop over f
|
||||
f_dense_low = f - f_dense_start
|
||||
f_dense_up = f + f_dense_start
|
||||
if f_dense_low < -fmax:
|
||||
f_dense_low = -fmax
|
||||
if f_dense_low == 0.0:
|
||||
f_dense_low = -min_step
|
||||
if f_dense_up == 0.0:
|
||||
f_dense_up = min_step
|
||||
if f_dense_up > fmax:
|
||||
f_dense_up = fmax
|
||||
f_dense_width = np.abs(f_dense_up - f_dense_low)
|
||||
n_grid_dense = np.ceil(f_dense_width / min_step)
|
||||
df = f_dense_width / n_grid_dense
|
||||
# Get non-uniformly spaced f1 array
|
||||
f1_array = get_freqarray(f, Bopt, fmax, max_step, f_dense_low, f_dense_up, df)
|
||||
G1 = raised_cosine_comb(f1_array, b_ch, roll_off, f_ch, power) # Get corresponding spectrum
|
||||
Gpart = np.zeros(f1_array.size) # Pre-allocate partial result for inner integral
|
||||
f_ind = 0
|
||||
for f1 in f1_array: # Loop over f1
|
||||
if f1 != f:
|
||||
f_lim = np.sqrt(np.power(alpha_lin, 2) / (4.0 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1.0)) / (
|
||||
f1 - f) + f
|
||||
f2_dense_up = np.maximum(f_lim, -f_lim)
|
||||
f2_dense_low = np.minimum(f_lim, -f_lim)
|
||||
if f2_dense_low == 0:
|
||||
f2_dense_low = -min_step
|
||||
if f2_dense_up == 0:
|
||||
f2_dense_up = min_step
|
||||
if f2_dense_low < -fmax:
|
||||
f2_dense_low = -fmax
|
||||
if f2_dense_up > fmax:
|
||||
f2_dense_up = fmax
|
||||
else:
|
||||
f2_dense_up = fmax
|
||||
f2_dense_low = -fmax
|
||||
f2_dense_width = np.abs(f2_dense_up - f2_dense_low)
|
||||
n2_grid_dense = np.ceil(f2_dense_width / min_step)
|
||||
df2 = f2_dense_width / n2_grid_dense
|
||||
# Get non-uniformly spaced f2 array
|
||||
f2_array = get_freqarray(f, Bopt, fmax, max_step, f2_dense_low, f2_dense_up, df2)
|
||||
f2_array = f2_array[f2_array >= f1] # Do not consider points below the bisector of quadrants I and III
|
||||
if f2_array.size > 0:
|
||||
G2 = raised_cosine_comb(f2_array, b_ch, roll_off, f_ch, power) # Get spectrum there
|
||||
f3_array = f1 + f2_array - f # Compute f3
|
||||
G3 = raised_cosine_comb(f3_array, b_ch, roll_off, f_ch, power) # Get spectrum over f3
|
||||
G = G2 * G3 * G1[f_ind]
|
||||
if np.count_nonzero(G):
|
||||
FWM_eff = fwm_eff(alpha_lin, Lspan, b2, (f1 - f) * (f2_array - f)) # Compute FWM efficiency
|
||||
Gpart[f_ind] = 2.0 * np.trapz(FWM_eff * G, f2_array) # Compute inner integral
|
||||
f_ind += 1
|
||||
# Compute outer integral. Nominal span loss already compensated
|
||||
GNLI[f_ind_eval] = 16.0 / 27.0 * gam * gam * np.trapz(Gpart, f1_array)
|
||||
f_ind_eval += 1 # Next frequency
|
||||
return GNLI # Return GNLI array in W/THz and the array of the corresponding frequencies
|
||||
|
||||
|
||||
def compute_psi(b2, l_eff_a, f_ch, channel_index, interfering_index, b_ch):
|
||||
""" compute_psi computes the psi coefficient of the analytical formula.
|
||||
|
||||
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
:param l_eff_a: Asymptotic effective length in km. Scalar
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param channel_index: Index of the channel. Scalar
|
||||
:param interfering_index: Index of the interfering signal. Scalar
|
||||
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
|
||||
:return: psi: the coefficient
|
||||
"""
|
||||
b2 = np.abs(b2)
|
||||
|
||||
if channel_index == interfering_index: # The signal interferes with itself
|
||||
b_ch_sig = b_ch[channel_index]
|
||||
psi = np.arcsinh(0.5 * np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig ** 2.0)
|
||||
else:
|
||||
f_sig = f_ch[channel_index]
|
||||
b_ch_sig = b_ch[channel_index]
|
||||
f_int = f_ch[interfering_index]
|
||||
b_ch_int = b_ch[interfering_index]
|
||||
del_f = f_sig - f_int
|
||||
psi = np.arcsinh(np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig * (del_f + 0.5 * b_ch_int))
|
||||
psi -= np.arcsinh(np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig * (del_f - 0.5 * b_ch_int))
|
||||
|
||||
return psi
|
||||
|
||||
|
||||
def analytic_formula(ind, b2, l_eff, l_eff_a, gam, f_ch, g_ch, b_ch, n_ch):
|
||||
""" analytic_formula computes the analytical formula.
|
||||
|
||||
:param ind: index of the channel at which g_nli is computed. Scalar
|
||||
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
:param l_eff: Effective length in km. Scalar
|
||||
:param l_eff_a: Asymptotic effective length in km. Scalar
|
||||
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param g_ch: Power spectral density W/THz. Array of size 1xNch
|
||||
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
|
||||
:param n_ch: Number of channels. Scalar
|
||||
:return: g_nli: power spectral density in W/THz of the nonlinear interference
|
||||
"""
|
||||
ch_psd = g_ch[ind]
|
||||
b2 = abs(b2)
|
||||
|
||||
g_nli = 0.0
|
||||
for n in np.arange(0, n_ch):
|
||||
psi = compute_psi(b2, l_eff_a, f_ch, ind, n, b_ch)
|
||||
g_nli += g_ch[n] * ch_psd ** 2.0 * psi
|
||||
|
||||
g_nli *= (16.0 / 27.0) * (gam * l_eff) ** 2.0 / (2.0 * np.pi * b2 * l_eff_a)
|
||||
|
||||
return g_nli
|
||||
|
||||
|
||||
def gn_analytic(b2, l_span, a_db, gam, f_ch, b_ch, power, n_ch):
|
||||
""" gn_analytic computes the GN reference formula via analytical solution.
|
||||
|
||||
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
:param l_span: Fiber Span length in km. Scalar
|
||||
:param a_db: Fiber loss coefficient in dB/km. Scalar
|
||||
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
|
||||
:param power: Channels' power values in W. Array of size 1xNch
|
||||
:param n_ch: Number of channels. Scalar
|
||||
:return: g_nli: power spectral density in W/THz of the nonlinear interference at frequencies model_param['f_array']
|
||||
"""
|
||||
g_ch = power / b_ch
|
||||
alpha_lin = a_db / 20.0 / np.log10(np.e) # Conversion in linear units 1/km
|
||||
l_eff = (1.0 - np.exp(-2.0 * alpha_lin * l_span)) / (2.0 * alpha_lin) # Effective length
|
||||
l_eff_a = 1.0 / (2.0 * alpha_lin) # Asymptotic effective length
|
||||
g_nli = np.zeros(f_ch.size)
|
||||
for ind in np.arange(0, f_ch.size):
|
||||
g_nli[ind] = analytic_formula(ind, b2, l_eff, l_eff_a, gam, f_ch, g_ch, b_ch, n_ch)
|
||||
|
||||
return g_nli
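# Minimal usage sketch with assumed, roughly SSMF-like parameters (all numbers
# below are illustrative, not taken from the original code): three 32 GBaud
# channels on a 50 GHz grid, 0 dBm per channel, over a single 100 km span.
example_f_ch = np.array([-0.05, 0.0, 0.05])       # baseband centre frequencies [THz]
example_b_ch = np.array([0.032, 0.032, 0.032])    # -3 dB bandwidths [THz]
example_power = np.array([1e-3, 1e-3, 1e-3])      # per-channel power [W]
example_g_nli = gn_analytic(b2=21.27, l_span=100.0, a_db=0.2, gam=1.27,
                            f_ch=example_f_ch, b_ch=example_b_ch,
                            power=example_power, n_ch=3)   # NLI PSD [W/THz]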
|
||||
|
||||
|
||||
def get_f_computed_interp(f_ch, n_not_interp):
|
||||
""" get_f_computed_array returns the arrays containing the frequencies at which g_nli is computed and interpolated.
|
||||
|
||||
:param f_ch: the overall frequency array. Array of size 1xnum_ch
|
||||
:param n_not_interp: the number of points at which g_nli has to be computed
|
||||
:return: f_nli_comp: the array containing the frequencies at which g_nli is computed
|
||||
:return: f_nli_interp: the array containing the frequencies at which g_nli is interpolated
|
||||
"""
|
||||
num_ch = len(f_ch)
|
||||
if num_ch < n_not_interp: # It's useless to compute g_nli in a number of points larger than num_ch
|
||||
n_not_interp = num_ch
|
||||
|
||||
# Compute f_nli_comp
|
||||
n_not_interp_left = np.ceil((n_not_interp - 1.0) / 2.0)
|
||||
n_not_interp_right = np.floor((n_not_interp - 1.0) / 2.0)
|
||||
central_index = len(f_ch) // 2
|
||||
# print(central_index)
|
||||
|
||||
f_nli_central = np.array([f_ch[central_index]], copy=True)
|
||||
|
||||
if n_not_interp_left > 0:
|
||||
index = np.linspace(0, central_index - 1, n_not_interp_left, dtype='int')
|
||||
f_nli_left = np.array(f_ch[index], copy=True)
|
||||
else:
|
||||
f_nli_left = np.array([])
|
||||
|
||||
if n_not_interp_right > 0:
|
||||
index = np.linspace(-1, -central_index, n_not_interp_right, dtype='int')
|
||||
f_nli_right = np.array(f_ch[index], copy=True)
|
||||
f_nli_right = f_nli_right[::-1] # Reverse the order of the array
|
||||
else:
|
||||
f_nli_right = np.array([])
|
||||
|
||||
f_nli_comp = np.concatenate([f_nli_left, f_nli_central, f_nli_right])
|
||||
|
||||
# Compute f_nli_interp
|
||||
f_ch_sorted = np.sort(f_ch)
|
||||
index = np.searchsorted(f_ch_sorted, f_nli_comp)
|
||||
|
||||
f_nli_interp = np.array(f_ch, copy=True)
|
||||
f_nli_interp = np.delete(f_nli_interp, index)
|
||||
return f_nli_comp, f_nli_interp
|
||||
|
||||
|
||||
def interpolate_in_range(x, y, x_new, kind_interp):
|
||||
""" Given some samples y of the function y(x), interpolate_in_range returns the interpolation of values y(x_new)
|
||||
|
||||
:param x: The points at which y(x) is evaluated. Array
|
||||
:param y: The values of y(x). Array
|
||||
:param x_new: The values at which y(x) has to be interpolated. Array
|
||||
:param kind_interp: The interpolation method of the function scipy.interpolate.interp1d. String
|
||||
:return: y_new: the new interpolates samples
|
||||
"""
|
||||
if x.size == 1:
|
||||
y_new = y * np.ones(x_new.size)
|
||||
elif x.size == 2:
|
||||
x = np.append(x, x_new[-1])
|
||||
y = np.append(y, y[-1])
|
||||
func = interp.interp1d(x, y, kind=kind_interp, bounds_error=False)
|
||||
y_new = func(x_new)
|
||||
else:
|
||||
func = interp.interp1d(x, y, kind=kind_interp, bounds_error=False)
|
||||
y_new = func(x_new)
|
||||
|
||||
return y_new
|
||||
|
||||
|
||||
def gn_model(spectrum_param, fiber_param, accuracy_param, n_cores):
|
||||
""" gn_model can compute the gn model both analytically or through the smart brute force
|
||||
integral.
|
||||
|
||||
:param spectrum_param: Dictionary with spectrum parameters
|
||||
spectrum_param['num_ch']: Number of channels. Scalar
|
||||
spectrum_param['f_ch']: Baseband channels center frequencies in THz. Array of size 1xnum_ch
|
||||
spectrum_param['b_ch']: Channels' -3 dB band [THz]. Array of size 1xnum_ch
|
||||
spectrum_param['roll_off']: Channels' Roll-off factors [0,1). Array of size 1xnum_ch
|
||||
spectrum_param['power']: Channels' power values in W. Array of size 1xnum_ch
|
||||
:param fiber_param: Dictionary with the parameters of the fiber
|
||||
fiber_param['alpha']: Fiber loss coefficient in dB/km. Scalar
|
||||
fiber_param['span_length']: Fiber Span length in km. Scalar
|
||||
fiber_param['beta_2']: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
fiber_param['gamma']: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param accuracy_param: Dictionary with model parameters for accuracy tuning
|
||||
accuracy_param['is_analytic']: A boolean indicating if you want to compute the NLI through
|
||||
the analytic formula (is_analytic = True) or the smart brute force integration (is_analytic =
|
||||
False). Boolean
|
||||
accuracy_param['points_not_interp']: The number of NLI which will be calculated. Others are
|
||||
interpolated
|
||||
accuracy_param['kind_interp']: The kind of interpolation using the function
|
||||
scipy.interpolate.interp1d
|
||||
accuracy_param['th_fwm']: Minimum FWM efficiency value to be considered for high density
|
||||
integration in dB
|
||||
accuracy_param['n_points']: Maximum Number of integration points to be used in each frequency
|
||||
slot of the spectrum
|
||||
accuracy_param['n_points_min']: Minimum Number of integration points to be used in each
|
||||
frequency
|
||||
slot of the spectrum
|
||||
:return: g_nli_comp: the NLI power spectral density in W/THz computed through GN model
|
||||
:return: f_nli_comp: the frequencies at which g_nli_comp is evaluated
|
||||
:return: g_nli_interp: the NLI power spectral density in W/THz computed through interpolation of g_nli_comp
|
||||
:return: f_nli_interp: the frequencies at which g_nli_interp is estimated
|
||||
"""
|
||||
# Take signal parameters
|
||||
num_ch = spectrum_param['num_ch']
|
||||
f_ch = spectrum_param['f_ch']
|
||||
b_ch = spectrum_param['b_ch']
|
||||
roll_off = spectrum_param['roll_off']
|
||||
power = spectrum_param['power']
|
||||
|
||||
# Take fiber parameters
|
||||
a_db = fiber_param['alpha']
|
||||
l_span = fiber_param['span_length']
|
||||
beta2 = fiber_param['beta_2']
|
||||
gam = fiber_param['gamma']
|
||||
|
||||
# Take accuracy parameters
|
||||
is_analytic = accuracy_param['is_analytic']
|
||||
n_not_interp = accuracy_param['points_not_interp']
|
||||
kind_interp = accuracy_param['kind_interp']
|
||||
th_fwm = accuracy_param['th_fwm']
|
||||
n_points = accuracy_param['n_points']
|
||||
n_points_min = accuracy_param['n_points_min']
|
||||
|
||||
# Computing NLI
|
||||
if is_analytic: # Analytic solution
|
||||
g_nli_comp = gn_analytic(beta2, l_span, a_db, gam, f_ch, b_ch, power, num_ch)
|
||||
f_nli_comp = np.copy(f_ch)
|
||||
g_nli_interp = []
|
||||
f_nli_interp = []
|
||||
else: # Smart brute force integration
|
||||
f_nli_comp, f_nli_interp = get_f_computed_interp(f_ch, n_not_interp)
|
||||
|
||||
model_param = {'min_FWM_inv': th_fwm, 'n_grid': n_points, 'n_grid_min': n_points_min,
|
||||
'f_array': np.array(f_nli_comp, copy=True)}
|
||||
|
||||
g_nli_comp = GN_integral(beta2, l_span, a_db, gam, f_ch, b_ch, roll_off, power, num_ch, model_param)
|
||||
|
||||
# Interpolation
|
||||
g_nli_interp = interpolate_in_range(f_nli_comp, g_nli_comp, f_nli_interp, kind_interp)
|
||||
|
||||
a_zero = fiber_param['alpha'] * fiber_param['span_length']
|
||||
a_tilting = fiber_param['alpha_1st'] * fiber_param['span_length']
|
||||
|
||||
attenuation_db_comp = compute_attenuation_profile(a_zero, a_tilting, f_nli_comp)
|
||||
attenuation_lin_comp = 10 ** (-abs(attenuation_db_comp) / 10)
|
||||
|
||||
g_nli_comp *= attenuation_lin_comp
|
||||
|
||||
attenuation_db_interp = compute_attenuation_profile(a_zero, a_tilting, f_nli_interp)
|
||||
attenuation_lin_interp = 10 ** (-np.abs(attenuation_db_interp) / 10)
|
||||
|
||||
g_nli_interp *= attenuation_lin_interp
|
||||
|
||||
return g_nli_comp, f_nli_comp, g_nli_interp, f_nli_interp
|
||||
|
||||
|
||||
def compute_gain_profile(gain_zero, gain_tilting, freq):
|
||||
""" compute_gain_profile evaluates the gain at the frequencies freq.
|
||||
|
||||
:param gain_zero: the gain at f=0 in dB. Scalar
|
||||
:param gain_tilting: the gain tilt in dB/THz. Scalar
|
||||
:param freq: the baseband frequencies at which the gain profile is computed in THz. Array
|
||||
:return: gain: the gain profile in dB
|
||||
"""
|
||||
gain = gain_zero + gain_tilting * freq
|
||||
return gain
|
||||
|
||||
|
||||
def compute_ase_noise(noise_fig, gain, central_freq, freq):
|
||||
""" compute_ase_noise evaluates the ASE spectral density at the frequencies freq.
|
||||
|
||||
:param noise_fig: the amplifier noise figure in dB. Scalar
|
||||
:param gain: the gain profile in dB at the frequencies contained in freq array. Array
|
||||
:param central_freq: the central frequency of the WDM comb. Scalar
|
||||
:param freq: the baseband frequencies at which the ASE noise is computed in THz. Array
|
||||
:return: g_ase: the ase noise profile
|
||||
"""
|
||||
# the Planck constant in W/THz^2
|
||||
planck = (6.62607004 * 1e-34) * 1e24
|
||||
|
||||
# Conversion from dB to linear
|
||||
gain_lin = np.power(10, gain / 10.0)
|
||||
noise_fig_lin = np.power(10, noise_fig / 10.0)
|
||||
|
||||
g_ase = (gain_lin - 1) * noise_fig_lin * planck * (central_freq + freq)
|
||||
return g_ase
|
||||
|
||||
|
||||
def compute_edfa_profile(gain_zero, gain_tilting, noise_fig, central_freq, freq):
|
||||
""" compute_edfa_profile evaluates the gain profile and the ASE spectral density at the frequencies freq.
|
||||
|
||||
:param gain_zero: the gain at f=0 in dB. Scalar
|
||||
:param gain_tilting: the gain tilt in dB/THz. Scalar
|
||||
:param noise_fig: the amplifier noise figure in dB. Scalar
|
||||
:param central_freq: the central frequency of the WDM comb. Scalar
|
||||
:param freq: the baseband frequencies at which the ASE noise is computed in THz. Array
|
||||
:return: gain: the gain profile in dB
|
||||
:return: g_ase: the ase noise profile in W/THz
|
||||
"""
|
||||
gain = compute_gain_profile(gain_zero, gain_tilting, freq)
|
||||
g_ase = compute_ase_noise(noise_fig, gain, central_freq, freq)
|
||||
|
||||
return gain, g_ase
|
||||
|
||||
|
||||
def compute_attenuation_profile(a_zero, a_tilting, freq):
|
||||
"""compute_attenuation_profile returns the attenuation profile at the frequencies freq
|
||||
|
||||
:param a_zero: the attenuation [dB] @ the baseband central frequency. Scalar
|
||||
:param a_tilting: the attenuation tilt in dB/THz. Scalar
|
||||
:param freq: the baseband frequencies at which attenuation is computed [THz]. Array
|
||||
:return: attenuation: the attenuation profile in dB
|
||||
"""
|
||||
|
||||
if len(freq):
|
||||
attenuation = a_zero + a_tilting * freq
|
||||
|
||||
# abs in order to avoid ambiguity due to the sign convention
|
||||
attenuation = abs(attenuation)
|
||||
else:
|
||||
attenuation = []
|
||||
|
||||
return attenuation
|
||||
|
||||
|
||||
def passive_component(spectrum, a_zero, a_tilting, freq):
|
||||
"""passive_component updates the input spectrum with the attenuation described by a_zero and a_tilting
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param a_zero: attenuation at the central frequency [dB]. Scalar
|
||||
:param a_tilting: attenuation tilting [dB/THz]. Scalar
|
||||
:param freq: the baseband frequency of each WDM channel [THz]. Array
|
||||
:return: None
|
||||
"""
|
||||
attenuation_db = compute_attenuation_profile(a_zero, a_tilting, freq)
|
||||
attenuation_lin = 10 ** np.divide(-abs(attenuation_db), 10.0)
|
||||
|
||||
for index, s in enumerate(spectrum['signals']):
|
||||
spectrum['signals'][index]['p_ch'] *= attenuation_lin[index]
|
||||
spectrum['signals'][index]['p_nli'] *= attenuation_lin[index]
|
||||
spectrum['signals'][index]['p_ase'] *= attenuation_lin[index]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def optical_amplifier(spectrum, gain_zero, gain_tilting, noise_fig, central_freq, freq, b_eq):
|
||||
"""optical_amplifier updates the input spectrum with the gain described by gain_zero and gain_tilting plus ASE noise
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param gain_zero: gain at the central frequency [dB]. Scalar
|
||||
:param gain_tilting: gain tilting [dB/THz]. Scalar
|
||||
:param noise_fig: the noise figure of the amplifier [dB]. Scalar
|
||||
:param central_freq: the central frequency of the optical band [THz]. Scalar
|
||||
:param freq: the central frequency of each WDM channel [THz]. Array
|
||||
:param b_eq: the equivalent -3 dB bandwidth of each WDM channel [THZ]. Array
|
||||
:return: None
|
||||
"""
|
||||
|
||||
gain_db, g_ase = compute_edfa_profile(gain_zero, gain_tilting, noise_fig, central_freq, freq)
|
||||
|
||||
p_ase = np.multiply(g_ase, b_eq)
|
||||
|
||||
gain_lin = 10 ** np.divide(gain_db, 10.0)
|
||||
|
||||
for index, s in enumerate(spectrum['signals']):
|
||||
spectrum['signals'][index]['p_ch'] *= gain_lin[index]
|
||||
spectrum['signals'][index]['p_nli'] *= gain_lin[index]
|
||||
spectrum['signals'][index]['p_ase'] *= gain_lin[index]
|
||||
spectrum['signals'][index]['p_ase'] += p_ase[index]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def fiber(spectrum, fiber_param, fiber_length, f_ch, b_ch, roll_off, control_param):
|
||||
""" fiber updates spectrum with the effects of the fiber
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param fiber_param: Dictionary with the parameters of the fiber
|
||||
fiber_param['alpha']: Fiber loss coefficient in dB/km. Scalar
|
||||
fiber_param['beta_2']: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
fiber_param['n_2']: second-order nonlinear refractive index [m^2/W]. Scalar
|
||||
fiber_param['a_eff']: the effective area of the fiber [um^2]. Scalar
|
||||
:param fiber_length: the span length [km]. Scalar
|
||||
:param f_ch: the baseband frequencies of the WDM channels [THz]. Scalar
|
||||
:param b_ch: the -3 dB bandwidth of each WDM channel [THz]. Array
|
||||
:param roll_off: the roll off of each WDM channel. Array
|
||||
:param control_param: Dictionary with the control parameters
|
||||
control_param['save_each_comp']: a boolean flag. If true, it saves in output folder one spectrum file at
|
||||
the output of each component, otherwise it saves just the last spectrum. Boolean
|
||||
control_param['is_linear']: a bool flag. If true, it doesn't compute NLI; if false, OLE will consider
|
||||
NLI. Boolean
|
||||
control_param['is_analytic']: a boolean flag. If true, the NLI is computed through the analytic
|
||||
formula, otherwise it uses the double integral. Warning: the double integral is very slow. Boolean
|
||||
control_param['points_not_interp']: if the double integral is used, it indicates how much points are
|
||||
calculated, others will be interpolated. Scalar
|
||||
control_param['kind_interp']: the interpolation method when double integral is used. String
|
||||
control_param['th_fwm']: the threshold of the four wave mixing efficiency for the double integral. Scalar
|
||||
control_param['n_points']: number of points in the high FWM efficiency region in which the double
|
||||
integral is computed. Scalar
|
||||
control_param['n_points_min']: number of points in which the double integral is computed in the low FWM
|
||||
efficiency region. Scalar
|
||||
control_param['n_cores']: number of cores for parallel computation [not yet implemented]. Scalar
|
||||
:return: None
|
||||
"""
|
||||
|
||||
n_cores = control_param['n_cores']
|
||||
|
||||
# Evaluation of NLI
|
||||
if not control_param['is_linear']:
|
||||
num_ch = len(spectrum['signals'])
|
||||
spectrum_param = {
|
||||
'num_ch': num_ch,
|
||||
'f_ch': f_ch,
|
||||
'b_ch': b_ch,
|
||||
'roll_off': roll_off
|
||||
}
|
||||
|
||||
p_ch = np.zeros(num_ch)
|
||||
for index, signal in enumerate(spectrum['signals']):
|
||||
p_ch[index] = signal['p_ch']
|
||||
|
||||
spectrum_param['power'] = p_ch
|
||||
fiber_param['span_length'] = fiber_length
|
||||
|
||||
nli_cmp, f_nli_cmp, nli_int, f_nli_int = gn_model(spectrum_param, fiber_param, control_param, n_cores)
|
||||
f_nli = np.concatenate((f_nli_cmp, f_nli_int))
|
||||
order = np.argsort(f_nli)
|
||||
g_nli = np.concatenate((nli_cmp, nli_int))
|
||||
g_nli = np.array(g_nli)[order]
|
||||
|
||||
p_nli = np.multiply(g_nli, b_ch)
|
||||
|
||||
a_zero = fiber_param['alpha'] * fiber_length
|
||||
a_tilting = fiber_param['alpha_1st'] * fiber_length
|
||||
|
||||
# Apply attenuation
|
||||
passive_component(spectrum, a_zero, a_tilting, f_ch)
|
||||
|
||||
# Apply NLI
|
||||
if not control_param['is_linear']:
|
||||
for index, s in enumerate(spectrum['signals']):
|
||||
spectrum['signals'][index]['p_nli'] += p_nli[index]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_frequencies_wdm(spectrum, sys_param):
|
||||
""" the function computes the central frequency of the WDM comb and the frequency of each channel.
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param sys_param: a dictionary containing the system parameters:
|
||||
'f0': the starting frequency, i.e the frequency of the first spectral slot [THz]
|
||||
'ns': the number of spectral slots. The space between two slots is 6.25 GHz
|
||||
:return: f_cent: the central frequency of the WDM comb [THz]
|
||||
:return: f_ch: the baseband frequency of each WDM channel [THz]
|
||||
"""
|
||||
|
||||
delta_f = 6.25E-3
|
||||
# Evaluate the central frequency
|
||||
f0 = sys_param['f0']
|
||||
ns = sys_param['ns']
|
||||
|
||||
f_cent = f0 + ((ns // 2.0) * delta_f)
|
||||
|
||||
# Evaluate the baseband frequencies
|
||||
n_ch = spectrum['laser_position'].count(1)
|
||||
f_ch = np.zeros(n_ch)
|
||||
count = 0
|
||||
for index, bool_laser in enumerate(spectrum['laser_position']):
|
||||
if bool_laser:
|
||||
f_ch[count] = (f0 - f_cent) + delta_f * index
|
||||
count += 1
|
||||
|
||||
return f_cent, f_ch
|
||||
|
||||
|
||||
def get_spectrum_param(spectrum):
|
||||
""" the function returns the number of WDM channels and 3 arrays containing the power, the equivalent bandwidth
|
||||
and the roll off of each WDM channel.
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:return: power: the power of each WDM channel [W]
|
||||
:return: b_eq: the equivalent bandwidth of each WDM channel [THz]
|
||||
:return: roll_off: the roll off of each WDM channel
|
||||
:return: p_ase: the power of the ASE noise [W]
|
||||
:return: p_nli: the power of NLI [W]
|
||||
:return: n_ch: the number of WDM channels
|
||||
"""
|
||||
|
||||
n_ch = spectrum['laser_position'].count(1)
|
||||
roll_off = np.zeros(n_ch)
|
||||
b_eq = np.zeros(n_ch)
|
||||
power = np.zeros(n_ch)
|
||||
p_ase = np.zeros(n_ch)
|
||||
p_nli = np.zeros(n_ch)
|
||||
for index, signal in enumerate(spectrum['signals']):
|
||||
b_eq[index] = signal['b_ch']
|
||||
roll_off[index] = signal['roll_off']
|
||||
power[index] = signal['p_ch']
|
||||
p_ase[index] = signal['p_ase']
|
||||
p_nli[index] = signal['p_nli']
|
||||
|
||||
return power, b_eq, roll_off, p_ase, p_nli, n_ch
|
||||
|
||||
|
||||
def change_component_ref(f_ref, link, fibers):
|
||||
""" it updates the reference frequency of OA gain, PC attenuation and fiber attenuation coefficient
|
||||
|
||||
:param f_ref: the new reference frequency [THz]. Scalar
|
||||
:param link: the link structure. A list in which each element indicates one link component (PC, OA or fiber). List
|
||||
:param fibers: a dictionary containing the description of each fiber type. Dictionary
|
||||
:return: None
|
||||
"""
|
||||
|
||||
light_speed = 3e8 # [m/s]
|
||||
|
||||
# Change reference to the central frequency f_cent for OA and PC
|
||||
for index, component in enumerate(link):
|
||||
if component['comp_cat'] == 'PC':
|
||||
|
||||
old_loss = component['loss']
|
||||
delta_loss = component['loss_tlt']
|
||||
old_ref = component['ref_freq']
|
||||
new_loss = old_loss + delta_loss * (f_ref - old_ref)
|
||||
|
||||
link[index]['ref_freq'] = f_ref
|
||||
link[index]['loss'] = new_loss
|
||||
|
||||
elif component['comp_cat'] == 'OA':
|
||||
|
||||
old_gain = component['gain']
|
||||
delta_gain = component['gain_tlt']
|
||||
old_ref = component['ref_freq']
|
||||
new_gain = old_gain + delta_gain * (f_ref - old_ref)
|
||||
|
||||
link[index]['ref_freq'] = f_ref
|
||||
link[index]['gain'] = new_gain
|
||||
|
||||
elif component['comp_cat'] != 'fiber':
|
||||
|
||||
error_string = 'Error in link structure: the ' + str(index+1) + '-th component has an unknown category \n'\
|
||||
+ 'allowed values are (case sensitive): PC, OA and fiber'
|
||||
print(error_string)
|
||||
|
||||
# Change reference to the central frequency f_cent for fiber
|
||||
for fib_type in fibers:
|
||||
old_ref = fibers[fib_type]['reference_frequency']
|
||||
old_alpha = fibers[fib_type]['alpha']
|
||||
alpha_1st = fibers[fib_type]['alpha_1st']
|
||||
new_alpha = old_alpha + alpha_1st * (f_ref - old_ref)
|
||||
|
||||
fibers[fib_type]['reference_frequency'] = f_ref
|
||||
fibers[fib_type]['alpha'] = new_alpha
|
||||
|
||||
fibers[fib_type]['gamma'] = (2 * np.pi) * (f_ref / light_speed) * \
|
||||
(fibers[fib_type]['n_2'] / fibers[fib_type]['a_eff']) * 1e27
|
||||
|
||||
return None


def compute_and_save_osnr(spectrum, flag_save=False, file_name='00', output_path='./output/'):
    """ Given the spectrum structure, the function returns the linear and non-linear OSNR. If the boolean variable
    flag_save is True, the function also saves the OSNR values for the central channel, the OSNR for each channel and
    the spectrum in a file with the name file_name, in the folder indicated by output_path.

    :param spectrum: the spectrum dictionary containing the laser position (a list of booleans) and the list signals,
        which is a list of dictionaries (one for each channel) containing:
            'b_ch': the -3 dB bandwidth of the signal [THz]
            'roll_off': the roll off of the signal
            'p_ch': the signal power [W]
            'p_nli': the equivalent NLI power [W]
            'p_ase': the ASE noise power [W]
    :param flag_save: if True it saves all the data, otherwise it doesn't
    :param file_name: the name of the file in which the variables are saved
    :param output_path: the path in which you want to save the file
    :return: osnr_nli_db: the non-linear equivalent OSNR [dB]
    :return: osnr_lin_db: the linear OSNR [dB]
    """

    # Get the parameters from spectrum
    p_ch, b_eq, roll_off, p_ase, p_nli, n_ch = get_spectrum_param(spectrum)

    # Compute the linear OSNR
    if (p_ase == 0).any():
        osnr_lin = np.zeros(n_ch)
        for index, p_noise in enumerate(p_ase):
            if p_noise == 0:
                osnr_lin[index] = float('inf')
            else:
                osnr_lin[index] = p_ch[index] / p_noise
    else:
        osnr_lin = np.divide(p_ch, p_ase)

    # Compute the non-linear OSNR
    if ((p_ase + p_nli) == 0).any():
        osnr_nli = np.zeros(n_ch)
        for index, p_noise in enumerate(p_ase + p_nli):
            if p_noise == 0:
                osnr_nli[index] = float('inf')
            else:
                osnr_nli[index] = p_ch[index] / p_noise
    else:
        osnr_nli = np.divide(p_ch, p_ase + p_nli)

    # Compute linear and non-linear OSNR for the central channel
    ind_c = n_ch // 2
    osnr_lin_central_channel_db = 10 * np.log10(osnr_lin[ind_c])
    osnr_nl_central_channel_db = 10 * np.log10(osnr_nli[ind_c])

    # Conversion in dB
    osnr_lin_db = 10 * np.log10(osnr_lin)
    osnr_nli_db = 10 * np.log10(osnr_nli)

    # Save spectrum, the non-linear OSNR and the linear OSNR
    out_file_name = output_path + file_name

    if flag_save:
        f = open(out_file_name, 'w')
        f.write(''.join(('# Output parameters. The values of OSNR are evaluated in the -3 dB channel band', '\n\n')))
        f.write(''.join(('osnr_lin_central_channel_db = ', str(osnr_lin_central_channel_db), '\n\n')))
        f.write(''.join(('osnr_nl_central_channel_db = ', str(osnr_nl_central_channel_db), '\n\n')))
        f.write(''.join(('osnr_lin_db = ', str(osnr_lin_db), '\n\n')))
        f.write(''.join(('osnr_nl_db = ', str(osnr_nli_db), '\n\n')))
        f.write(''.join(('spectrum = ', str(spectrum), '\n')))
        f.close()

    return osnr_nli_db, osnr_lin_db
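
For intuition, a small worked example (values invented for illustration): with a channel power of 1 mW, 1 µW of accumulated ASE and 2 µW of accumulated NLI in the -3 dB band, the linear OSNR is 1e-3 / 1e-6 = 1000 (30 dB) and the non-linear equivalent OSNR is 1e-3 / 3e-6 ≈ 333 (about 25.2 dB).

p_ch, p_ase, p_nli = 1e-3, 1e-6, 2e-6                 # [W], invented values
osnr_lin_db = 10 * np.log10(p_ch / p_ase)             # 30.0 dB
osnr_nli_db = 10 * np.log10(p_ch / (p_ase + p_nli))   # ~25.2 dB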


def ole(spectrum, link, fibers, sys_param, control_param, output_path='./output/'):
    """ The function takes the input spectrum, the link description, the fiber description, the system parameters,
    the control parameters and a string describing the destination folder of the output files. After the function is
    executed, the spectrum is updated with all the impairments of the link. The function also returns the linear and
    non-linear OSNR, computed in the equivalent bandwidth.

    :param spectrum: the spectrum dictionary containing the laser position (a list of booleans) and the list signals,
        which is a list of dictionaries (one for each channel) containing:
            'b_ch': the -3 dB bandwidth of the signal [THz]
            'roll_off': the roll off of the signal
            'p_ch': the signal power [W]
            'p_nli': the equivalent NLI power [W]
            'p_ase': the ASE noise power [W]
    :param link: the link structure. A list in which each element is a dictionary describing one link component
        (PC, OA or fiber). List
    :param fibers: a dictionary containing a dictionary for each kind of fiber. Each of them has to report:
            reference_frequency: the frequency at which the parameters are evaluated [THz]
            alpha: the attenuation coefficient [dB/km]
            alpha_1st: the first derivative of alpha, i.e. the alpha slope [dB/km/THz];
                set it to zero to assume a flat attenuation with respect to frequency
            beta_2: the dispersion coefficient [ps^2/km]
            n_2: the second-order nonlinear refractive index [m^2/W]; a typical value is 2.5E-20 m^2/W
            a_eff: the effective area of the fiber [um^2]
    :param sys_param: a dictionary containing the general system parameters:
            f0: the starting frequency of the laser grid used to describe the WDM system
            ns: the number of 6.25 GHz slots in the grid
    :param control_param: a dictionary containing the following parameters:
            save_each_comp: a boolean flag. If True, one spectrum file is saved in the output folder at the output
                of each component, otherwise only the last spectrum is saved
            is_linear: a boolean flag. If True, the NLI is not computed; if False, OLE takes the NLI into account
            is_analytic: a boolean flag. If True, the NLI is computed through the analytic formula, otherwise the
                double integral is used. Warning: the double integral is very slow.
            points_not_interp: if the double integral is used, it indicates how many points are actually computed;
                the others are interpolated
            kind_interp: a string indicating the interpolation method for the double integral
            th_fwm: the threshold of the four wave mixing efficiency for the double integral
            n_points: number of points in which the double integral is computed in the high FWM efficiency region
            n_points_min: number of points in which the double integral is computed in the low FWM efficiency region
            n_cores: number of cores for parallel computation [not yet implemented]
    :param output_path: the path in which the output files are saved. String
    :return: osnr_nli_db: an array containing the non-linear OSNR [dB], one value for each WDM channel. Array
    :return: osnr_lin_db: an array containing the linear OSNR [dB], one value for each WDM channel. Array
    """

    # Take control parameters
    flag_save_each_comp = control_param['save_each_comp']

    # Evaluate frequency parameters
    f_cent, f_ch = get_frequencies_wdm(spectrum, sys_param)

    # Evaluate spectrum parameters
    power, b_eq, roll_off, p_ase, p_nli, n_ch = get_spectrum_param(spectrum)

    # Change reference to the central frequency f_cent for OA, PC and fibers
    change_component_ref(f_cent, link, fibers)

    # Emulate the link
    for component in link:
        if component['comp_cat'] == 'PC':
            a_zero = component['loss']
            a_tilting = component['loss_tlt']

            passive_component(spectrum, a_zero, a_tilting, f_ch)

        elif component['comp_cat'] == 'OA':
            gain_zero = component['gain']
            gain_tilting = component['gain_tlt']
            noise_fig = component['noise_figure']

            optical_amplifier(spectrum, gain_zero, gain_tilting, noise_fig, f_cent, f_ch, b_eq)

        elif component['comp_cat'] == 'fiber':
            fiber_type = component['fiber_type']
            fiber_param = fibers[fiber_type]
            fiber_length = component['length']

            fiber(spectrum, fiber_param, fiber_length, f_ch, b_eq, roll_off, control_param)

        else:
            error_string = 'Error in link structure: the ' + component['comp_cat'] + ' category is unknown \n' \
                + 'allowed values are (case sensitive): PC, OA and fiber'
            print(error_string)

        if flag_save_each_comp:
            f_name = 'Output from component ID #' + component['comp_id']
            osnr_nli_db, osnr_lin_db = \
                compute_and_save_osnr(spectrum, flag_save=True, file_name=f_name, output_path=output_path)

    osnr_nli_db, osnr_lin_db = \
        compute_and_save_osnr(spectrum, flag_save=True, file_name='link_output', output_path=output_path)

    return osnr_nli_db, osnr_lin_db
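
A minimal end-to-end call sketch (illustrative only; the exact input dictionaries are normally defined in the companion parameter files, such as spectrum_in.py shown below, and every numeric value here is invented):

# spectrum: as built in spectrum_in.py below (41 channels, 8 slots of 6.25 GHz each, i.e. a 50 GHz grid)
example_fibers = {'SMF': {'reference_frequency': 193.5, 'alpha': 0.2, 'alpha_1st': 0.0,
                          'beta_2': -21.3, 'n_2': 2.5e-20, 'a_eff': 80.0}}
example_link = [
    {'comp_id': '1', 'comp_cat': 'fiber', 'fiber_type': 'SMF', 'length': 100.0},
    {'comp_id': '2', 'comp_cat': 'OA', 'gain': 20.0, 'gain_tlt': 0.0, 'ref_freq': 193.5, 'noise_figure': 5.0},
]
example_sys_param = {'f0': 192.0, 'ns': 328}
example_control_param = {'save_each_comp': False, 'is_linear': False, 'is_analytic': True,
                         'points_not_interp': 2, 'kind_interp': 'linear', 'th_fwm': 50,
                         'n_points': 500, 'n_points_min': 4, 'n_cores': 1}

osnr_nli_db, osnr_lin_db = ole(spectrum, example_link, example_fibers, example_sys_param,
                               example_control_param, output_path='./output/')
# spectrum now carries the accumulated p_ase and p_nli per channel; the returned arrays hold one OSNR value per channel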
@@ -1,29 +0,0 @@
# coding=utf-8
""" spectrum_in.py describes the input spectrum of OLE, i.e. spectrum.

spectrum is a dictionary containing two fields:
    laser_position: a list of booleans indicating whether each laser is turned on or not
    signals: a list of dictionaries, each of them describing one channel in the WDM comb

The laser_position is defined with respect to a frequency grid with 6.25 GHz spacing; the first slot is at the
frequency described by the variable f0 in the dictionary sys_param in the file "general_parameters.py".

Each dictionary element of the list 'signals' describes the profile of a WDM channel:
    b_ch: the -3 dB channel bandwidth (for a root raised cosine, it is equal to the symbol rate)
    roll_off: the roll off parameter of the root raised cosine shape
    p_ch: the channel power [W]
    p_nli: power of the accumulated NLI in b_ch [W]
    p_ase: power of the accumulated ASE noise in b_ch [W]
"""

n_ch = 41

spectrum = {
    'laser_position': [0, 0, 0, 1, 0, 0, 0, 0] * n_ch,
    'signals': [{
        'b_ch': 0.032,
        'roll_off': 0.15,
        'p_ch': 1E-3,
        'p_nli': 0,
        'p_ase': 0
    } for _ in range(n_ch)]
}
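
A quick worked reading of the values above (for illustration): each channel occupies 8 slots of 6.25 GHz, so the comb sits on a 50 GHz grid; with b_ch = 0.032 THz the channels are 32 GBaud root-raised-cosine signals launched at 1 mW, with no accumulated ASE or NLI at the link input.

slot_width = 6.25e-3               # [THz], grid slot
channel_spacing = 8 * slot_width   # 0.05 THz, i.e. 50 GHz between adjacent channels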