Mirror of https://github.com/Telecominfraproject/oopt-gnpy.git (synced 2025-10-30 17:47:50 +00:00)

Compare commits: phase-1...experiment (824 commits, 049b077ee4 through 572d7d6999)
1 .codecov.yml Normal file
@@ -0,0 +1 @@
comment: off
3 .docker-entry.sh Executable file
@@ -0,0 +1,3 @@
#!/bin/bash
cp -nr /oopt-gnpy/examples /shared
exec "$@"
52 .docker-travis.sh Executable file
@@ -0,0 +1,52 @@
#!/bin/bash

set -e

IMAGE_NAME=telecominfraproject/oopt-gnpy
IMAGE_TAG=$(git describe --tags)

if [[ "${TRAVIS_BRANCH}" == "experimental/2019-summit" ]]; then
    IMAGE_NAME=telecominfraproject/oopt-gnpy-experimental
fi

ALREADY_FOUND=0
docker pull ${IMAGE_NAME}:${IMAGE_TAG} && ALREADY_FOUND=1

if [[ $ALREADY_FOUND == 0 ]]; then
    docker build . -t ${IMAGE_NAME}
    docker tag ${IMAGE_NAME} ${IMAGE_NAME}:${IMAGE_TAG}
else
    echo "Image ${IMAGE_NAME}:${IMAGE_TAG} already available, will just update the other tags"
fi

docker images

do_docker_login() {
    echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin
}

if [[ "${TRAVIS_PULL_REQUEST}" == "false" ]]; then
    if [[ "${TRAVIS_BRANCH}" == "develop" || "${TRAVIS_BRANCH}" == "docker" ]]; then
        echo "Publishing latest"
        docker tag ${IMAGE_NAME}:${IMAGE_TAG} ${IMAGE_NAME}:latest
        do_docker_login
        if [[ $ALREADY_FOUND == 0 ]]; then
            docker push ${IMAGE_NAME}:${IMAGE_TAG}
        fi
        docker push ${IMAGE_NAME}:latest
    elif [[ "${TRAVIS_BRANCH}" == "master" ]]; then
        echo "Publishing stable"
        docker tag ${IMAGE_NAME}:${IMAGE_TAG} ${IMAGE_NAME}:stable
        do_docker_login
        if [[ $ALREADY_FOUND == 0 ]]; then
            docker push ${IMAGE_NAME}:${IMAGE_TAG}
        fi
        docker push ${IMAGE_NAME}:stable
    elif [[ "${TRAVIS_BRANCH}" == "experimental/2019-summit" ]]; then
        echo "Publishing ad-hoc image for the TIP Summit demo"
        do_docker_login
        if [[ $ALREADY_FOUND == 0 ]]; then
            docker push ${IMAGE_NAME}:${IMAGE_TAG}
        fi
    fi
fi
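The branch-to-tag mapping above can be exercised locally without Travis by exporting the environment variables the script reads; this is only a sketch, the branch name and credentials below are placeholders, and the push steps will not succeed without valid Docker Hub credentials:

.. code-block:: shell-session

   $ export TRAVIS_PULL_REQUEST=false
   $ export TRAVIS_BRANCH=master              # placeholder: selects the "stable" tag
   $ export DOCKER_USERNAME=example-user      # placeholder credentials
   $ export DOCKER_PASSWORD=example-secret
   $ ./.docker-travis.sh
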
21 .editorconfig
@@ -1,21 +0,0 @@
# http://editorconfig.org

root = true

[*]
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8
end_of_line = lf

[*.bat]
indent_style = tab
end_of_line = crlf

[LICENSE]
insert_final_newline = false

[Makefile]
indent_style = tab
15 .github/ISSUE_TEMPLATE.md vendored
@@ -1,15 +0,0 @@
* gnpy version:
* Python version:
* Operating System:

### Description

Describe what you were trying to get done.
Tell us what happened, what went wrong, and what you expected to happen.

### What I Did

```
Paste the command(s) you ran and the output.
If there was a crash, please include the traceback here.
```
29 .github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
@@ -0,0 +1,29 @@
---
name: Bug report
about: Create a report to help us improve

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Environment:**
- OS: [e.g. Windows]
- Python Version [e.g, 3.7]
- Anaconda Version [e.g. 3.7]

**Additional context**
Add any other context about the problem here.
17 .github/ISSUE_TEMPLATE/feature_request.md vendored Normal file
@@ -0,0 +1,17 @@
---
name: Feature request
about: Suggest an idea for this project

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
4 .gitignore vendored
@@ -2,6 +2,7 @@
__pycache__/
*.py[cod]
*$py.class
.ipynb_checkpoints

# C extensions
*.so
@@ -60,3 +61,6 @@ target/

# pyenv python configuration file
.python-version

# MacOS DS_store
.DS_Store
4 .readthedocs.yml Normal file
@@ -0,0 +1,4 @@
build:
  image: latest
python:
  version: 3.6
52 .travis.yml
@@ -1,29 +1,27 @@
# Config file for automatic testing at travis-ci.org
# This file will be regenerated if you run travis_pypi_setup.py

dist: xenial
sudo: false
language: python
services: docker
python:
  - 3.5
  - 3.4
  - 3.3
  - 2.7
  - 2.6

# command to install dependencies, e.g. pip install -r requirements.txt --use-mirrors
install: pip install -U tox-travis

# command to run tests, e.g. python setup.py test
script: tox

# After you create the Github repo and add it to Travis, run the
# travis_pypi_setup.py script to finish PyPI deployment setup
deploy:
  provider: pypi
  distributions: sdist bdist_wheel
  user: <TBD>
  password:
    secure: PLEASE_REPLACE_ME
  on:
    tags: true
    repo: <TBD>/gnpy
    python: 2.7
  - "3.6"
  - "3.7"
install: skip
script:
  - python setup.py install
  - pip install pytest-cov rstcheck
  - pytest --cov-report=xml --cov=gnpy
  - rstcheck --ignore-roles cite --ignore-directives automodule --recursive --ignore-messages '(Duplicate explicit target name.*)' .
  - ./examples/transmission_main_example.py
  - ./examples/path_requests_run.py
  - ./examples/transmission_main_example.py examples/raman_edfa_example_network.json --sim examples/sim_params.json --show-channels
  - sphinx-build docs/ x-throwaway-location
after_success:
  - bash <(curl -s https://codecov.io/bash)
jobs:
  include:
    - stage: test
      name: Docker image
      script:
        - git fetch --unshallow
        - ./.docker-travis.sh
        - docker images
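The new test stage can be approximated locally; this is a sketch that assumes a checkout of the repository in a Python >=3.6 environment and simply replays the script section above:

.. code-block:: shell-session

   $ python setup.py install
   $ pip install pytest-cov rstcheck
   $ pytest --cov-report=xml --cov=gnpy
   $ rstcheck --ignore-roles cite --ignore-directives automodule --recursive --ignore-messages '(Duplicate explicit target name.*)' .
   $ ./examples/transmission_main_example.py
   $ ./examples/path_requests_run.py
   $ sphinx-build docs/ x-throwaway-location
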
8 .zuul.yaml Normal file
@@ -0,0 +1,8 @@
---
- project:
    check:
      jobs:
        - noop
    gate:
      jobs:
        - noop
36 AUTHORS.rst
@@ -1,13 +1,29 @@
=======
Credits
=======
gnpy is written and maintained by the Telecom Infra Project with work
contributed by the following TIP members.

Development Lead
----------------
To learn how to contribute, please see CONTRIBUTING.md

* <TBD> <<TBD>@<TBD>.com>
(*in alphabetical order*)

Contributors
------------

None yet. Why not be the first?
- Alessio Ferrari (Politecnico di Torino) <alessio.ferrari@polito.it>
- Anders Lindgren (Telia Company) <Anders.X.Lindgren@teliacompany.com>
- Andrea d'Amico (Politecnico di Torino) <andrea.damico@polito.it>
- Brian Taylor (Facebook) <briantaylor@fb.com>
- David Boertjes (Ciena) <dboertje@ciena.com>
- Diego Landa (Facebook) <dlanda@fb.com>
- Esther Le Rouzic (Orange) <esther.lerouzic@orange.com>
- Gabriele Galimberti (Cisco) <ggalimbe@cisco.com>
- Gert Grammel (Juniper Networks) <ggrammel@juniper.net>
- Gilad Goldfarb (Facebook) <giladg@fb.com>
- James Powell (Telecom Infra Project) <james.powell@telecominfraproject.com>
- Jan Kundrát (Telecom Infra Project) <jan.kundrat@telecominfraproject.com>
- Jeanluc Augé (Orange) <jeanluc.auge@orange.com>
- Jonas Mårtensson (RISE) <jonas.martensson@ri.se>
- Mattia Cantono (Politecnico di Torino) <mattia.cantono@polito.it>
- Miguel Garrich (University Catalunya) <miquel.garrich@upct.es>
- Raj Nagarajan (Lumentum) <raj.nagarajan@lumentum.com>
- Roberts Miculens (Lattelecom) <roberts.miculens@lattelecom.lv>
- Shengxiang Zhu (University of Arizona) <szhu@email.arizona.edu>
- Stefan Melin (Telia Company) <Stefan.Melin@teliacompany.com>
- Vittorio Curri (Politecnico di Torino) <vittorio.curri@polito.it>
- Xufeng Liu (Jabil) <xufeng_liu@jabil.com>
114 CONTRIBUTING.rst
@@ -1,114 +0,0 @@
.. highlight:: shell

============
Contributing
============

Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.

You can contribute in many ways:

Types of Contributions
----------------------

Report Bugs
~~~~~~~~~~~

Report bugs at https://github.com/<TBD>/gnpy/issues.

If you are reporting a bug, please include:

* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.

Fix Bugs
~~~~~~~~

Look through the GitHub issues for bugs. Anything tagged with "bug"
and "help wanted" is open to whoever wants to implement it.

Implement Features
~~~~~~~~~~~~~~~~~~

Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.

Write Documentation
~~~~~~~~~~~~~~~~~~~

gnpy could always use more documentation, whether as part of the
official gnpy docs, in docstrings, or even on the web in blog posts,
articles, and such.

Submit Feedback
~~~~~~~~~~~~~~~

The best way to send feedback is to file an issue at https://github.com/<TBD>/gnpy/issues.

If you are proposing a feature:

* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
  are welcome :)

Get Started!
------------

Ready to contribute? Here's how to set up `gnpy` for local development.

1. Fork the `gnpy` repo on GitHub.
2. Clone your fork locally::

    $ git clone git@github.com:your_name_here/gnpy.git

3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::

    $ mkvirtualenv gnpy
    $ cd gnpy/
    $ python setup.py develop

4. Create a branch for local development::

    $ git checkout -b name-of-your-bugfix-or-feature

   Now you can make your changes locally.

5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::

    $ flake8 gnpy tests
    $ python setup.py test or py.test
    $ tox

   To get flake8 and tox, just pip install them into your virtualenv.

6. Commit your changes and push your branch to GitHub::

    $ git add .
    $ git commit -m "Your detailed description of your changes."
    $ git push origin name-of-your-bugfix-or-feature

7. Submit a pull request through the GitHub website.

Pull Request Guidelines
-----------------------

Before you submit a pull request, check that it meets these guidelines:

1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
   your new functionality into a function with a docstring, and add the
   feature to the list in README.rst.
3. The pull request should work for Python 2.6, 2.7, 3.3, 3.4 and 3.5, and for PyPy. Check
   https://travis-ci.org/<TBD>/gnpy/pull_requests
   and make sure that the tests pass for all supported Python versions.

Tips
----

To run a subset of tests::

    $ py.test tests.test_gnpy
8 Dockerfile Normal file
@@ -0,0 +1,8 @@
FROM python:3.7-slim
COPY . /oopt-gnpy
WORKDIR /oopt-gnpy
RUN python setup.py install
WORKDIR /shared
ENTRYPOINT ["/oopt-gnpy/.docker-entry.sh"]
CMD ["python", "examples/path_requests_run.py", "examples/2019-demo-topology.json", "examples/2019-demo-services.json", "examples/2019-demo-equipment.json", "--rest"]
EXPOSE 5000
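A local build of this image can be exercised roughly as follows; this is only a sketch, the tag matches the name used by .docker-travis.sh, the volume mount matches the entrypoint's /shared directory, and the port mapping is merely an illustration of reaching the REST service on the exposed port 5000:

.. code-block:: shell-session

   $ docker build -t telecominfraproject/oopt-gnpy .
   $ docker run -it --rm --volume $(pwd):/shared -p 5000:5000 telecominfraproject/oopt-gnpy
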
251 Excel_userguide.rst Normal file
@@ -0,0 +1,251 @@

How to prepare the Excel input file
-----------------------------------

`examples/transmission_main_example.py <examples/transmission_main_example.py>`_ gives the possibility to use an Excel input file instead of a JSON file. The program then generates the corresponding JSON file for you.

The file named 'meshTopologyExampleV2.xls' is an example.

In order to work, the Excel file MUST contain at least 2 sheets:

- Nodes
- Links

(In progress) The file MAY contain additional sheets:

- Eqpt
- Service

Nodes sheet
-----------

The Nodes sheet contains nine columns.
Each line represents a 'node' (a ROADM site, an in-line amplifier (ILA) site, or a Fused site)::

   City (Mandatory) ; State ; Country ; Region ; Latitude ; Longitude ; Type

- **City** is used for the name of a node of the graph. It accepts letters, numbers, underscores, dashes, blanks... (not exhaustive). The user may want to avoid commas for future CSV exports.

  **City name MUST be unique.**

- **Type** is not mandatory.

  - If not filled, it will be interpreted as an 'ILA' site if the node degree is 2 and as a ROADM otherwise.
  - If filled, it can take the values "ROADM", "FUSED" or "ILA". If another string is used, it will be considered as not filled. FUSED means that ingress and egress spans will be fused together.

- *State*, *Country*, *Region* are not mandatory.
  "Region" is a holdover from the CORONET topology reference file `CORONET_Global_Topology.xls <examples/CORONET_Global_Topology.xls>`_. CORONET separates its network into geographical regions (Europe, Asia, Continental US). This information is not used by gnpy.

- *Longitude*, *Latitude* are not mandatory. If filled, they should contain numbers.

- **Booster_restriction** and **Preamp_restriction** are not mandatory.
  If used, they must contain one or several amplifier type_variety names separated by ' | '. This information is used to restrict the types of amplifiers used in a ROADM node during autodesign. If a ROADM booster or preamp is already specified in the Eqpt sheet, the field is ignored. The field is also ignored if the node is not a ROADM node.

**There MUST NOT be empty line(s) between two node lines.**


Links sheet
-----------

The Links sheet must contain sixteen columns::

   <-- east cable from a to z -->                                                  <-- west from z to a -->
   NodeA ; NodeZ ; Distance km ; Fiber type ; Lineic att ; Con_in ; Con_out ; PMD ; Cable Id ; Distance km ; Fiber type ; Lineic att ; Con_in ; Con_out ; PMD ; Cable Id

The Links sheet MUST contain all links between nodes defined in the Nodes sheet.
Each line represents a 'bidir link' between two nodes. The two directions are represented on a single line with "east cable from a to z" fields and "west from z to a" fields. Values for 'a to z' may be different from values for 'z to a'.
Since both directions of a bidir 'a-z' link are described on the same line (east and west), the 'z to a' direction MUST NOT be repeated on a different line. If repeated, it will generate another parallel bidir link between the same end nodes.

Parameters for "east cable from a to z" and "west from z to a" are detailed in 2x7 columns. If not filled, "west from z to a" is copied from "east cable from a to z".

For example, a line filled with::

   node6 ; node3 ; 80 ; SSMF ; 0.2 ; 0.5 ; 0.5 ; 0.1 ; cableB ; ; ; 0.21 ; 0.2 ; ; ;

will generate a unidir fiber span from node6 to node3 with::

   [node6 node3 80 SSMF 0.2 0.5 0.5 0.1 cableB]

and a fiber span from node3 to node6 with the following attributes::

   [node6 node3 80 SSMF 0.21 0.2 0.5 0.1 cableB]

- **NodeA** and **NodeZ** are mandatory.
  They are the two endpoints of the link. They MUST contain a node name from the **City** names listed in the Nodes sheet.

- **Distance km** is not mandatory.
  It is the link length.

  - If filled, it MUST contain numbers. If empty, it is replaced by a default "80" km value.
  - If the value is below 150 km, it is considered a single (bidirectional) fiber span.
  - If the value is over 150 km, the `transmission_main_example.py <examples/transmission_main_example.py>`_ program will automatically assume that intermediate span descriptions are required and will generate fiber span elements with "_1", "_2", ... trailing strings which are not visible in the JSON output. The reason for the splitting is that current EDFAs usually do not support large span loss. The current assumption is that links larger than 150 km will require intermediate amplification. This value will be revisited when Raman amplification is added.

- **Fiber type** is not mandatory.

  If filled, it must contain types listed in `eqpt_config.json <examples/eqpt_config.json>`_ in the "Fiber" list "type_variety".
  If not filled, it takes "SSMF" as the default value.

- **Lineic att** is not mandatory.

  It is the lineic attenuation expressed in dB/km.
  If filled, it must contain positive numbers.
  If not filled, it takes the "0.2" dB/km value.

- *Con_in*, *Con_out* are not mandatory.

  They are the connector losses in dB at ingress and egress of the fiber spans.
  If filled, they must contain positive numbers.
  If not filled, they take the "0.5" dB default value.

- *PMD* is not mandatory and is not used yet.

  It is the PMD value of the link in ps.
  If filled, it must contain positive numbers.
  If not filled, it takes the "0.1" ps value.

- *Cable Id* is not mandatory.
  If filled, it must contain strings with the same constraints as "City" names. Its value is used to differentiate links having the same end points; in this case different Ids should be used. Cable Ids are not meant to be unique in general.


(in progress)

Eqpt sheet
----------

The Eqpt sheet is optional. It lists the amplifier types and characteristics on each degree of the *Node A* line.
The Eqpt sheet must contain twelve columns::

   <-- east cable from a to z -->                               <-- west from z to a -->
   Node A ; Node Z ; amp type ; att_in ; amp gain ; tilt ; att_out ; amp type ; att_in ; amp gain ; tilt ; att_out

If the sheet is present, it MUST have as many lines as there are egress directions of the ROADMs defined in the Links sheet.

For example, consider the following list of links (A, B and C being ROADMs and amp# being ILAs)::

   A - amp1
   amp1 - amp2
   amp2 - B
   A - amp3
   amp3 - C

then the Eqpt sheet should contain:

- one line for each ILA: amp1, amp2, amp3
- one line for each degree-1 ROADM: B and C
- two lines for ROADM A, which is a degree-2 ROADM

::

   A - amp1
   amp1 - amp2
   amp2 - B
   A - amp3
   amp3 - C
   B - amp2
   C - amp3

In case you have already filled the Nodes and Links sheets, `create_eqpt_sheet.py <examples/create_eqpt_sheet.py>`_ can be used to automatically create a template for the mandatory entries of the list.

.. code-block:: shell

   $ cd examples
   $ python create_eqpt_sheet.py meshTopologyExampleV2.xls

This generates a text file meshTopologyExampleV2_eqt_sheet.txt whose content can be directly copied into the Eqpt sheet of the Excel file. The user can then fill in the values in the rest of the columns.

- **Node A** is mandatory. It is the name of the node (as listed in the Nodes sheet).
  If Node A is a 'ROADM' (Type attribute in the Nodes sheet), its number of occurrences must be equal to its degree.
  If Node A is an 'ILA', it should appear only once.

- **Node Z** is mandatory. It is the egress direction from the *Node A* site. Multiple links between the same Node A and Node Z are not supported.

- **amp type** is not mandatory.
  If filled, it must contain types listed in `eqpt_config.json <examples/eqpt_config.json>`_ in the "Edfa" list "type_variety".
  If not filled, it takes "std_medium_gain" as the default value.
  If filled with "fused", a fused element with 0.0 dB loss will be placed instead of an amplifier. This might be used to avoid a booster amplifier on a ROADM direction.

- **amp gain** is not mandatory. It is the gain value to be set on the amplifier (in dB).
  If not filled, it will be determined with the design rules in the convert.py file.
  If filled, it must contain positive numbers.

- *att_in* and *att_out* are not mandatory and are not used yet. They are the values of the attenuators at the input and output of the amplifier (in dB).
  If filled, they must contain positive numbers.

- *tilt* --TODO--

# to be completed #

(in progress)

Service sheet
-------------

The Service sheet is optional. It lists the services for which path and feasibility must be computed with path_requests_run.py.

The Service sheet must contain 11 columns::

   route id ; Source ; Destination ; TRX type ; Mode ; System: spacing ; System: input power (dBm) ; System: nb of channels ; routing: disjoint from ; routing: path ; routing: is loose?

- **route id** is mandatory. It must be unique. It is the identifier of the request. It can be an integer or a string (do not use blanks, dashes or commas).

- **Source** is mandatory. It is the name of the source node (as listed in the Nodes sheet). Source MUST be a ROADM node. (TODO: relax this and accept trx entries.)

- **Destination** is mandatory. It is the name of the destination node (as listed in the Nodes sheet). Destination MUST be a ROADM node. (TODO: relax this and accept trx entries.)

- **TRX type** is mandatory. It is the variety type and selected mode of the transceiver to be used for the propagation simulation. These modes MUST be defined in the equipment library. The format of the mode is used as the name of the mode. (TODO: maybe add another mode id in the Transceiver library?) In particular, the mode selection defines the channel baud rate to be used for the propagation simulation.

- **Mode** is optional. If not specified, the program will search for the mode of the defined transponder with the highest baud rate fitting within the spacing value.

- **System: spacing** is mandatory. Spacing is the channel spacing, defined in GHz, for the feasibility propagation simulation, assuming full system load.

- **System: input power (dBm) ; System: nb of channels** are optional inputs defining the system parameters for the propagation simulation.

  - input power is the channel optical input power in dBm
  - nb of channels is the number of channels to be used for the simulation

- **routing: disjoint from ; routing: path ; routing: is loose?** are optional.

  - disjoint from: identifies the requests from which this request must be disjoint. If filled, it must contain request ids separated by ' | '.
  - path: is the set of ROADM nodes that must be used by the path. It must contain the list of ROADM names that the path must cross. (TODO: only ROADM nodes are accepted in this release; relax this to any type of node.) If filled, it must contain ROADM ids separated by ' | '. Exact names are required.
  - is loose? A 'no' value means that the list of nodes should be strictly followed, while any other value means that the constraint may be relaxed if the node is not reachable.

- **path bandwidth** is optional. It is the amount of capacity required between source and destination in Gbit/s. The default value is 0.0 Gbit/s.

path_requests_run.py
--------------------

**Usage**: path_requests_run.py [-h] [-v] [-o OUTPUT]
[network_filename xls or json] [service_filename xls or json] [eqpt_filename json]

.. code-block:: shell

   $ cd examples
   $ python path_requests_run.py meshTopologyExampleV2.xls service_file.json eqpt_file -o output_file.json

A function that computes performances for a list of services provided in the service file (accepts JSON or Excel format).

If the service <file.xls> is in xls format, path_requests_run.py converts it to a JSON file <file_services.json> following the Yang model for requesting path computation defined in `draft-ietf-teas-yang-path-computation-01.txt <https://www.ietf.org/id/draft-ietf-teas-yang-path-computation-01.pdf>`_. For PSE use, additional fields with trx type and mode have been added to the te-bandwidth field.

A template for the JSON file can be found here: `service_template.json <service_template.json>`_

If no output file is given, the computation is shown on standard output for demo purposes.
If a file is specified with the optional -o argument, the result of the computation is converted into a JSON format following the Yang model for requesting path computation defined in `draft-ietf-teas-yang-path-computation-01.txt <https://www.ietf.org/id/draft-ietf-teas-yang-path-computation-01.pdf>`_. (TODO: verify that this implementation is correct and give feedback to the IETF on what is missing for our specific application.)

A template for the resulting computation JSON file can be found here: `path_result_template.json <path_result_template.json>`_

Important note: path_requests_run.py is not a network dimensioning tool: each service does not reserve spectrum or occupy resources such as transponders. It only computes path feasibility assuming the spectrum (between defined frequencies) is loaded with "nb of channels" spaced by "spacing" values as specified in the system parameters input in the service file, each channel having the same characteristics in terms of baud rate, format, ... as the service transponder. The transceiver element acts as a "logical starting/stopping point" for the spectral information propagation. At that point it is not meant to represent the capacity of add/drop ports.
As a result, the transponder type is not part of the network info; it is related to the list of service requests.

In a next step we plan to provide the required features to enable dimensioning: allocation of resources, counting channels, limitation of the number of channels, ...

(in progress)
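For illustration, a single Service sheet line following the column order above could look like this; every value is a made-up placeholder, the node names must match the Nodes sheet, and the transceiver type and mode must match entries in the equipment library::

   1 ; node1 ; node5 ; trx_type_1 ; mode_1 ; 50 ; 0 ; 96 ; ; node3 | node4 ; no
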
8 HISTORY.rst
@@ -1,8 +0,0 @@
=======
History
=======

0.1.0 (2017-06-29)
------------------

* First release on PyPI.
40 LICENSE
@@ -1,31 +1,29 @@
BSD 3-Clause License

BSD License

Copyright (c) 2017, <TBD>
Copyright (c) 2018, Telecom Infra Project
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this
  list of conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11 MANIFEST.in
@@ -1,11 +0,0 @@
include AUTHORS.rst
include CONTRIBUTING.rst
include HISTORY.rst
include LICENSE
include README.rst

recursive-include tests *
recursive-exclude * __pycache__
recursive-exclude * *.py[co]

recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif
87 Makefile
@@ -1,87 +0,0 @@
.PHONY: clean clean-test clean-pyc clean-build docs help
.DEFAULT_GOAL := help
define BROWSER_PYSCRIPT
import os, webbrowser, sys
try:
    from urllib import pathname2url
except:
    from urllib.request import pathname2url

webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1])))
endef
export BROWSER_PYSCRIPT

define PRINT_HELP_PYSCRIPT
import re, sys

for line in sys.stdin:
    match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line)
    if match:
        target, help = match.groups()
        print("%-20s %s" % (target, help))
endef
export PRINT_HELP_PYSCRIPT
BROWSER := python -c "$$BROWSER_PYSCRIPT"

help:
	@python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)

clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts


clean-build: ## remove build artifacts
	rm -fr build/
	rm -fr dist/
	rm -fr .eggs/
	find . -name '*.egg-info' -exec rm -fr {} +
	find . -name '*.egg' -exec rm -f {} +

clean-pyc: ## remove Python file artifacts
	find . -name '*.pyc' -exec rm -f {} +
	find . -name '*.pyo' -exec rm -f {} +
	find . -name '*~' -exec rm -f {} +
	find . -name '__pycache__' -exec rm -fr {} +

clean-test: ## remove test and coverage artifacts
	rm -fr .tox/
	rm -f .coverage
	rm -fr htmlcov/

lint: ## check style with flake8
	flake8 gnpy tests

test: ## run tests quickly with the default Python
	py.test


test-all: ## run tests on every Python version with tox
	tox

coverage: ## check code coverage quickly with the default Python
	coverage run --source gnpy -m pytest
	coverage report -m
	coverage html
	$(BROWSER) htmlcov/index.html

docs: ## generate Sphinx HTML documentation, including API docs
	rm -f docs/gnpy.rst
	rm -f docs/modules.rst
	sphinx-apidoc -o docs/ gnpy
	$(MAKE) -C docs clean
	$(MAKE) -C docs html
	$(BROWSER) docs/_build/html/index.html

servedocs: docs ## compile the docs watching for changes
	watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D .

release: clean ## package and upload a release
	python setup.py sdist upload
	python setup.py bdist_wheel upload

dist: clean ## builds source and wheel package
	python setup.py sdist
	python setup.py bdist_wheel
	ls -l dist

install: clean ## install the package to the active Python's site-packages
	python setup.py install
639
README.rst
639
README.rst
@@ -1,32 +1,633 @@
|
||||
====
|
||||
gnpy
|
||||
====
|
||||
.. image:: docs/images/GNPy-banner.png
|
||||
:width: 100%
|
||||
:align: left
|
||||
:alt: GNPy with an OLS system
|
||||
|
||||
====================================================================
|
||||
`gnpy`: mesh optical network route planning and optimization library
|
||||
====================================================================
|
||||
|
||||
|docs| |build| |doi|
|
||||
|
||||
**`gnpy` is an open-source, community-developed library for building route
|
||||
planning and optimization tools in real-world mesh optical networks.**
|
||||
|
||||
`gnpy <http://github.com/telecominfraproject/oopt-gnpy>`__ is:
|
||||
--------------------------------------------------------------
|
||||
|
||||
- a sponsored project of the `OOPT/PSE <https://telecominfraproject.com/open-optical-packet-transport/>`_ working group of the `Telecom Infra Project <http://telecominfraproject.com>`_
|
||||
- fully community-driven, fully open source library
|
||||
- driven by a consortium of operators, vendors, and academic researchers
|
||||
- intended for rapid development of production-grade route planning tools
|
||||
- easily extensible to include custom network elements
|
||||
- performant to the scale of real-world mesh optical networks
|
||||
|
||||
Documentation: https://gnpy.readthedocs.io
|
||||
|
||||
Get In Touch
|
||||
~~~~~~~~~~~~
|
||||
|
||||
There are `weekly calls <https://telecominfraproject.workplace.com/events/702894886867547/>`__ about our progress.
|
||||
Newcomers, users and telecom operators are especially welcome there.
|
||||
We encourage all interested people outside the TIP to `join the project <https://telecominfraproject.com/apply-for-membership/>`__.
|
||||
|
||||
Branches and Tagged Releases
|
||||
----------------------------
|
||||
|
||||
- all releases are `available via GitHub <https://github.com/Telecominfraproject/oopt-gnpy/releases>`_
|
||||
- the `master <https://github.com/Telecominfraproject/oopt-gnpy/tree/master>`_ branch contains stable, `validated code <https://github.com/Telecominfraproject/oopt-gnpy/wiki/Testing-for-Quality>`_. It is updated from develop on a release schedule determined by the OOPT-PSE Working Group.
|
||||
- the `develop <https://github.com/Telecominfraproject/oopt-gnpy/tree/develop>`_ branch contains the latest code under active development, which may not be fully validated and tested.
|
||||
|
||||
How to Install
|
||||
--------------
|
||||
|
||||
Using prebuilt Docker images
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Our `Docker images <https://hub.docker.com/r/telecominfraproject/oopt-gnpy>`_ contain everything needed to run all examples from this guide.
|
||||
Docker transparently fetches the image over the network upon first use.
|
||||
On Linux and Mac, run:
|
||||
|
||||
|
||||
.. image:: https://img.shields.io/pypi/v/gnpy.svg
|
||||
:target: https://pypi.python.org/pypi/gnpy
|
||||
.. code-block:: shell-session
|
||||
|
||||
.. image:: https://img.shields.io/travis/<TBD>/gnpy.svg
|
||||
:target: https://travis-ci.org/<TBD>/gnpy
|
||||
$ docker run -it --rm --volume $(pwd):/shared telecominfraproject/oopt-gnpy
|
||||
root@bea050f186f7:/shared/examples#
|
||||
|
||||
.. image:: https://readthedocs.org/projects/gnpy/badge/?version=latest
|
||||
:target: https://gnpy.readthedocs.io/en/latest/?badge=latest
|
||||
:alt: Documentation Status
|
||||
On Windows, launch from Powershell as:
|
||||
|
||||
.. image:: https://pyup.io/repos/github/<TBD>/gnpy/shield.svg
|
||||
:target: https://pyup.io/repos/github/<TBD>/gnpy/
|
||||
:alt: Updates
|
||||
.. code-block:: powershell
|
||||
|
||||
PS C:\> docker run -it --rm --volume ${PWD}:/shared telecominfraproject/oopt-gnpy
|
||||
root@89784e577d44:/shared/examples#
|
||||
|
||||
Gaussian Noise (GN) modeling library
|
||||
In both cases, a directory named ``examples/`` will appear in your current working directory.
|
||||
GNPy automaticallly populates it with example files from the current release.
|
||||
Remove that directory if you want to start from scratch.
|
||||
|
||||
Using Python on your computer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

**Note**: `gnpy` supports Python 3 only. Python 2 is not supported.
`gnpy` requires Python ≥3.6.

**Note**: the `gnpy` maintainers strongly recommend the use of Anaconda for
managing dependencies.

It is recommended that you use a "virtual environment" when installing `gnpy`.
Do not install `gnpy` on your system Python.

We recommend the use of the `Anaconda Python distribution <https://www.anaconda.com/download>`_ which comes with many scientific computing
dependencies pre-installed. Anaconda creates a base "virtual environment" for
you automatically. You can also create and manage your ``conda`` "virtual
environments" yourself (see:
https://conda.io/docs/user-guide/tasks/manage-environments.html).

To activate your Anaconda virtual environment, you may need to do the
following:

.. code-block:: shell

   $ source /path/to/anaconda/bin/activate  # activate Anaconda base environment
   (base) $                                 # note the change to the prompt

You can check which Anaconda environment you are using with:

.. code-block:: shell

   (base) $ conda env list                  # list all environments
   # conda environments:
   #
   base          *  /src/install/anaconda3

   (base) $ echo $CONDA_DEFAULT_ENV         # show default environment
   base

You can check your version of Python with the following. If you are using
Anaconda's Python 3, you should see similar output as below. Your results may
be slightly different depending on your Anaconda installation path and the
exact version of Python you are using.

.. code-block:: shell

   $ which python                           # check which Python executable is used
   /path/to/anaconda/bin/python
   $ python -V                              # check your Python version
   Python 3.6.5 :: Anaconda, Inc.

From within your Anaconda Python 3 environment, you can clone the master branch
of the `gnpy` repo and install it with:

.. code-block:: shell

   $ git clone https://github.com/Telecominfraproject/oopt-gnpy  # clone the repo
   $ cd oopt-gnpy
   $ python setup.py install                                     # install

To test that `gnpy` was successfully installed, you can run this command. If it
executes without a ``ModuleNotFoundError``, you have successfully installed
`gnpy`.

.. code-block:: shell

   $ python -c 'import gnpy'  # attempt to import gnpy

   $ pytest                   # run tests

Instructions for First Use
--------------------------

``gnpy`` is a library for building route planning and optimization tools.

It ships with a number of example programs. Release versions will ship with
fully-functional programs.

**Note**: *If you are a network operator or involved in route planning and
optimization for your organization, please contact project maintainer Jan
Kundrát <jan.kundrat@telecominfraproject.com>. gnpy is looking for users with
specific, delineated use cases to drive requirements for future
development.*

This example demonstrates how GNPy can be used to check the expected SNR at the end of the line by varying the channel input power:

.. image:: https://telecominfraproject.github.io/oopt-gnpy/docs/images/transmission_main_example.svg
   :width: 100%
   :align: left
   :alt: Running a simple simulation example
   :target: https://asciinema.org/a/252295

By default, this script operates on a single span network defined in
`examples/edfa_example_network.json <examples/edfa_example_network.json>`_

You can specify a different network at the command line as follows. For
example, to use the CORONET Global network defined in
`examples/CORONET_Global_Topology.json <examples/CORONET_Global_Topology.json>`_:

.. code-block:: shell-session

   $ ./examples/transmission_main_example.py examples/CORONET_Global_Topology.json

It is also possible to use an Excel file input (for example
`examples/CORONET_Global_Topology.xls <examples/CORONET_Global_Topology.xls>`_).
The Excel file will be processed into a JSON file with the same prefix. For
further instructions on how to prepare the Excel input file, see
`Excel_userguide.rst <Excel_userguide.rst>`_.

The main transmission example will calculate the average signal OSNR and SNR
across network elements (transceiver, ROADMs, fibers, and amplifiers)
between two transceivers selected by the user. Additional details are provided by running ``transmission_main_example.py -h``. (By default, for the CORONET Global
network, it will show the transmission of spectral information between Abilene and Albany.)

This script calculates the average signal OSNR = |OSNR| and SNR = |SNR|.

.. |OSNR| replace:: P\ :sub:`ch`\ /P\ :sub:`ase`
.. |SNR| replace:: P\ :sub:`ch`\ /(P\ :sub:`nli`\ +\ P\ :sub:`ase`)

|Pase| is the amplified spontaneous emission noise, and |Pnli| the non-linear
interference noise.

.. |Pase| replace:: P\ :sub:`ase`
.. |Pnli| replace:: P\ :sub:`nli`

Further Instructions for Use (`transmission_main_example.py`, `path_requests_run.py`)
-------------------------------------------------------------------------------------

Design and transmission parameters are defined in a dedicated JSON file. By
default, this information is read from `examples/eqpt_config.json
<examples/eqpt_config.json>`_. This file defines the equipment libraries that
can be customized (EDFAs, fibers, and transceivers).

It also defines the simulation parameters (spans, ROADMs, and the spectral
information to transmit).

The EDFA equipment library is a list of supported amplifiers. New amplifiers
can be added and existing ones removed. Three different noise models are available:

1. ``'type_def': 'variable_gain'`` is a simplified model simulating a 2-coil EDFA with internal, input and output VOAs. The NF vs. gain response is calculated from the input parameters ``nf_min``, ``nf_max``, and ``gain_flatmax``. It is not a simple interpolation but a 2-stage NF calculation.
2. ``'type_def': 'fixed_gain'`` is a fixed gain model: the NF is constant (``nf0``) as long as ``gain_min < gain < gain_flatmax``.
3. ``'type_def': None`` is an advanced model. A detailed JSON configuration file is required (by default `examples/std_medium_gain_advanced_config.json <examples/std_medium_gain_advanced_config.json>`_). It uses a 3rd order polynomial where NF = f(gain), NF_ripple = f(frequency), gain_ripple = f(frequency), N-array dgt = f(frequency). Compared to the previous models, NF ripple and gain ripple are modelled.

For all amplifier models:

+------------------------+-----------+---------------------------------------------+
| field                  | type      | description                                 |
+========================+===========+=============================================+
| ``type_variety``       | (string)  | a unique name to ID the amplifier in the    |
|                        |           | JSON/Excel template topology input file     |
+------------------------+-----------+---------------------------------------------+
| ``out_voa_auto``       | (boolean) | auto_design feature to optimize the         |
|                        |           | amplifier output VOA. If true, output       |
|                        |           | VOA is present and will be used to push     |
|                        |           | amplifier gain to its maximum, within       |
|                        |           | EOL power margins.                          |
+------------------------+-----------+---------------------------------------------+
| ``allowed_for_design`` | (boolean) | If false, the amplifier will not be         |
|                        |           | picked by auto-design but it can still      |
|                        |           | be used as a manual input (from JSON or     |
|                        |           | Excel template topology files.)             |
+------------------------+-----------+---------------------------------------------+

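As an illustration only, a ``variable_gain`` entry of the EDFA equipment library could be sketched as below. The field names are the ones described above; the numeric values are placeholders rather than a validated amplifier datasheet and should be checked against the entries shipped in `examples/eqpt_config.json <examples/eqpt_config.json>`_:

.. code-block:: json

   {
       "type_variety": "std_medium_gain",
       "type_def": "variable_gain",
       "gain_flatmax": 26,
       "gain_min": 15,
       "nf_min": 6,
       "nf_max": 10,
       "out_voa_auto": false,
       "allowed_for_design": true
   }
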
The fiber library currently describes SSMF and NZDF but additional fiber types can be entered by the user following the same model:

+----------------------+-----------+---------------------------------------------+
| field                | type      | description                                 |
+======================+===========+=============================================+
| ``type_variety``     | (string)  | a unique name to ID the fiber in the        |
|                      |           | JSON or Excel template topology input       |
|                      |           | file                                        |
+----------------------+-----------+---------------------------------------------+
| ``dispersion``       | (number)  | (s.m-1.m-1)                                 |
+----------------------+-----------+---------------------------------------------+
| ``gamma``            | (number)  | 2pi.n2/(lambda*Aeff) (w-1.m-1)              |
+----------------------+-----------+---------------------------------------------+

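For illustration, a fiber entry following that model might look like the sketch below; the numbers are typical SSMF-like placeholder values rather than authoritative data:

.. code-block:: json

   {
       "type_variety": "SSMF",
       "dispersion": 1.67e-05,
       "gamma": 0.00127
   }
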
The transceiver equipment library is a list of supported transceivers. New
transceivers can be added and existing ones removed at will by the user. It is
used to determine the service list path feasibility when running the
`path_requests_run.py routine <examples/path_requests_run.py>`_.

+----------------------+-----------+---------------------------------------------+
| field                | type      | description                                 |
+======================+===========+=============================================+
| ``type_variety``     | (string)  | A unique name to ID the transceiver in      |
|                      |           | the JSON or Excel template topology         |
|                      |           | input file                                  |
+----------------------+-----------+---------------------------------------------+
| ``frequency``        | (number)  | Min/max as below.                           |
+----------------------+-----------+---------------------------------------------+
| ``mode``             | (list of  | A list of modes supported by the            |
|                      | dicts)    | transponder. New modes can be added at      |
|                      |           | will by the user. The modes are specific    |
|                      |           | to each transponder type_variety.           |
|                      |           | Each mode is described as below.            |
+----------------------+-----------+---------------------------------------------+

The modes are defined as follows:

+----------------------+-----------+---------------------------------------------+
| field                | type      | description                                 |
+======================+===========+=============================================+
| ``format``           | (string)  | a unique name to ID the mode                |
+----------------------+-----------+---------------------------------------------+
| ``baud_rate``        | (number)  | in Hz                                       |
+----------------------+-----------+---------------------------------------------+
| ``OSNR``             | (number)  | min required OSNR in 0.1nm (dB)             |
+----------------------+-----------+---------------------------------------------+
| ``bit_rate``         | (number)  | in bit/s                                    |
+----------------------+-----------+---------------------------------------------+
| ``roll_off``         | (number)  | Not used.                                   |
+----------------------+-----------+---------------------------------------------+
| ``tx_osnr``          | (number)  | In dB. OSNR out from transponder.           |
+----------------------+-----------+---------------------------------------------+
| ``cost``             | (number)  | Arbitrary unit                              |
+----------------------+-----------+---------------------------------------------+

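Putting the two tables together, a transceiver entry with a single mode could be sketched as follows. Treat this as an illustration: the nested ``min``/``max`` layout of ``frequency``, the transceiver name and all numeric values are assumptions to be checked against `examples/eqpt_config.json <examples/eqpt_config.json>`_:

.. code-block:: json

   {
       "type_variety": "vendorA_trx-type1",
       "frequency": {"min": 191.35e12, "max": 196.1e12},
       "mode": [
           {
               "format": "mode 1",
               "baud_rate": 32e9,
               "OSNR": 11,
               "bit_rate": 100e9,
               "roll_off": 0.15,
               "tx_osnr": 40,
               "cost": 1
           }
       ]
   }
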
Simulation parameters are defined as follows.

Auto-design automatically creates EDFA amplifier network elements when they are
missing, after a fiber, or between a ROADM and a fiber. This auto-design
functionality can be manually and locally deactivated by introducing a ``Fused``
network element after a ``Fiber`` or a ``Roadm`` that doesn't need amplification.
The amplifier is chosen from the EDFA list of the equipment library based on
gain, power, and NF criteria. Only the EDFAs that are marked
``'allowed_for_design': true`` are considered.

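For instance, a hypothetical ``Fused`` node placed in the topology JSON to suppress amplification at a given location could be sketched as below; the ``params``/``loss`` field (extra loss of the fused connection) and the ``uid`` are assumptions to verify against the example topology files:

.. code-block:: json

   {
       "uid": "fused spans in Site_A",
       "type": "Fused",
       "params": {
           "loss": 0.5
       }
   }
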
For amplifiers defined in the topology JSON input but whose ``gain = 0``
(placeholder), auto-design will set their gain automatically: see ``power_mode`` in
the ``Spans`` library to find out how the gain is calculated.

Span configuration is performed as follows. It is not a list (which may change
in later releases) and the user can only modify the value of existing
parameters:

+-------------------------------------+-----------+---------------------------------------------+
| field                               | type      | description                                 |
+=====================================+===========+=============================================+
| ``power_mode``                      | (boolean) | If false, gain mode. Auto-design sets       |
|                                     |           | amplifier gain = preceding span loss,       |
|                                     |           | unless the amplifier exists and its         |
|                                     |           | gain > 0 in the topology input JSON.        |
|                                     |           | If true, power mode (recommended for        |
|                                     |           | auto-design and power sweep.)               |
|                                     |           | Auto-design sets amplifier power            |
|                                     |           | according to delta_power_range. If the      |
|                                     |           | amplifier exists with gain > 0 in the       |
|                                     |           | topology JSON input, then its gain is       |
|                                     |           | translated into a power target/channel.     |
|                                     |           | Moreover, when performing a power sweep     |
|                                     |           | (see ``power_range_db`` in the SI           |
|                                     |           | configuration library) the power sweep      |
|                                     |           | is performed w/r/t this power target,       |
|                                     |           | regardless of preceding amplifiers          |
|                                     |           | power saturation/limitations.               |
+-------------------------------------+-----------+---------------------------------------------+
| ``delta_power_range_db``            | (number)  | Auto-design only, power-mode                |
|                                     |           | only. Specifies the [min, max, step]        |
|                                     |           | power excursion/span. It is a relative      |
|                                     |           | power excursion w/r/t the                   |
|                                     |           | power_dbm + power_range_db                  |
|                                     |           | (power sweep if applicable) defined in      |
|                                     |           | the SI configuration library. This          |
|                                     |           | relative power excursion is = 1/3 of        |
|                                     |           | the span loss difference with the           |
|                                     |           | reference 20 dB span. The 1/3 slope is      |
|                                     |           | derived from the GN model equations.        |
|                                     |           | For example, a 23 dB span loss will be      |
|                                     |           | set to 1 dB more power than a 20 dB         |
|                                     |           | span loss. The 20 dB reference spans        |
|                                     |           | will *always* be set to                     |
|                                     |           | power = power_dbm + power_range_db.         |
|                                     |           | To configure the same power in all          |
|                                     |           | spans, use `[0, 0, 0]`. All spans will      |
|                                     |           | be set to                                   |
|                                     |           | power = power_dbm + power_range_db.         |
|                                     |           | To configure the same power in all spans    |
|                                     |           | and 3 dB more power just for the longest    |
|                                     |           | spans: `[0, 3, 3]`. The longest spans are   |
|                                     |           | set to                                      |
|                                     |           | power = power_dbm + power_range_db + 3.     |
|                                     |           | To configure a 4 dB power range across      |
|                                     |           | all spans in 0.5 dB steps: `[-2, 2, 0.5]`.  |
|                                     |           | A 17 dB span is set to                      |
|                                     |           | power = power_dbm + power_range_db - 1,     |
|                                     |           | a 20 dB span to                             |
|                                     |           | power = power_dbm + power_range_db and      |
|                                     |           | a 23 dB span to                             |
|                                     |           | power = power_dbm + power_range_db + 1      |
+-------------------------------------+-----------+---------------------------------------------+
| ``max_fiber_lineic_loss_for_raman`` | (number)  | Maximum linear fiber loss for Raman         |
|                                     |           | amplification use.                          |
+-------------------------------------+-----------+---------------------------------------------+
| ``max_length``                      | (number)  | Split fiber lengths > max_length. This      |
|                                     |           | is useful for high-level topologies         |
|                                     |           | that do not specify in-line                 |
|                                     |           | amplification sites. For example, the       |
|                                     |           | CORONET_Global_Topology.xls defines         |
|                                     |           | links > 1000 km between 2 sites; they       |
|                                     |           | could not be simulated if these links       |
|                                     |           | were not split into shorter spans.          |
+-------------------------------------+-----------+---------------------------------------------+
| ``length_unit``                     | "m"/"km"  | Unit for ``max_length``.                    |
+-------------------------------------+-----------+---------------------------------------------+
| ``max_loss``                        | (number)  | Not used in the current code                |
|                                     |           | implementation.                             |
+-------------------------------------+-----------+---------------------------------------------+
| ``padding``                         | (number)  | In dB. Min span loss before putting an      |
|                                     |           | attenuator before fiber. Attenuator         |
|                                     |           | value                                       |
|                                     |           | Fiber.att_in = max(0, padding - span_loss). |
|                                     |           | Padding can be set manually to reach a      |
|                                     |           | higher padding value for a given fiber      |
|                                     |           | by filling in the Fiber/params/att_in       |
|                                     |           | field in the topology JSON input [1].       |
|                                     |           | However, if span_loss = length *            |
|                                     |           | loss_coef + att_in + con_in + con_out       |
|                                     |           | < padding, the att_in value is              |
|                                     |           | increased so that span_loss = padding.      |
|                                     |           | Therefore it is not possible to set         |
|                                     |           | span_loss < padding.                        |
+-------------------------------------+-----------+---------------------------------------------+
| ``EOL``                             | (number)  | All fiber span loss ageing. The value       |
|                                     |           | is added to the con_out (fiber output       |
|                                     |           | connector). So the design and the path      |
|                                     |           | feasibility are performed with              |
|                                     |           | span_loss + EOL. EOL cannot be set          |
|                                     |           | manually for a given fiber span             |
|                                     |           | (workaround is to specify higher            |
|                                     |           | ``con_out`` loss for this fiber).           |
+-------------------------------------+-----------+---------------------------------------------+
| ``con_in``,                         | (number)  | Default values if Fiber/params/con_in/out   |
| ``con_out``                         |           | is None in the topology input               |
|                                     |           | description. This default value is          |
|                                     |           | ignored if a Fiber/params/con_in/out        |
|                                     |           | value is input in the topology for a        |
|                                     |           | given Fiber.                                |
+-------------------------------------+-----------+---------------------------------------------+

For reference, a ``Fiber`` element in the topology input file looks like this:

.. code-block:: json

   {
       "uid": "fiber (A1->A2)",
       "type": "Fiber",
       "type_variety": "SSMF",
       "params":
       {
           "type_variety": "SSMF",
           "length": 120.0,
           "loss_coef": 0.2,
           "length_units": "km",
           "att_in": 0,
           "con_in": 0,
           "con_out": 0
       }
   }

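Gathering the span parameters described above, the span section of the simulation file could be sketched as below. All values are placeholders, and the exact spelling of some keys (e.g. ``length_units``) should be checked against `examples/eqpt_config.json <examples/eqpt_config.json>`_:

.. code-block:: json

   {
       "power_mode": true,
       "delta_power_range_db": [-2, 2, 0.5],
       "max_fiber_lineic_loss_for_raman": 0.25,
       "max_length": 150,
       "length_units": "km",
       "max_loss": 28,
       "padding": 10,
       "EOL": 0,
       "con_in": 0,
       "con_out": 0
   }
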
ROADMs can be configured as follows. The user can only modify the value of
existing parameters:

+--------------------------+-----------+---------------------------------------------+
| field                    | type      | description                                 |
+==========================+===========+=============================================+
| ``target_pch_out_db``    | (number)  | Auto-design sets the ROADM egress channel   |
|                          |           | power. This reflects typical control loop   |
|                          |           | algorithms that adjust ROADM losses to      |
|                          |           | equalize channels (e.g. coming from         |
|                          |           | different ingress directions or add ports). |
|                          |           | This is the default value                   |
|                          |           | Roadm/params/target_pch_out_db if no value  |
|                          |           | is given in the ``Roadm`` element in the    |
|                          |           | topology input description.                 |
|                          |           | This default value is ignored if a          |
|                          |           | params/target_pch_out_db value is input in  |
|                          |           | the topology for a given ROADM.             |
+--------------------------+-----------+---------------------------------------------+
| ``add_drop_osnr``        | (number)  | OSNR contribution from the add/drop ports   |
+--------------------------+-----------+---------------------------------------------+
| ``restrictions``         | (dict of  | If non-empty, keys ``preamp_variety_list``  |
|                          | strings)  | and ``booster_variety_list`` represent      |
|                          |           | lists of ``type_variety`` amplifiers which  |
|                          |           | are allowed for auto-design within ROADM's  |
|                          |           | line degrees.                               |
|                          |           |                                             |
|                          |           | If no booster should be placed on a degree, |
|                          |           | insert a ``Fused`` node on the degree       |
|                          |           | output.                                     |
+--------------------------+-----------+---------------------------------------------+

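As a sketch with placeholder numbers (not recommended settings), a ROADM equipment entry combining these fields might look like:

.. code-block:: json

   {
       "target_pch_out_db": -20,
       "add_drop_osnr": 38,
       "restrictions": {
           "preamp_variety_list": [],
           "booster_variety_list": []
       }
   }
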
The ``SpectralInformation`` object can be configured as follows. The user can
only modify the value of existing parameters. It defines a spectrum of N
identical carriers. While the code libraries allow for different carriers and
power levels, the current user parametrization only allows one carrier type and
one power/channel definition.

+----------------------+-----------+---------------------------------------------+
| field                | type      | description                                 |
+======================+===========+=============================================+
| ``f_min``,           | (number)  | In Hz. Carrier min/max excursion.           |
| ``f_max``            |           |                                             |
+----------------------+-----------+---------------------------------------------+
| ``baud_rate``        | (number)  | In Hz. Simulated baud rate.                 |
+----------------------+-----------+---------------------------------------------+
| ``spacing``          | (number)  | In Hz. Carrier spacing.                     |
+----------------------+-----------+---------------------------------------------+
| ``roll_off``         | (number)  | Not used.                                   |
+----------------------+-----------+---------------------------------------------+
| ``tx_osnr``          | (number)  | In dB. OSNR out from transponder.           |
+----------------------+-----------+---------------------------------------------+
| ``power_dbm``        | (number)  | Reference channel power. In gain mode       |
|                      |           | (see spans/power_mode = false), all gain    |
|                      |           | settings are offset w/r/t this reference    |
|                      |           | power. In power mode, it is the             |
|                      |           | reference power for                         |
|                      |           | Spans/delta_power_range_db. For example,    |
|                      |           | if delta_power_range_db = `[0,0,0]`, the    |
|                      |           | same power = power_dbm is launched in       |
|                      |           | every span. The network design is           |
|                      |           | performed with the power_dbm value: even    |
|                      |           | if a power sweep is defined (see below)     |
|                      |           | the design is not repeated.                 |
+----------------------+-----------+---------------------------------------------+
| ``power_range_db``   | (number)  | Power sweep excursion around power_dbm.     |
|                      |           | It is not the min and max channel power     |
|                      |           | values! The reference power becomes:        |
|                      |           | power_range_db + power_dbm.                 |
+----------------------+-----------+---------------------------------------------+
| ``sys_margins``      | (number)  | In dB. Added margin on min required         |
|                      |           | transceiver OSNR.                           |
+----------------------+-----------+---------------------------------------------+

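A ``SpectralInformation`` block following this table could be sketched as below. The values simply mirror the 32 Gbaud, 50 GHz, 0 dBm defaults mentioned in the next paragraph and are illustrative only; in particular, check the shipped ``eqpt_config.json`` for the exact representation of ``power_range_db``:

.. code-block:: json

   {
       "f_min": 191.35e12,
       "f_max": 196.1e12,
       "baud_rate": 32e9,
       "spacing": 50e9,
       "roll_off": 0.15,
       "tx_osnr": 40,
       "power_dbm": 0,
       "power_range_db": 0,
       "sys_margins": 0
   }
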
The `transmission_main_example.py <examples/transmission_main_example.py>`_ script propagates a spectrum of channels at 32 Gbaud, 50 GHz spacing and 0 dBm/channel.
Launch power can be overridden by using the ``--power`` argument.
Spectrum information is not yet parametrized on the command line but can be modified directly in ``eqpt_config.json`` (via the ``SpectralInformation`` -SI- structure) to accommodate any baud rate or spacing.
The number of channels is computed from the ``spacing`` and ``f_min``/``f_max`` values (on the order of (f_max - f_min) / spacing carriers).

An experimental support for Raman amplification is available:

.. code-block:: shell

   $ ./examples/transmission_main_example.py \
     examples/raman_edfa_example_network.json \
     --sim examples/sim_params.json --show-channels

Configuration of the Raman pumps (their frequencies, power and pumping direction) is done via the `RamanFiber element in the network topology <examples/raman_edfa_example_network.json>`_.
General numeric parameters for simulation control are provided in `examples/sim_params.json <examples/sim_params.json>`_.

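As a rough sketch of what such a ``RamanFiber`` element can look like, see below; the field names and values here are assumptions, and the authoritative layout is the one in `examples/raman_edfa_example_network.json <examples/raman_edfa_example_network.json>`_:

.. code-block:: json

   {
       "uid": "RamanSpan1",
       "type": "RamanFiber",
       "type_variety": "SSMF",
       "operational": {
           "temperature": 283,
           "raman_pumps": [
               {
                   "power": 0.2,
                   "frequency": 205e12,
                   "propagation_direction": "counterprop"
               }
           ]
       },
       "params": {
           "length": 80.0,
           "loss_coef": 0.2,
           "length_units": "km"
       }
   }
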
Use `examples/path_requests_run.py <examples/path_requests_run.py>`_ to run multiple optimizations as follows:

.. code-block:: shell

   $ python path_requests_run.py -h
   Usage: path_requests_run.py [-h] [-v] [-o OUTPUT] [network_filename] [service_filename] [eqpt_filename]

The ``network_filename`` and ``service_filename`` can be an XLS or JSON file. The ``eqpt_filename`` must be a JSON file.

To see an example of it, run:

.. code-block:: shell

   $ cd examples
   $ python path_requests_run.py meshTopologyExampleV2.xls meshTopologyExampleV2_services.json eqpt_config.json -o output_file.json

This program requires a list of connections to be estimated and the equipment
library. The program computes performances for the list of services (accepts
JSON or Excel format) using the same spectrum propagation modules as
``transmission_main_example.py``. An explanation of the Excel template is provided in
`Excel_userguide.rst <Excel_userguide.rst#service-sheet>`_. A template for
the JSON format can be found here: `service-template.json
<service-template.json>`_.

Contributing
------------

``gnpy`` is looking for additional contributors, especially those with experience
planning and maintaining large-scale, real-world mesh optical networks.

To get involved, please contact Jan Kundrát
<jan.kundrat@telecominfraproject.com> or Gert Grammel <ggrammel@juniper.net>.

``gnpy`` contributions are currently limited to members of `TIP
<http://telecominfraproject.com>`_. Membership is free and open to all.

See the `Onboarding Guide
<https://github.com/Telecominfraproject/gnpy/wiki/Onboarding-Guide>`_ for
specific details on code contributions.

See `AUTHORS.rst <AUTHORS.rst>`_ for past and present contributors.

Project Background
------------------

Data Centers are built upon interchangeable, highly standardized node and
network architectures rather than a sum of isolated solutions. This also
translates to optical networking. It leads to a push to enable multi-vendor
optical networks by disaggregating HW and SW functions and focusing on
interoperability. In this paradigm, the burden of responsibility for ensuring
the performance of such disaggregated open optical systems falls on the
operators. Consequently, operators and vendors are collaborating in defining
control models that can be readily used by off-the-shelf controllers. However,
node and network models are only part of the answer. To take reasonable
decisions, controllers need to incorporate logic to simulate and assess optical
performance. Hence, a vendor-independent optical quality estimator is required.
Given its vendor-agnostic nature, such an estimator needs to be driven by a
consortium of operators, system and component suppliers.

Founded in February 2016, the Telecom Infra Project (TIP) is an
engineering-focused initiative which is operator driven, but features
collaboration across operators, suppliers, developers, integrators, and
startups with the goal of disaggregating the traditional network deployment
approach. The group’s ultimate goal is to help provide better connectivity for
communities all over the world as more people come on-line and demand more
bandwidth-intensive experiences like video, virtual reality and augmented
reality.

Within TIP, the Open Optical Packet Transport (OOPT) project group is chartered
with unbundling monolithic packet-optical network technologies in order to
unlock innovation and support new, more flexible connectivity paradigms.

The key to unbundling is the ability to accurately plan and predict the
performance of optical line systems based on an accurate simulation of optical
parameters. Under that OOPT umbrella, the Physical Simulation Environment (PSE)
working group set out to disrupt the planning landscape by providing an open
source simulation model which can be used freely across multiple vendor
implementations.

.. |docs| image:: https://readthedocs.org/projects/gnpy/badge/?version=develop
   :target: http://gnpy.readthedocs.io/en/develop/?badge=develop
   :alt: Documentation Status
   :scale: 100%

.. |build| image:: https://travis-ci.com/Telecominfraproject/oopt-gnpy.svg?branch=develop
   :target: https://travis-ci.com/Telecominfraproject/oopt-gnpy
   :alt: Build Status
   :scale: 100%

.. |doi| image:: https://zenodo.org/badge/96894149.svg
   :target: https://zenodo.org/badge/latestdoi/96894149
   :alt: DOI
   :scale: 100%

TIP OOPT/PSE & PSE WG Charter
-----------------------------

We believe that openly sharing ideas, specifications, and other intellectual
property is the key to maximizing innovation and reducing complexity.

TIP OOPT/PSE's goal is to build an end-to-end simulation environment which
defines the network models of the optical device transfer functions and their
parameters. This environment will provide validation of the optical
performance requirements for the TIP OLS building blocks.

- The model may be approximate or complete depending on the network complexity.
  Each model shall be validated against the proposed network scenario.
- The environment must be able to process network models from multiple vendors,
  and also allow users to pick any implementation in an open source framework.
- The PSE will influence and benefit from the innovation of the DTC, API, and
  OLS working groups.
- The PSE represents a step along the journey towards multi-layer optimization.

License
-------

``gnpy`` is distributed under a standard BSD 3-Clause License.

See `LICENSE <LICENSE>`__ for more details.

3
docs/.gitignore
vendored
@@ -1,3 +0,0 @@
|
||||
/gnpy.rst
|
||||
/gnpy.*.rst
|
||||
/modules.rst
|
||||
179
docs/Makefile
@@ -1,177 +1,20 @@
|
||||
# Makefile for Sphinx documentation
|
||||
# Minimal makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
PAPER =
|
||||
SPHINXBUILD = python -msphinx
|
||||
SPHINXPROJ = GNpy
|
||||
SOURCEDIR = .
|
||||
BUILDDIR = _build
|
||||
|
||||
# User-friendly check for sphinx-build
|
||||
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
|
||||
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
|
||||
endif
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
PAPEROPT_letter = -D latex_paper_size=letter
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
# the i18n builder cannot share the environment and doctrees with the others
|
||||
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
|
||||
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
|
||||
|
||||
# Put it first so that "make" without argument is like "make help".
|
||||
help:
|
||||
@echo "Please use \`make <target>' where <target> is one of"
|
||||
@echo " html to make standalone HTML files"
|
||||
@echo " dirhtml to make HTML files named index.html in directories"
|
||||
@echo " singlehtml to make a single large HTML file"
|
||||
@echo " pickle to make pickle files"
|
||||
@echo " json to make JSON files"
|
||||
@echo " htmlhelp to make HTML files and a HTML help project"
|
||||
@echo " qthelp to make HTML files and a qthelp project"
|
||||
@echo " devhelp to make HTML files and a Devhelp project"
|
||||
@echo " epub to make an epub"
|
||||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
||||
@echo " latexpdf to make LaTeX files and run them through pdflatex"
|
||||
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
|
||||
@echo " text to make text files"
|
||||
@echo " man to make manual pages"
|
||||
@echo " texinfo to make Texinfo files"
|
||||
@echo " info to make Texinfo files and run them through makeinfo"
|
||||
@echo " gettext to make PO message catalogs"
|
||||
@echo " changes to make an overview of all changed/added/deprecated items"
|
||||
@echo " xml to make Docutils-native XML files"
|
||||
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
|
||||
@echo " linkcheck to check all external links for integrity"
|
||||
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
||||
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
clean:
|
||||
rm -rf $(BUILDDIR)/*
|
||||
.PHONY: help Makefile
|
||||
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
dirhtml:
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||
|
||||
singlehtml:
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
pickle:
|
||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||
@echo
|
||||
@echo "Build finished; now you can process the pickle files."
|
||||
|
||||
json:
|
||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||
@echo
|
||||
@echo "Build finished; now you can process the JSON files."
|
||||
|
||||
htmlhelp:
|
||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||
|
||||
qthelp:
|
||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
|
||||
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/gnpy.qhcp"
|
||||
@echo "To view the help file:"
|
||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/gnpy.qhc"
|
||||
|
||||
devhelp:
|
||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
||||
@echo
|
||||
@echo "Build finished."
|
||||
@echo "To view the help file:"
|
||||
@echo "# mkdir -p $$HOME/.local/share/devhelp/gnpy"
|
||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/gnpy"
|
||||
@echo "# devhelp"
|
||||
|
||||
epub:
|
||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
||||
@echo
|
||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
||||
|
||||
latex:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
latexpdf:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
latexpdfja:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through platex and dvipdfmx..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
text:
|
||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
||||
@echo
|
||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
||||
|
||||
man:
|
||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
||||
@echo
|
||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
||||
|
||||
texinfo:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo
|
||||
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
|
||||
@echo "Run \`make' in that directory to run these through makeinfo" \
|
||||
"(use \`make info' here to do that automatically)."
|
||||
|
||||
info:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo "Running Texinfo files through makeinfo..."
|
||||
make -C $(BUILDDIR)/texinfo info
|
||||
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
|
||||
|
||||
gettext:
|
||||
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
|
||||
@echo
|
||||
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
|
||||
|
||||
changes:
|
||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||
@echo
|
||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||
|
||||
linkcheck:
|
||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||
@echo
|
||||
@echo "Link check complete; look for any errors in the above output " \
|
||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||
|
||||
doctest:
|
||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
||||
|
||||
xml:
|
||||
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
|
||||
@echo
|
||||
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
|
||||
|
||||
pseudoxml:
|
||||
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
|
||||
@echo
|
||||
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
|
||||
# Catch-all target: route all unknown targets to Sphinx using the new
|
||||
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
|
||||
%: Makefile
|
||||
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
@@ -1 +0,0 @@
|
||||
.. include:: ../AUTHORS.rst
|
||||
1850
docs/biblio.bib
Normal file
File diff suppressed because it is too large
271
docs/conf.py
Executable file → Normal file
@@ -1,8 +1,8 @@
|
||||
#!/usr/bin/env python
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# gnpy documentation build configuration file, created by
|
||||
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
|
||||
# sphinx-quickstart on Mon Dec 18 14:41:01 2017.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
@@ -13,263 +13,164 @@
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
import os
|
||||
import sys
|
||||
sys.path.insert(0, os.path.abspath('../'))
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another
|
||||
# directory, add these directories to sys.path here. If the directory is
|
||||
# relative to the documentation root, use os.path.abspath to make it
|
||||
# absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# Get the project root dir, which is the parent dir of this
|
||||
cwd = os.getcwd()
|
||||
project_root = os.path.dirname(cwd)
|
||||
|
||||
# Insert the project root dir as the first element in the PYTHONPATH.
|
||||
# This lets us ensure that the source package is imported, and that its
|
||||
# version is used.
|
||||
sys.path.insert(0, project_root)
|
||||
|
||||
import gnpy
|
||||
|
||||
# -- General configuration ---------------------------------------------
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
#
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = ['sphinx.ext.autodoc',
|
||||
'sphinx.ext.mathjax',
|
||||
'sphinx.ext.githubpages','sphinxcontrib.bibtex']
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
#
|
||||
source_suffix = ['.rst', '.md']
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'gnpy'
|
||||
copyright = u"2017, <TBD>"
|
||||
project = 'gnpy'
|
||||
copyright = '2018, Telecom InfraProject - OOPT PSE Group'
|
||||
author = 'Telecom InfraProject - OOPT PSE Group'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement
|
||||
# for |version| and |release|, also used in various other places throughout
|
||||
# the built documents.
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = gnpy.__version__
|
||||
version = '0.1'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = gnpy.__version__
|
||||
release = '0.1'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to
|
||||
# some non-false value, then it is used:
|
||||
#today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
# This patterns also effect to html_static_path and html_extra_path
|
||||
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built
|
||||
# documents.
|
||||
#keep_warnings = False
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
|
||||
|
||||
# -- Options for HTML output -------------------------------------------
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'default'
|
||||
#
|
||||
on_rtd = os.environ.get('READTHEDOCS') == 'True'
|
||||
if on_rtd:
|
||||
html_theme = 'default'
|
||||
else:
|
||||
html_theme = 'alabaster'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a
|
||||
# theme further. For a list of options available for each theme, see the
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
#
|
||||
# html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as
|
||||
# html_title.
|
||||
#html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the
|
||||
# top of the sidebar.
|
||||
#html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon
|
||||
# of the docs. This file should be a Windows icon file (.ico) being
|
||||
# 16x16 or 32x32 pixels large.
|
||||
#html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets)
|
||||
# here, relative to this directory. They are copied after the builtin
|
||||
# static files, so a file named "default.css" will overwrite the builtin
|
||||
# "default.css".
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page
|
||||
# bottom, using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names
|
||||
# Custom sidebar templates, must be a dictionary that maps document names
|
||||
# to template names.
|
||||
#html_additional_pages = {}
|
||||
#
|
||||
# This is required for the alabaster theme
|
||||
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
|
||||
html_sidebars = {
|
||||
'**': [
|
||||
'about.html',
|
||||
'navigation.html',
|
||||
'relations.html', # needs 'show_related': True theme option to display
|
||||
'searchbox.html',
|
||||
'donate.html',
|
||||
]
|
||||
}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer.
|
||||
# Default is True.
|
||||
#html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer.
|
||||
# Default is True.
|
||||
#html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages
|
||||
# will contain a <link> tag referring to it. The value of this option
|
||||
# must be the base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
# -- Options for HTMLHelp output ------------------------------------------
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'gnpydoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output ------------------------------------------
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#'papersize': 'letterpaper',
|
||||
#
|
||||
# 'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#'pointsize': '10pt',
|
||||
#
|
||||
# 'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#'preamble': '',
|
||||
#
|
||||
# 'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#
|
||||
# 'figure_align': 'htbp',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass
|
||||
# [howto/manual]).
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
('index', 'gnpy.tex',
|
||||
u'gnpy Documentation',
|
||||
u'<TBD>', 'manual'),
|
||||
(master_doc, 'gnpy.tex', 'gnpy Documentation',
|
||||
'Telecom InfraProject - OOPT PSE Group', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at
|
||||
# the top of the title page.
|
||||
#latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings
|
||||
# are parts, not chapters.
|
||||
#latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output ------------------------------------
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('index', 'gnpy',
|
||||
u'gnpy Documentation',
|
||||
[u'<TBD>'], 1)
|
||||
(master_doc, 'gnpy', 'gnpy Documentation',
|
||||
[author], 1)
|
||||
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output ----------------------------------------
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
('index', 'gnpy',
|
||||
u'gnpy Documentation',
|
||||
u'<TBD>',
|
||||
'gnpy',
|
||||
'One line description of project.',
|
||||
(master_doc, 'gnpy', 'gnpy Documentation',
|
||||
author, 'gnpy', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#texinfo_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#texinfo_domain_indices = True
|
||||
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
#texinfo_show_urls = 'footnote'
|
||||
|
||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||
#texinfo_no_detailmenu = False
|
||||
autodoc_default_flags = ['members', 'undoc-members', 'private-members', 'show-inheritance']
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
.. include:: ../CONTRIBUTING.rst
|
||||
@@ -1 +0,0 @@
|
||||
.. include:: ../HISTORY.rst
|
||||
BIN
docs/images/GNPy-banner.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 518 KiB
100
docs/index.rst
@@ -1,18 +1,33 @@
|
||||
.. gnpy documentation master file, created by
   sphinx-quickstart on Mon Dec 18 14:41:01 2017.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to gnpy's documentation!
================================

**gnpy is an open-source, community-developed library for building route planning
and optimization tools in real-world mesh optical networks.**

`gnpy <http://github.com/telecominfraproject/gnpy>`_ is:

- a sponsored project of the `OOPT/PSE <http://telecominfraproject.com/project-groups-2/backhaul-projects/open-optical-packet-transport/>`_ working group of the `Telecom Infra Project <http://telecominfraproject.com>`_
- a fully community-driven, fully open-source library
- driven by a consortium of operators, vendors, and academic researchers
- intended for rapid development of production-grade route planning tools
- easily extensible to include custom network elements
- performant to the scale of real-world mesh optical networks

Documentation
=============

The following pages are meant to describe specific implementation details and
modeling assumptions behind gnpy.

.. toctree::
   :maxdepth: 2

   readme
   installation
   usage
   modules
   contributing
   authors
   history
   model

Indices and tables
==================

@@ -20,3 +35,68 @@ Indices and tables

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

Contributors in alphabetical order
==================================

+------------+------------+-----------------------+--------------------------------------+
| Name       | Surname    | Affiliation           | Contact                              |
+============+============+=======================+======================================+
| Alessio    | Ferrari    | Politecnico di Torino | alessio.ferrari@polito.it            |
+------------+------------+-----------------------+--------------------------------------+
| Anders     | Lindgren   | Telia Company         | Anders.X.Lindgren@teliacompany.com   |
+------------+------------+-----------------------+--------------------------------------+
| Andrea     | d'Amico    | Politecnico di Torino | andrea.damico@polito.it              |
+------------+------------+-----------------------+--------------------------------------+
| Brian      | Taylor     | Facebook              | briantaylor@fb.com                   |
+------------+------------+-----------------------+--------------------------------------+
| David      | Boertjes   | Ciena                 | dboertje@ciena.com                   |
+------------+------------+-----------------------+--------------------------------------+
| Diego      | Landa      | Facebook              | dlanda@fb.com                        |
+------------+------------+-----------------------+--------------------------------------+
| Esther     | Le Rouzic  | Orange                | esther.lerouzic@orange.com           |
+------------+------------+-----------------------+--------------------------------------+
| Gabriele   | Galimberti | Cisco                 | ggalimbe@cisco.com                   |
+------------+------------+-----------------------+--------------------------------------+
| Gert       | Grammel    | Juniper Networks      | ggrammel@juniper.net                 |
+------------+------------+-----------------------+--------------------------------------+
| Gilad      | Goldfarb   | Facebook              | giladg@fb.com                        |
+------------+------------+-----------------------+--------------------------------------+
| James      | Powell     | Telecom Infra Project | james.powell@telecominfraproject.com |
+------------+------------+-----------------------+--------------------------------------+
| Jan        | Kundrát    | Telecom Infra Project | jan.kundrat@telecominfraproject.com  |
+------------+------------+-----------------------+--------------------------------------+
| Jeanluc    | Augé       | Orange                | jeanluc.auge@orange.com              |
+------------+------------+-----------------------+--------------------------------------+
| Jonas      | Mårtensson | RISE Research Sweden  | jonas.martensson@ri.se               |
+------------+------------+-----------------------+--------------------------------------+
| Mattia     | Cantono    | Politecnico di Torino | mattia.cantono@polito.it             |
+------------+------------+-----------------------+--------------------------------------+
| Miguel     | Garrich    | University Catalunya  | miquel.garrich@upct.es               |
+------------+------------+-----------------------+--------------------------------------+
| Raj        | Nagarajan  | Lumentum              | raj.nagarajan@lumentum.com           |
+------------+------------+-----------------------+--------------------------------------+
| Roberts    | Miculens   | Lattelecom            | roberts.miculens@lattelecom.lv       |
+------------+------------+-----------------------+--------------------------------------+
| Shengxiang | Zhu        | University of Arizona | szhu@email.arizona.edu               |
+------------+------------+-----------------------+--------------------------------------+
| Stefan     | Melin      | Telia Company         | Stefan.Melin@teliacompany.com        |
+------------+------------+-----------------------+--------------------------------------+
| Vittorio   | Curri      | Politecnico di Torino | vittorio.curri@polito.it             |
+------------+------------+-----------------------+--------------------------------------+
| Xufeng     | Liu        | Jabil                 | xufeng_liu@jabil.com                 |
+------------+------------+-----------------------+--------------------------------------+

--------------

- The goal is to build an end-to-end simulation environment which defines the
  network models of the optical device transfer functions and their parameters.
  This environment will provide validation of the optical performance
  requirements for the TIP OLS building blocks.
- The model may be approximate or complete depending on the network complexity.
  Each model shall be validated against the proposed network scenario.
- The environment must be able to process network models from multiple vendors,
  and also allow users to pick any implementation in an open source framework.
- The PSE will influence and benefit from the innovation of the DTC, API, and
  OLS working groups.
- The PSE represents a step along the journey towards multi-layer optimization.
@@ -1,51 +0,0 @@

.. highlight:: shell

============
Installation
============


Stable release
--------------

To install gnpy, run this command in your terminal:

.. code-block:: console

    $ pip install gnpy

This is the preferred method to install gnpy, as it will always install the most recent stable release.

If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.

.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/


From sources
------------

The sources for gnpy can be downloaded from the `Github repo`_.

You can either clone the public repository:

.. code-block:: console

    $ git clone git://github.com/<TBD>/gnpy

Or download the `tarball`_:

.. code-block:: console

    $ curl -OL https://github.com/<TBD>/gnpy/tarball/master

Once you have a copy of the source, you can install it with:

.. code-block:: console

    $ python setup.py install


.. _Github repo: https://github.com/<TBD>/gnpy
.. _tarball: https://github.com/<TBD>/gnpy/tarball/master
278
docs/make.bat
@@ -1,242 +1,36 @@
|
||||
@ECHO OFF
|
||||
|
||||
REM Command file for Sphinx documentation
|
||||
|
||||
if "%SPHINXBUILD%" == "" (
|
||||
set SPHINXBUILD=sphinx-build
|
||||
)
|
||||
set BUILDDIR=_build
|
||||
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
|
||||
set I18NSPHINXOPTS=%SPHINXOPTS% .
|
||||
if NOT "%PAPER%" == "" (
|
||||
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
|
||||
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
|
||||
)
|
||||
|
||||
if "%1" == "" goto help
|
||||
|
||||
if "%1" == "help" (
|
||||
:help
|
||||
echo.Please use `make ^<target^>` where ^<target^> is one of
|
||||
echo. html to make standalone HTML files
|
||||
echo. dirhtml to make HTML files named index.html in directories
|
||||
echo. singlehtml to make a single large HTML file
|
||||
echo. pickle to make pickle files
|
||||
echo. json to make JSON files
|
||||
echo. htmlhelp to make HTML files and a HTML help project
|
||||
echo. qthelp to make HTML files and a qthelp project
|
||||
echo. devhelp to make HTML files and a Devhelp project
|
||||
echo. epub to make an epub
|
||||
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
|
||||
echo. text to make text files
|
||||
echo. man to make manual pages
|
||||
echo. texinfo to make Texinfo files
|
||||
echo. gettext to make PO message catalogs
|
||||
echo. changes to make an overview over all changed/added/deprecated items
|
||||
echo. xml to make Docutils-native XML files
|
||||
echo. pseudoxml to make pseudoxml-XML files for display purposes
|
||||
echo. linkcheck to check all external links for integrity
|
||||
echo. doctest to run all doctests embedded in the documentation if enabled
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "clean" (
|
||||
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
|
||||
del /q /s %BUILDDIR%\*
|
||||
goto end
|
||||
)
|
||||
|
||||
|
||||
%SPHINXBUILD% 2> nul
|
||||
if errorlevel 9009 (
|
||||
echo.
|
||||
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
|
||||
echo.installed, then set the SPHINXBUILD environment variable to point
|
||||
echo.to the full path of the 'sphinx-build' executable. Alternatively you
|
||||
echo.may add the Sphinx directory to PATH.
|
||||
echo.
|
||||
echo.If you don't have Sphinx installed, grab it from
|
||||
echo.http://sphinx-doc.org/
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
if "%1" == "html" (
|
||||
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "dirhtml" (
|
||||
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "singlehtml" (
|
||||
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pickle" (
|
||||
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the pickle files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "json" (
|
||||
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the JSON files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "htmlhelp" (
|
||||
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run HTML Help Workshop with the ^
|
||||
.hhp project file in %BUILDDIR%/htmlhelp.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "qthelp" (
|
||||
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run "qcollectiongenerator" with the ^
|
||||
.qhcp project file in %BUILDDIR%/qthelp, like this:
|
||||
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\gnpy.qhcp
|
||||
echo.To view the help file:
|
||||
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\gnpy.ghc
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "devhelp" (
|
||||
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "epub" (
|
||||
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The epub file is in %BUILDDIR%/epub.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latex" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdf" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf
|
||||
cd %BUILDDIR%/..
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdfja" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf-ja
|
||||
cd %BUILDDIR%/..
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "text" (
|
||||
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The text files are in %BUILDDIR%/text.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "man" (
|
||||
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The manual pages are in %BUILDDIR%/man.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "texinfo" (
|
||||
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "gettext" (
|
||||
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "changes" (
|
||||
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.The overview file is in %BUILDDIR%/changes.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "linkcheck" (
|
||||
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Link check complete; look for any errors in the above output ^
|
||||
or in %BUILDDIR%/linkcheck/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "doctest" (
|
||||
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of doctests in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/doctest/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "xml" (
|
||||
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The XML files are in %BUILDDIR%/xml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pseudoxml" (
|
||||
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
|
||||
goto end
|
||||
)
|
||||
|
||||
:end
|
||||
@ECHO OFF
|
||||
|
||||
pushd %~dp0
|
||||
|
||||
REM Command file for Sphinx documentation
|
||||
|
||||
if "%SPHINXBUILD%" == "" (
|
||||
set SPHINXBUILD=python -msphinx
|
||||
)
|
||||
set SOURCEDIR=.
|
||||
set BUILDDIR=_build
|
||||
set SPHINXPROJ=GNpy
|
||||
|
||||
if "%1" == "" goto help
|
||||
|
||||
%SPHINXBUILD% >NUL 2>NUL
|
||||
if errorlevel 9009 (
|
||||
echo.
|
||||
echo.The Sphinx module was not found. Make sure you have Sphinx installed,
|
||||
echo.then set the SPHINXBUILD environment variable to point to the full
|
||||
echo.path of the 'sphinx-build' executable. Alternatively you may add the
|
||||
echo.Sphinx directory to PATH.
|
||||
echo.
|
||||
echo.If you don't have Sphinx installed, grab it from
|
||||
echo.http://sphinx-doc.org/
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
|
||||
goto end
|
||||
|
||||
:help
|
||||
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
|
||||
|
||||
:end
|
||||
popd
|
||||
|
||||
146
docs/model.rst
Normal file
@@ -0,0 +1,146 @@
|
||||
The QoT estimation in the PSE framework of TIP-OOPT
=======================================================

QoT-E including ASE noise and NLI accumulation
----------------------------------------------

The operations of the PSE simulative framework are based on the capability to
estimate the QoT of one or more channels operating lightpaths over a given
network route. For backbone transport networks, we can suppose that
transceivers are operating polarization-division-multiplexed multilevel
modulation formats with DSP-based coherent receivers, including equalization.
For the optical links, we focus on state-of-the-art amplified and uncompensated
fiber links, connecting network nodes including ROADMs, where add and drop
operations on data traffic are performed. In such a transmission scenario, it
is well accepted
:cite:`vacondio_nonlinear_2012,bononi_modeling_2012,carena_modeling_2012,mecozzi_nonlinear_2012,secondini_analytical_2012,johannisson_perturbation_2013,dar_properties_2013,serena_alternative_2013,secondini_achievable_2013,poggiolini_gn-model_2014,dar_accumulation_2014,poggiolini_analytical_2011,savory_approximations_2013,bononi_single-_2013,johannisson_modeling_2014`
to assume that transmission performance is limited by the amplified
spontaneous emission (ASE) noise generated by optical amplifiers and
by nonlinear propagation effects: accumulation of a Gaussian disturbance
defined as nonlinear interference (NLI) and generation of phase noise.
State-of-the-art DSP in commercial transceivers is typically able to
compensate for most of the phase noise through carrier-phase estimator
(CPE) algorithms, for modulation formats with cardinality up to 16, per
polarization state
:cite:`poggiolini_recent_2017,schmidt_experimental_2015,fehenberger_experimental_2016`.
So, for backbone networks covering medium-to-wide geographical areas, we
can suppose that propagation is limited by the accumulation of two
Gaussian disturbances: the ASE noise and the NLI. Additional impairments
such as filtering effects introduced by ROADMs can be considered as
additional equivalent power penalties depending on the ratio between the
channel bandwidth and the ROADM filter bandwidth, and on the number of
traversed ROADMs (hops) of the route under analysis. Modeling the two major
sources of impairment as Gaussian disturbances, and the receivers being
*coherent*, the unique QoT parameter determining the bit error rate
(BER) for the considered transmission scenario is the generalized
signal-to-noise ratio (SNR) defined as

.. math::

    {\text{SNR}} = L_F \frac{P_{\text{ch}}}{P_{\text{ASE}}+P_{\text{NLI}}} = L_F \left(\frac{1}{{\text{SNR}}_{\text{LIN}}}+\frac{1}{{\text{SNR}}_{\text{NL}}}\right)^{-1}

where :math:`P_{\text{ch}}` is the channel power,
:math:`P_{\text{ASE}}` and :math:`P_{\text{NLI}}` are the power levels of the disturbances
in the channel bandwidth for ASE noise and NLI, respectively, and
:math:`L_F` is a parameter, assuming values smaller than or equal to one,
that summarizes equivalent power penalties such as
filtering effects. Note that for state-of-the-art equipment, filtering
effects can typically be neglected over routes with few hops
:cite:`rahman_mitigation_2014,foggi_overcoming_2015`.
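
As a quick illustration of how the two noise contributions combine, the
following minimal sketch (not part of the gnpy code base; all numbers are
made up for the example) evaluates the generalized SNR from a channel power,
an ASE power, an NLI power and a penalty factor :math:`L_F`:

.. code-block:: python

    from math import log10

    def db_to_lin(x_db):
        return 10 ** (x_db / 10)

    def lin_to_db(x_lin):
        return 10 * log10(x_lin)

    # Hypothetical per-channel quantities (illustrative values only)
    p_ch_dbm = 0.0      # channel power
    p_ase_dbm = -20.0   # ASE power in the channel bandwidth
    p_nli_dbm = -23.0   # NLI power in the channel bandwidth
    l_f_db = -0.5       # equivalent penalty, L_F <= 1 (i.e. <= 0 dB)

    # Direct evaluation: SNR = L_F * P_ch / (P_ASE + P_NLI)
    snr = db_to_lin(l_f_db) * db_to_lin(p_ch_dbm) / (
        db_to_lin(p_ase_dbm) + db_to_lin(p_nli_dbm))

    # Equivalent combination of the two partial SNRs
    snr_lin_part = db_to_lin(p_ch_dbm - p_ase_dbm)   # SNR_LIN = P_ch / P_ASE
    snr_nl_part = db_to_lin(p_ch_dbm - p_nli_dbm)    # SNR_NL  = P_ch / P_NLI
    snr_alt = db_to_lin(l_f_db) / (1 / snr_lin_part + 1 / snr_nl_part)

    print(lin_to_db(snr), lin_to_db(snr_alt))   # both ~17.7 dB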

To properly estimate :math:`P_{\text{ch}}` and :math:`P_{\text{ASE}}`,
the transmitted power at the beginning of the considered route must be
known, and losses and amplifier gains and noise figures, including their
variation with frequency, must be characterized. So, the evaluation of
:math:`{\text{SNR}}_{\text{LIN}}` *just* requires an accurate
knowledge of the equipment, which is not a trivial aspect, but it is not
related to physical-model issues. For the evaluation of the NLI, several
models have been proposed and validated in the technical literature
:cite:`vacondio_nonlinear_2012,bononi_modeling_2012,carena_modeling_2012,mecozzi_nonlinear_2012,secondini_analytical_2012,johannisson_perturbation_2013,dar_properties_2013,serena_alternative_2013,secondini_achievable_2013,poggiolini_gn-model_2014,dar_accumulation_2014,poggiolini_analytical_2011,savory_approximations_2013,bononi_single-_2013,johannisson_modeling_2014`.
The decision about which model to test within the PSE activities was
driven by two requirements of the entire PSE framework:

i. the model must be *local*, i.e., related individually to each network
   element (i.e., fiber span) generating NLI, independently of the preceding
   and subsequent elements; and
ii. the related computational time must be compatible with interactive
    operations.

So, the choice fell on the Gaussian Noise
(GN) model with incoherent accumulation of NLI over fiber spans
:cite:`poggiolini_gn-model_2014`. We implemented both the
exact GN-model evaluation of NLI based on a double integral (Eq. (11) of
:cite:`poggiolini_gn-model_2014`) and its analytical
approximation (Eq. (120)-(121) of
:cite:`poggiolini_analytical_2011`). We performed several
validation analyses comparing the results of the two implementations with
split-step simulations over wide bandwidths
:cite:`pilori_ffss_2017`, and the results clearly showed that,
for fiber types with chromatic dispersion roughly larger than 4
ps/nm/km, the analytical approximation ensures excellent accuracy
with a computational time compatible with real-time operations.

The Gaussian Noise Model to evaluate the NLI
--------------------------------------------

As previously stated, when a DSP-based coherent receiver is exploited, as in
all state-of-the-art equipment, fiber propagation of multilevel modulation
formats relying on polarization division multiplexing generates impairments
that can be summarized as a single disturbance called nonlinear interference
(NLI). From a practical point of view, the NLI can be modeled as an additive
Gaussian random process added by each fiber span, whose strength depends on
the cube of the input power spectral density and on the fiber-span parameters,
as sketched below.
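
The toy sketch below is illustrative only (it is not gnpy code): it
accumulates the NLI incoherently over a chain of identical spans, with the
per-span NLI power growing with the cube of the channel power through a
hypothetical per-span efficiency ``eta_nli`` (in the GN model this factor is
computed from the fiber parameters and the transmitted spectrum):

.. code-block:: python

    # Illustrative only: incoherent accumulation of NLI over identical spans.
    n_spans = 10
    eta_nli = 1.0e3        # hypothetical per-span NLI efficiency, 1/W^2
    p_ch = 1.0e-3          # channel power in W (0 dBm)

    p_nli_span = eta_nli * p_ch ** 3      # cube-law dependence on power
    p_nli_total = n_spans * p_nli_span    # incoherent (linear) sum over spans
    snr_nl = p_ch / p_nli_total           # nonlinear-limited SNR, linear units

With these made-up numbers the nonlinear SNR scales as the inverse of the
number of spans, which is exactly the incoherent-accumulation assumption
discussed above.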

Since the market introduction in 2007 of the first transponder based on such a
transmission technique, the scientific community has worked intensively to
define its propagation behavior. First, the role of in-line chromatic
dispersion compensation has been investigated, deducing that, besides not
being essential, it is indeed detrimental to performance
:cite:`curri_dispersion_2008`. Then, it has been observed that the fiber
propagation impairments are practically summarized by the sole NLI, all the
other phenomena being compensated for by the blind equalizer implemented in
the receiver DSP :cite:`carena_statistical_2010`. Once these assessments were
accepted by the community, several prestigious research groups started working
on deriving analytical models able to estimate the NLI accumulation and,
consequently, the generalized SNR that sets the BER, according to the
transponder BER vs. SNR performance. Many models delivering different levels
of accuracy have been developed and validated. As previously clarified, for
the purposes of the PSE framework, the GN-model with incoherent accumulation
of NLI over fiber spans has been selected as adequate. The first reason for
such a choice is that it is a "local" model, related to each fiber span
independently of the preceding and succeeding network elements. The other
characteristic driving the choice is the availability of a closed form for the
model, permitting a real-time evaluation as required by the PSE framework. For
a detailed derivation of the model, please refer to
:cite:`poggiolini_analytical_2011`, while a qualitative description can be
summarized as follows. The GN-model assumes that the channel comb propagating
in the fiber is well approximated by unpolarized, spectrally shaped Gaussian
noise. In such a scenario, supposing to rely - as in state-of-the-art
equipment - on a receiver entirely compensating for linear propagation
effects, propagation in the fiber only excites the four-wave mixing (FWM)
process among the continuity of tones occupying the bandwidth. Such FWM
generates an unpolarized complex Gaussian disturbance in each spectral slot
that can be easily evaluated by extending the FWM theory from a set of
discrete tones - the standard FWM theory introduced back in the 90s by Inoue
:cite:`Innoue-FWM` - to a continuity of tones, possibly spectrally shaped.
Signals propagating in the fiber are not equivalent to Gaussian noise, but
thanks to the absence of in-line compensation for chromatic dispersion, they
become so over short distances. So, the Gaussian noise model with incoherent
accumulation of NLI has extensively proven to be a quick yet accurate and
conservative tool to estimate the impairments of fiber propagation. Note that
the GN-model has not been derived with the aim of an *exact* performance
estimation, but to pursue a conservative performance prediction. So,
considering these characteristics, the fact that the NLI is always a secondary
effect with respect to the ASE noise accumulation, and - most importantly -
that the linear propagation parameters (losses, gains and noise figures) are
typically known only within a variation range, a QoT estimator based on the GN
model is adequate to deliver performance predictions in terms of a reasonable
SNR range, rather than an exact value. As a final remark, it must be clarified
that the GN-model is adequate when relying on a relatively narrow bandwidth of
up to a few THz. When exceeding such a bandwidth occupation, the GN-model must
be generalized by introducing the interaction with Stimulated Raman Scattering
in order to give a proper estimation for all channels
:cite:`cantono2018modeling`. This will be the main upgrade required within the
PSE framework.

.. bibliography:: biblio.bib
@@ -1 +0,0 @@
|
||||
.. include:: ../README.rst
|
||||
94
docs/source/gnpy.core.rst
Normal file
@@ -0,0 +1,94 @@
|
||||
gnpy\.core package
==================

Submodules
----------

gnpy\.core\.ansi_escapes module
-------------------------------

.. automodule:: gnpy.core.ansi_escapes
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.convert module
--------------------------

.. automodule:: gnpy.core.convert
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.elements module
---------------------------

.. automodule:: gnpy.core.elements

gnpy\.core\.equipment module
----------------------------

.. automodule:: gnpy.core.equipment
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.exceptions module
-----------------------------

.. automodule:: gnpy.core.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.execute module
--------------------------

.. automodule:: gnpy.core.execute

gnpy\.core\.info module
-----------------------

.. automodule:: gnpy.core.info

gnpy\.core\.network module
--------------------------

.. automodule:: gnpy.core.network

gnpy\.core\.node module
-----------------------

.. automodule:: gnpy.core.node

gnpy\.core\.request module
--------------------------

.. automodule:: gnpy.core.request
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.service_sheet module
--------------------------------

.. automodule:: gnpy.core.service_sheet
    :members:
    :undoc-members:
    :show-inheritance:

gnpy\.core\.units module
------------------------

.. automodule:: gnpy.core.units

gnpy\.core\.utils module
------------------------

.. automodule:: gnpy.core.utils


Module contents
---------------

.. automodule:: gnpy.core
14
docs/source/gnpy.rst
Normal file
@@ -0,0 +1,14 @@
|
||||
gnpy package
============

Subpackages
-----------

.. toctree::

    gnpy.core

Module contents
---------------

.. automodule:: gnpy
7
docs/source/modules.rst
Normal file
@@ -0,0 +1,7 @@
|
||||
gnpy
====

.. toctree::
   :maxdepth: 4

   gnpy
@@ -1,7 +0,0 @@

=====
Usage
=====

To use gnpy in a project::

    import gnpy
124
examples/2019-demo-equipment.json
Normal file
124
examples/2019-demo-equipment.json
Normal file
@@ -0,0 +1,124 @@
|
||||
{ "Edfa":[
|
||||
|
||||
{
|
||||
"type_variety": "fixed27",
|
||||
"type_def": "fixed_gain",
|
||||
"gain_flatmax": 27,
|
||||
"gain_min": 27,
|
||||
"p_max": 21,
|
||||
"nf0": 5.5,
|
||||
"allowed_for_design": false
|
||||
},
|
||||
|
||||
{
|
||||
"type_variety": "fixed22",
|
||||
"type_def": "fixed_gain",
|
||||
"gain_flatmax": 22,
|
||||
"gain_min": 22,
|
||||
"p_max": 21,
|
||||
"nf0": 5.5,
|
||||
"allowed_for_design": false
|
||||
}
|
||||
],
|
||||
"Fiber":[{
|
||||
"type_variety": "SSMF",
|
||||
"dispersion": 1.67e-05,
|
||||
"gamma": 0.00127
|
||||
},
|
||||
{
|
||||
"type_variety": "NZDF",
|
||||
"dispersion": 0.5e-05,
|
||||
"gamma": 0.00146
|
||||
},
|
||||
{
|
||||
"type_variety": "LOF",
|
||||
"dispersion": 2.2e-05,
|
||||
"gamma": 0.000843
|
||||
}
|
||||
],
|
||||
"Span":[{
|
||||
"power_mode": false,
|
||||
"delta_power_range_db": [-2,3,0.5],
|
||||
"max_fiber_lineic_loss_for_raman": 0.25,
|
||||
"target_extended_gain": 2.5,
|
||||
"max_length": 150,
|
||||
"length_units": "km",
|
||||
"max_loss": 28,
|
||||
"padding": 10,
|
||||
"EOL": 0,
|
||||
"con_in": 0,
|
||||
"con_out": 0
|
||||
}
|
||||
],
|
||||
"Roadm":[{
|
||||
"target_pch_out_db": -25,
|
||||
"add_drop_osnr": 30.00,
|
||||
"restrictions": {
|
||||
"preamp_variety_list":[],
|
||||
"booster_variety_list":[]
|
||||
}
|
||||
}],
|
||||
"SI":[{
|
||||
"f_min": 191.6e12,
|
||||
"baud_rate": 32e9,
|
||||
"f_max":195.1e12,
|
||||
"spacing": 50e9,
|
||||
"power_dbm": 0,
|
||||
"power_range_db": [0,0,1],
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"sys_margins": 2
|
||||
}],
|
||||
"Transceiver":[
|
||||
{
|
||||
"type_variety": "Cassini",
|
||||
"frequency":{
|
||||
"min": 191.35e12,
|
||||
"max": 196.1e12
|
||||
},
|
||||
"mode":[
|
||||
{
|
||||
|
||||
"format": "dp-qpsk",
|
||||
"baud_rate": 32e9,
|
||||
"OSNR": 11,
|
||||
"bit_rate": 100e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 37.5e9,
|
||||
"cost":1
|
||||
},
|
||||
{
|
||||
"format": "16-qam",
|
||||
"baud_rate": 66e9,
|
||||
"OSNR": 15,
|
||||
"bit_rate": 200e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 75e9,
|
||||
"cost":1
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type_variety": "Voyager",
|
||||
"frequency":{
|
||||
"min": 191.35e12,
|
||||
"max": 196.1e12
|
||||
},
|
||||
"mode":[
|
||||
{
|
||||
"format": "mode 1",
|
||||
"baud_rate": 32e9,
|
||||
"OSNR": 12,
|
||||
"bit_rate": 100e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 37.5e9,
|
||||
"cost":1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
}
|
||||
67
examples/2019-demo-services.json
Normal file
67
examples/2019-demo-services.json
Normal file
@@ -0,0 +1,67 @@
|
||||
{
|
||||
"path-request": [
|
||||
{
|
||||
"request-id": "first",
|
||||
"source": "netconf:10.0.254.93:830",
|
||||
"destination": "netconf:10.0.254.94:830",
|
||||
"src-tp-id": "trx-Amsterdam",
|
||||
"dst-tp-id": "trx-Bremen",
|
||||
"bidirectional": true,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Cassini",
|
||||
"trx_mode": null,
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": null,
|
||||
"path_bandwidth": 100000000000.0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "second",
|
||||
"source": "netconf:10.0.254.93:830",
|
||||
"destination": "netconf:10.0.254.94:830",
|
||||
"src-tp-id": "trx-Amsterdam",
|
||||
"dst-tp-id": "trx-Bremen",
|
||||
"bidirectional": true,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Cassini",
|
||||
"trx_mode": null,
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": null,
|
||||
"path_bandwidth": 100000000000.0
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"synchronization": [
|
||||
{
|
||||
"synchronization-id": "some redundancy please",
|
||||
"svec": {
|
||||
"relaxable": "false",
|
||||
"disjointness": "node link",
|
||||
"request-id-number": [
|
||||
"first",
|
||||
"second"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
1263
examples/2019-demo-topology.json
Normal file
1263
examples/2019-demo-topology.json
Normal file
File diff suppressed because it is too large
Load Diff
179
examples/2019-generate-tip-demo.py
Normal file
179
examples/2019-generate-tip-demo.py
Normal file
@@ -0,0 +1,179 @@
|
||||
# How many nodes in the ring topology? Up to eight is supported, then I ran out of cities..
|
||||
HOW_MANY = 3
|
||||
|
||||
# city names
|
||||
ALL_CITIES = [
|
||||
'Amsterdam',
|
||||
'Bremen',
|
||||
'Cologne',
|
||||
'Dueseldorf',
|
||||
'Eindhoven',
|
||||
'Frankfurt',
|
||||
'Ghent',
|
||||
'Hague',
|
||||
]
|
||||
# end of configurable parameters
|
||||
|
||||
|
||||
J = {
|
||||
"elements": [],
|
||||
"connections": [],
|
||||
}
|
||||
|
||||
def unidir_join(a, b):
|
||||
global J
|
||||
J["connections"].append(
|
||||
{"from_node": a, "to_node": b}
|
||||
)
|
||||
|
||||
def mk_edfa(name, gain, voa=0.0):
|
||||
global J
|
||||
J["elements"].append(
|
||||
{"uid": name, "type": "Edfa", "type_variety": f"fixed{gain}", "operational": {"gain_target": gain, "out_voa": voa}}
|
||||
)
|
||||
|
||||
def add_att(a, b, att):
|
||||
global J
|
||||
if att > 0:
|
||||
uid = f"att-({a})-({b})"
|
||||
else:
|
||||
uid = f"splice-({a})-({b})"
|
||||
J["elements"].append(
|
||||
{"uid": uid, "type": "Fused", "params": {"loss": att}},
|
||||
)
|
||||
unidir_join(a, uid)
|
||||
unidir_join(uid, b)
|
||||
return uid
|
||||
|
||||
def build_fiber(city1, city2):
|
||||
global J
|
||||
J["elements"].append(
|
||||
{
|
||||
"uid": f"fiber-{city1}-{city2}",
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 50,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": 1.5,
|
||||
"con_out": 1.5,
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
def unidir_patch(a, b):
|
||||
global J
|
||||
uid = f"patch-({a})-({b})"
|
||||
J["elements"].append(
|
||||
{
|
||||
"uid": uid,
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2,
|
||||
"con_in": 0.5,
|
||||
"con_out": 0.5,
|
||||
}
|
||||
}
|
||||
)
|
||||
add_att(a, uid, 0.0)
|
||||
add_att(uid, b, 0.0)
|
||||
|
||||
for CITY in (ALL_CITIES[x] for x in range(0, HOW_MANY)):
|
||||
J["elements"].append(
|
||||
{"uid": f"trx-{CITY}", "type": "Transceiver"}
|
||||
)
|
||||
target_pwr = [
|
||||
{"to_node": f"trx-{CITY}", "target_pch_out_db": -25},
|
||||
{"to_node": f"splice-(roadm-{CITY}-AD)-(patch-(roadm-{CITY}-AD)-(roadm-{CITY}-L1))", "target_pch_out_db": -12},
|
||||
{"to_node": f"splice-(roadm-{CITY}-AD)-(patch-(roadm-{CITY}-AD)-(roadm-{CITY}-L2))", "target_pch_out_db": -12},
|
||||
]
|
||||
J["elements"].append(
|
||||
{"uid": f"roadm-{CITY}-AD", "type": "Roadm", "params": {"target_pch_out_db": -2.0, "per_degree_target_pch_out_db": target_pwr}}
|
||||
)
|
||||
unidir_join(f"trx-{CITY}", f"roadm-{CITY}-AD")
|
||||
unidir_join(f"roadm-{CITY}-AD", f"trx-{CITY}")
|
||||
|
||||
for n in (1,2):
|
||||
target_pwr = [
|
||||
{"to_node": f"roadm-{CITY}-L{n}-booster", "target_pch_out_db": -23},
|
||||
{"to_node": f"splice-(roadm-{CITY}-L{n})-(patch-(roadm-{CITY}-L{n})-(roadm-{CITY}-AD))", "target_pch_out_db": -12},
|
||||
]
|
||||
for m in (1,2):
|
||||
if m == n:
|
||||
continue
|
||||
target_pwr.append(
|
||||
{"to_node": f"splice-(roadm-{CITY}-L{n})-(patch-(roadm-{CITY}-L{n})-(roadm-{CITY}-L{m}))", "target_pch_out_db": -12},
|
||||
)
|
||||
J["elements"].append(
|
||||
{"uid": f"roadm-{CITY}-L{n}", "type": "Roadm", "params": {"target_pch_out_db": -23.0, "per_degree_target_pch_out_db": target_pwr}}
|
||||
)
|
||||
mk_edfa(f"roadm-{CITY}-L{n}-booster", 22)
|
||||
mk_edfa(f"roadm-{CITY}-L{n}-preamp", 27)
|
||||
unidir_join(f"roadm-{CITY}-L{n}", f"roadm-{CITY}-L{n}-booster")
|
||||
unidir_join(f"roadm-{CITY}-L{n}-preamp", f"roadm-{CITY}-L{n}")
|
||||
|
||||
unidir_patch(f"roadm-{CITY}-AD", f"roadm-{CITY}-L{n}")
|
||||
unidir_patch(f"roadm-{CITY}-L{n}", f"roadm-{CITY}-AD")
|
||||
for m in (1,2):
|
||||
if m == n:
|
||||
continue
|
||||
#add_att(f"roadm-{CITY}-L{n}", f"roadm-{CITY}-L{m}", 22)
|
||||
unidir_patch(f"roadm-{CITY}-L{n}", f"roadm-{CITY}-L{m}")
|
||||
|
||||
for city1, city2 in ((ALL_CITIES[i], ALL_CITIES[i + 1] if i < HOW_MANY - 1 else ALL_CITIES[0]) for i in range(0, HOW_MANY)):
|
||||
build_fiber(city1, city2)
|
||||
unidir_join(f"roadm-{city1}-L1-booster", f"fiber-{city1}-{city2}")
|
||||
unidir_join(f"fiber-{city1}-{city2}", f"roadm-{city2}-L2-preamp")
|
||||
build_fiber(city2, city1)
|
||||
unidir_join(f"roadm-{city2}-L2-booster", f"fiber-{city2}-{city1}")
|
||||
unidir_join(f"fiber-{city2}-{city1}", f"roadm-{city1}-L1-preamp")
|
||||
|
||||
|
||||
for _, E in enumerate(J["elements"]):
|
||||
uid = E["uid"]
|
||||
if uid.startswith("roadm-") and (uid.endswith("-L1-booster") or uid.endswith("-L2-booster")):
|
||||
E["operational"]["out_voa"] = 12.0
|
||||
#if uid.endswith("-AD-add"):
|
||||
# E["operational"]["out_voa"] = 21
|
||||
|
||||
translate = {
|
||||
#"trx-Amsterdam": "10.0.254.93",
|
||||
#"trx-Bremen": "10.0.254.94",
|
||||
"trx-Amsterdam": "10.0.254.76",
|
||||
"trx-Bremen": "10.0.254.77",
|
||||
|
||||
# Amsterdam A/D: coherent-v9u
|
||||
"roadm-Amsterdam-AD": "10.0.254.107",
|
||||
# Bremen A/D: -spi
|
||||
"roadm-Bremen-AD": "10.0.254.225",
|
||||
|
||||
# Amsterdam -> Bremen ...QR79
|
||||
"roadm-Amsterdam-L1": "10.0.254.78",
|
||||
# Bremen -> Amsterdam ...QCP9
|
||||
"roadm-Bremen-L2": "10.0.254.102",
|
||||
|
||||
# Bremen -> Cologne ...WKP
|
||||
"roadm-Bremen-L1": "10.0.254.100",
|
||||
# Cologne -> Bremen ...QLK6
|
||||
"roadm-Cologne-L2": "10.0.254.104",
|
||||
|
||||
# Cologne -> Amsterdam ...TQQ
|
||||
"roadm-Cologne-L1": "10.0.254.99",
|
||||
# Amsterdam -> Cologne ...Q7JS
|
||||
"roadm-Amsterdam-L2": "10.0.254.79",
|
||||
|
||||
# spare Line/Degree ...QC8B
|
||||
"spare-line-degree": "10.0.254.101",
|
||||
# spare Add/Drop: ...NNN
|
||||
"spare-add-drop": "10.0.254.228",
|
||||
}
|
||||
|
||||
import json
|
||||
s = json.dumps(J, indent=2)
|
||||
for (old, new) in translate.items():
|
||||
s = s.replace(f'"{old}"', f'"netconf:{new}:830"')
|
||||
print(s)
|
||||
7631
examples/CORONET_CONUS_Topology.json
Normal file
7631
examples/CORONET_CONUS_Topology.json
Normal file
File diff suppressed because it is too large
Load Diff
10278
examples/CORONET_Global_Topology.json
Normal file
10278
examples/CORONET_Global_Topology.json
Normal file
File diff suppressed because it is too large
Load Diff
BIN
examples/CORONET_Global_Topology.xls
Normal file
BIN
examples/CORONET_Global_Topology.xls
Normal file
Binary file not shown.
160
examples/Juniper-BoosterHG.json
Normal file
160
examples/Juniper-BoosterHG.json
Normal file
@@ -0,0 +1,160 @@
|
||||
{
|
||||
"nf_fit_coeff": [
|
||||
0.0008,
|
||||
0.0272,
|
||||
-0.2249,
|
||||
6.4902
|
||||
],
|
||||
"f_min": 191.35e12,
|
||||
"f_max": 196.1e12,
|
||||
"nf_ripple": [
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0
|
||||
],
|
||||
"gain_ripple": [
|
||||
0.15017064489112,
|
||||
0.14157768006701,
|
||||
0.00223094639866,
|
||||
-0.06701528475711,
|
||||
-0.05982935510889,
|
||||
-0.01028161641541,
|
||||
0.02740682579566,
|
||||
0.02795958961474,
|
||||
0.00107516750419,
|
||||
-0.02199015912898,
|
||||
-0.00877407872698,
|
||||
0.0453465242881,
|
||||
0.1204721524288,
|
||||
0.18936662479061,
|
||||
0.23826109715241,
|
||||
0.26956762981574,
|
||||
0.27836159966498,
|
||||
0.26941687604691,
|
||||
0.23579878559464,
|
||||
0.18147717755444,
|
||||
0.1191656197655,
|
||||
0.05921587102177,
|
||||
0.01509526800668,
|
||||
-0.01053287269681,
|
||||
-0.02475397822447,
|
||||
-0.01847257118928,
|
||||
-0.00420121440538,
|
||||
0.01584903685091,
|
||||
0.0399193886097,
|
||||
0.04494451423784,
|
||||
0.04961788107202,
|
||||
0.03378873534338,
|
||||
0.01027114740367,
|
||||
-0.01319618927973,
|
||||
-0.04962835008375,
|
||||
-0.0765630234506,
|
||||
-0.10606051088777,
|
||||
-0.13550774706866,
|
||||
-0.15460322445561,
|
||||
-0.17113588777219,
|
||||
-0.18053287269681,
|
||||
-0.18324644053602,
|
||||
-0.19440221943049,
|
||||
-0.20897508375209,
|
||||
-0.23575900335007,
|
||||
-0.25188965661642,
|
||||
-0.22244242043552,
|
||||
-0.15656302345061
|
||||
],
|
||||
"dgt": [
|
||||
2.4553191172498,
|
||||
2.44342862248888,
|
||||
2.41879254989742,
|
||||
2.38192717604575,
|
||||
2.33147727493671,
|
||||
2.26678136721453,
|
||||
2.19013043016015,
|
||||
2.10336369905543,
|
||||
2.01414465424155,
|
||||
1.92915262384742,
|
||||
1.85543800978691,
|
||||
1.79748596476494,
|
||||
1.75428006928365,
|
||||
1.72461030013125,
|
||||
1.70379790088896,
|
||||
1.68845480656382,
|
||||
1.6761448370895,
|
||||
1.66286684904577,
|
||||
1.64799163036252,
|
||||
1.63068023161292,
|
||||
1.61073904908309,
|
||||
1.58973304612691,
|
||||
1.56750088631614,
|
||||
1.54578500307573,
|
||||
1.5242627235492,
|
||||
1.50335352244996,
|
||||
1.48420288841848,
|
||||
1.46637521309853,
|
||||
1.44977369463316,
|
||||
1.43476940680732,
|
||||
1.42089447397912,
|
||||
1.40864903907609,
|
||||
1.3966294751726,
|
||||
1.38430337205545,
|
||||
1.3710092503689,
|
||||
1.35690844654118,
|
||||
1.3405812000038,
|
||||
1.32210817897091,
|
||||
1.30069883494415,
|
||||
1.27657903892303,
|
||||
1.24931318255134,
|
||||
1.21911100318577,
|
||||
1.18632744096844,
|
||||
1.15209185089701,
|
||||
1.11575888725852,
|
||||
1.07773189112355,
|
||||
1.03941448941778,
|
||||
1.0
|
||||
]
|
||||
}
|
||||
0
examples/__init__.py
Normal file
0
examples/__init__.py
Normal file
101
examples/create_eqpt_sheet.py
Normal file
101
examples/create_eqpt_sheet.py
Normal file
@@ -0,0 +1,101 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
create_eqpt_sheet.py
|
||||
====================
|
||||
|
||||
XLS parser that can be called to create a "City" column in the "Eqpt" sheet.
|
||||
|
||||
If not present in the "Nodes" sheet, the "Type" column will be implicitly
|
||||
determined based on the topology.
|
||||
"""
|
||||
|
||||
try:
|
||||
from xlrd import open_workbook
|
||||
except ModuleNotFoundError:
|
||||
exit('Required: `pip install xlrd`')
|
||||
from argparse import ArgumentParser
|
||||
|
||||
PARSER = ArgumentParser()
|
||||
PARSER.add_argument('workbook', nargs='?', default='meshTopologyExampleV2.xls',
|
||||
help='create the mandatory columns in Eqpt sheet')
|
||||
ALL_ROWS = lambda sh, start=0: (sh.row(x) for x in range(start, sh.nrows))
|
||||
|
||||
class Node:
|
||||
""" Node element contains uid, list of connected nodes and eqpt type
|
||||
"""
|
||||
def __init__(self, uid, to_node):
|
||||
self.uid = uid
|
||||
self.to_node = to_node
|
||||
self.eqpt = None
|
||||
|
||||
def __repr__(self):
|
||||
return f'uid {self.uid} \nto_node {[node for node in self.to_node]}\neqpt {self.eqpt}\n'
|
||||
|
||||
def __str__(self):
|
||||
return f'uid {self.uid} \nto_node {[node for node in self.to_node]}\neqpt {self.eqpt}\n'
|
||||
|
||||
def read_excel(input_filename):
|
||||
""" read excel Nodes and Links sheets and create a dict of nodes with
|
||||
their to_nodes and type of eqpt
|
||||
"""
|
||||
with open_workbook(input_filename) as wobo:
|
||||
# reading Links sheet
|
||||
links_sheet = wobo.sheet_by_name('Links')
|
||||
nodes = {}
|
||||
for row in ALL_ROWS(links_sheet, start=5):
|
||||
try:
|
||||
nodes[row[0].value].to_node.append(row[1].value)
|
||||
except KeyError:
|
||||
nodes[row[0].value] = Node(row[0].value, [row[1].value])
|
||||
try:
|
||||
nodes[row[1].value].to_node.append(row[0].value)
|
||||
except KeyError:
|
||||
nodes[row[1].value] = Node(row[1].value, [row[0].value])
|
||||
|
||||
nodes_sheet = wobo.sheet_by_name('Nodes')
|
||||
for row in ALL_ROWS(nodes_sheet, start=5):
|
||||
node = row[0].value
|
||||
eqpt = row[6].value
|
||||
try:
|
||||
if eqpt == 'ILA' and len(nodes[node].to_node) != 2:
|
||||
print(f'Inconsistency: ILA node with degree > 2: {node}')
|
||||
exit()
|
||||
if eqpt == '' and len(nodes[node].to_node) == 2:
|
||||
nodes[node].eqpt = 'ILA'
|
||||
elif eqpt == '' and len(nodes[node].to_node) != 2:
|
||||
nodes[node].eqpt = 'ROADM'
|
||||
else:
|
||||
nodes[node].eqpt = eqpt
|
||||
except KeyError:
|
||||
print(f'Inconsistency between Nodes and Links sheets: {node} is not listed in Links')
|
||||
exit()
|
||||
return nodes
|
||||
|
||||
def create_eqt_template(nodes, input_filename):
|
||||
""" writes list of node A node Z corresponding to Nodes and Links sheets in order
|
||||
to help user populating Eqpt
|
||||
"""
|
||||
output_filename = f'{input_filename[:-4]}_eqpt_sheet.txt'
|
||||
with open(output_filename, 'w', encoding='utf-8') as my_file:
|
||||
# print header similar to excel
|
||||
my_file.write('OPTIONAL\n\n\n\
|
||||
\t\tNode a egress amp (from a to z)\t\t\t\t\tNode a ingress amp (from z to a) \
|
||||
\nNode A \tNode Z \tamp type \tatt_in \tamp gain \ttilt \tatt_out\
|
||||
amp type \tatt_in \tamp gain \ttilt \tatt_out\n')
|
||||
|
||||
|
||||
for node in nodes.values():
|
||||
if node.eqpt == 'ILA':
|
||||
my_file.write(f'{node.uid}\t{node.to_node[0]}\n')
|
||||
if node.eqpt == 'ROADM':
|
||||
for to_node in node.to_node:
|
||||
my_file.write(f'{node.uid}\t{to_node}\n')
|
||||
|
||||
print(f'File {output_filename} successfully created with Node A - Node Z ' +
|
||||
' entries for Eqpt sheet in excel file.')
|
||||
|
||||
if __name__ == '__main__':
|
||||
ARGS = PARSER.parse_args()
|
||||
create_eqt_template(read_excel(ARGS.workbook), ARGS.workbook)
|
||||
296
examples/default_edfa_config.json
Normal file
296
examples/default_edfa_config.json
Normal file
@@ -0,0 +1,296 @@
|
||||
{
|
||||
"nf_ripple": [
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0
|
||||
],
|
||||
"gain_ripple": [
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0
|
||||
],
|
||||
"dgt": [
|
||||
2.714526681131686,
|
||||
2.705443819238505,
|
||||
2.6947834587664494,
|
||||
2.6841217449620203,
|
||||
2.6681935771243177,
|
||||
2.6521732021128046,
|
||||
2.630396440815385,
|
||||
2.602860350286428,
|
||||
2.5696460593920065,
|
||||
2.5364027376452056,
|
||||
2.499446286796604,
|
||||
2.4587748041127506,
|
||||
2.414398437185221,
|
||||
2.3699990328716107,
|
||||
2.322373696229342,
|
||||
2.271520771371253,
|
||||
2.2174389328192197,
|
||||
2.16337565384239,
|
||||
2.1183028432496016,
|
||||
2.082225099873648,
|
||||
2.055100772005235,
|
||||
2.0279625371819305,
|
||||
2.0008103857988204,
|
||||
1.9736443063300082,
|
||||
1.9482128147680253,
|
||||
1.9245345552113182,
|
||||
1.9026104247588487,
|
||||
1.8806927939516411,
|
||||
1.862235672444246,
|
||||
1.847275503201129,
|
||||
1.835814081380705,
|
||||
1.824381436842932,
|
||||
1.8139629377087627,
|
||||
1.8045606557581335,
|
||||
1.7961751115773796,
|
||||
1.7877868031023945,
|
||||
1.7793941781790852,
|
||||
1.7709972329654864,
|
||||
1.7625959636196327,
|
||||
1.7541903672600494,
|
||||
1.7459181197626403,
|
||||
1.737780757913635,
|
||||
1.7297783508684146,
|
||||
1.7217732861435076,
|
||||
1.7137640932265894,
|
||||
1.7057507692361864,
|
||||
1.6918150918099673,
|
||||
1.6719047669939942,
|
||||
1.6460167077689267,
|
||||
1.6201194134191075,
|
||||
1.5986915141218316,
|
||||
1.5817353179379183,
|
||||
1.569199764184379,
|
||||
1.5566577309558969,
|
||||
1.545374152761467,
|
||||
1.5353620432989845,
|
||||
1.5266220576235803,
|
||||
1.5178910621476225,
|
||||
1.5097346239790443,
|
||||
1.502153039909686,
|
||||
1.495145456062699,
|
||||
1.488134243479226,
|
||||
1.48111939735681,
|
||||
1.474100442252211,
|
||||
1.4670307626366115,
|
||||
1.4599103316162523,
|
||||
1.45273959485914,
|
||||
1.445565137158368,
|
||||
1.4340878115214444,
|
||||
1.418273806730323,
|
||||
1.3981208704326855,
|
||||
1.3779439775587023,
|
||||
1.3598972673004606,
|
||||
1.3439818461440451,
|
||||
1.3301807335621048,
|
||||
1.316383926863083,
|
||||
1.3040618749785347,
|
||||
1.2932153453410835,
|
||||
1.2838336236692311,
|
||||
1.2744470198196236,
|
||||
1.2650555289898042,
|
||||
1.2556591482982988,
|
||||
1.2428104897182262,
|
||||
1.2264996957264114,
|
||||
1.2067249615595257,
|
||||
1.1869318618366975,
|
||||
1.1672278304018044,
|
||||
1.1476135933863398,
|
||||
1.1280891949729075,
|
||||
1.108555289615659,
|
||||
1.0895983485572227,
|
||||
1.0712204022764056,
|
||||
1.0534217504465226,
|
||||
1.0356155337864215,
|
||||
1.017807767853702,
|
||||
1.0
|
||||
]
|
||||
}
|
||||
1033
examples/demo.json
Normal file
1033
examples/demo.json
Normal file
File diff suppressed because it is too large
Load Diff
80
examples/edfa_example_network.json
Normal file
80
examples/edfa_example_network.json
Normal file
@@ -0,0 +1,80 @@
|
||||
{
|
||||
"network_name": "EDFA Example Network - P2P",
|
||||
"elements": [{
|
||||
"uid": "Site_A",
|
||||
"type": "Transceiver",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site A",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Span1",
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 80,
|
||||
"loss_coef": 0.2,
|
||||
"length_units": "km",
|
||||
"att_in": 0,
|
||||
"con_in": 0.5,
|
||||
"con_out": 0.5
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"region": "",
|
||||
"latitude": 1,
|
||||
"longitude": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Edfa1",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_low_gain",
|
||||
"operational": {
|
||||
"gain_target": 17,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"region": "",
|
||||
"latitude": 2,
|
||||
"longitude": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Site_B",
|
||||
"type": "Transceiver",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site B",
|
||||
"region": "",
|
||||
"latitude": 2,
|
||||
"longitude": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
],
|
||||
"connections": [{
|
||||
"from_node": "Site_A",
|
||||
"to_node": "Span1"
|
||||
},
|
||||
{
|
||||
"from_node": "Span1",
|
||||
"to_node": "Edfa1"
|
||||
},
|
||||
{
|
||||
"from_node": "Edfa1",
|
||||
"to_node": "Site_B"
|
||||
}
|
||||
|
||||
]
|
||||
}
|
||||
1
examples/edfa_model/DFG_96.txt
Normal file
1
examples/edfa_model/DFG_96.txt
Normal file
@@ -0,0 +1 @@
|
||||
2.5135969849999999e+01 2.5118228139999999e+01 2.5095421330000001e+01 2.5062457710000000e+01 2.5026027650000000e+01 2.4996379529999999e+01 2.4981672549999999e+01 2.4975306679999999e+01 2.4983207260000000e+01 2.4997185649999999e+01 2.5017572470000001e+01 2.5038327809999998e+01 2.5054955849999999e+01 2.5067071899999998e+01 2.5070914110000000e+01 2.5070943650000000e+01 2.5071143240000001e+01 2.5075336270000001e+01 2.5087310179999999e+01 2.5103139360000000e+01 2.5122762040000001e+01 2.5142394790000001e+01 2.5159456330000001e+01 2.5173927039999999e+01 2.5176737670000001e+01 2.5170371410000001e+01 2.5152162539999999e+01 2.5131143099999999e+01 2.5108023350000000e+01 2.5085487770000000e+01 2.5069166750000001e+01 2.5058481759999999e+01 2.5054473130000002e+01 2.5051544410000002e+01 2.5049460589999999e+01 2.5047178490000000e+01 2.5045516559999999e+01 2.5044676490000001e+01 2.5040729200000001e+01 2.5032854080000000e+01 2.5023488300000000e+01 2.5016592339999999e+01 2.5013321359999999e+01 2.5011234340000001e+01 2.5010300149999999e+01 2.5009365480000000e+01 2.5008739640000002e+01 2.5008425350000000e+01 2.5006964660000001e+01 2.5004043100000001e+01 2.5000709980000000e+01 2.4998423200000001e+01 2.4993063320000001e+01 2.4983524209999999e+01 2.4971251030000001e+01 2.4960381080000001e+01 2.4948887209999999e+01 2.4935314890000001e+01 2.4921319270000001e+01 2.4908986970000001e+01 2.4898965140000001e+01 2.4889584630000002e+01 2.4880838700000002e+01 2.4872100920000001e+01 2.4864620259999999e+01 2.4858397730000000e+01 2.4854458380000001e+01 2.4851554430000000e+01 2.4851766009999999e+01 2.4854080140000001e+01 2.4859096240000000e+01 2.4864744580000000e+01 2.4872034859999999e+01 2.4880365200000000e+01 2.4889106689999998e+01 2.4897213130000001e+01 2.4902826040000001e+01 2.4906566900000001e+01 2.4908650800000000e+01 2.4910939440000000e+01 2.4913430790000000e+01 2.4915923440000000e+01 2.4921553509999999e+01 2.4930318610000000e+01 2.4940528120000000e+01 2.4949046689999999e+01 2.4957571229999999e+01 2.4967818449999999e+01 2.4981800929999999e+01 2.4997826860000000e+01 2.5013931830000001e+01 2.5028098459999999e+01 2.5040325750000001e+01 2.5052569810000001e+01 2.5064797009999999e+01 2.5077046970000001e+01
|
||||
1
examples/edfa_model/DGT_96.txt
Normal file
1
examples/edfa_model/DGT_96.txt
Normal file
@@ -0,0 +1 @@
|
||||
2.7145266811316859e+00 2.7054438192385049e+00 2.6947834587664494e+00 2.6841217449620203e+00 2.6681935771243177e+00 2.6521732021128046e+00 2.6303964408153848e+00 2.6028603502864280e+00 2.5696460593920065e+00 2.5364027376452056e+00 2.4994462867966041e+00 2.4587748041127506e+00 2.4143984371852212e+00 2.3699990328716107e+00 2.3223736962293420e+00 2.2715207713712529e+00 2.2174389328192197e+00 2.1633756538423898e+00 2.1183028432496016e+00 2.0822250998736478e+00 2.0551007720052352e+00 2.0279625371819305e+00 2.0008103857988204e+00 1.9736443063300082e+00 1.9482128147680253e+00 1.9245345552113182e+00 1.9026104247588487e+00 1.8806927939516411e+00 1.8622356724442459e+00 1.8472755032011290e+00 1.8358140813807049e+00 1.8243814368429321e+00 1.8139629377087627e+00 1.8045606557581335e+00 1.7961751115773796e+00 1.7877868031023945e+00 1.7793941781790852e+00 1.7709972329654864e+00 1.7625959636196327e+00 1.7541903672600494e+00 1.7459181197626403e+00 1.7377807579136351e+00 1.7297783508684146e+00 1.7217732861435076e+00 1.7137640932265894e+00 1.7057507692361864e+00 1.6918150918099673e+00 1.6719047669939942e+00 1.6460167077689267e+00 1.6201194134191075e+00 1.5986915141218316e+00 1.5817353179379183e+00 1.5691997641843789e+00 1.5566577309558969e+00 1.5453741527614671e+00 1.5353620432989845e+00 1.5266220576235803e+00 1.5178910621476225e+00 1.5097346239790443e+00 1.5021530399096861e+00 1.4951454560626991e+00 1.4881342434792260e+00 1.4811193973568100e+00 1.4741004422522110e+00 1.4670307626366115e+00 1.4599103316162523e+00 1.4527395948591399e+00 1.4455651371583680e+00 1.4340878115214444e+00 1.4182738067303231e+00 1.3981208704326855e+00 1.3779439775587023e+00 1.3598972673004606e+00 1.3439818461440451e+00 1.3301807335621048e+00 1.3163839268630830e+00 1.3040618749785347e+00 1.2932153453410835e+00 1.2838336236692311e+00 1.2744470198196236e+00 1.2650555289898042e+00 1.2556591482982988e+00 1.2428104897182262e+00 1.2264996957264114e+00 1.2067249615595257e+00 1.1869318618366975e+00 1.1672278304018044e+00 1.1476135933863398e+00 1.1280891949729075e+00 1.1085552896156590e+00 1.0895983485572227e+00 1.0712204022764056e+00 1.0534217504465226e+00 1.0356155337864215e+00 1.0178077678537021e+00 1.0000000000000000e+00
|
||||
1
examples/edfa_model/NFR_96.txt
Normal file
@@ -0,0 +1 @@
|
||||
-3.1537433199999998e-01 -3.1537433199999998e-01 -3.1540091571002721e-01 -3.1849146117510951e-01 -3.2158358425400546e-01 -3.2467728615499991e-01 -3.2762368641496226e-01 -3.2054138461232762e-01 -3.1345546385118733e-01 -3.0636592135697482e-01 -2.9920267890990127e-01 -2.7061972852631744e-01 -2.4202215770774693e-01 -2.1340995523361256e-01 -1.8478227130158695e-01 -1.4809761118389625e-01 -1.1139416731807622e-01 -7.4671925273579881e-02 -3.8026748965679924e-02 -1.9958469399422092e-02 -1.8809287980157928e-03 1.6205879960573561e-02 3.4301964005709673e-02 5.2407330474054062e-02 7.0521986509597359e-02 7.9578036683472006e-02 8.8546647361909522e-02 9.7519863231965306e-02 1.0649768784154924e-01 9.7741380449907406e-02 8.8803437172660038e-02 7.9860899732845866e-02 7.0913764587403796e-02 6.3335892740565308e-02 5.5756212252058776e-02 4.8172631747863209e-02 4.0585148217162359e-02 3.3381591675710129e-02 2.6178308595650738e-02 1.8971315351761126e-02 1.1760609076833628e-02 1.6950294922759991e-02 2.2274991357701439e-02 2.7602433189104329e-02 3.2932622540790261e-02 3.8265561538776145e-02 4.3601252311271169e-02 3.4856990743481552e-02 2.5991055149117932e-02 1.7120541224980364e-02 8.2757587359203223e-03 1.9423214065246042e-03 -4.3943890171043590e-03 -1.0734375072893196e-02 -1.7077639301414434e-02 -2.4679702899572852e-02 -3.2297970403821680e-02 -3.9920180090477250e-02 -4.7534566327530239e-02 -4.9234003141433724e-02 -5.0934320036547187e-02 -5.2635517696692252e-02 -5.4337596806402461e-02 -5.6040558050919301e-02 -5.7718452237076875e-02 -5.6840590379175944e-02 -5.5962273198734966e-02 -5.5083500341416583e-02 -5.4204271452516814e-02 -5.8396088726955113e-02 -6.2627330169715334e-02 -6.6860769089203700e-02 -7.0901736256069450e-02 -5.2096097309052243e-02 -3.3280684121412940e-02 -1.4455489070928059e-02 4.3150387579057158e-03 1.4839202394482527e-02 2.5368841662503576e-02 3.5903960836465652e-02 4.6444564195321399e-02 5.6990656022467459e-02 6.7542240605774059e-02 1.0002709623672751e-01 1.3258013095133617e-01 1.6515013362773309e-01 1.9773711753599391e-01 2.3194802687829724e-01 2.6618779883837107e-01 3.0044543658085349e-01 3.3472095409250663e-01 3.5929034770587287e-01 3.8384389188855605e-01 4.0841026111391787e-01 4.3298946543290784e-01 4.3298946543290784e-01
|
||||
6
examples/edfa_model/OA.json
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"nf_ripple": "NFR_96.txt",
|
||||
"gain_ripple": "DFG_96.txt",
|
||||
"dgt": "DGT_96.txt",
|
||||
"nf_fit_coeff": "pNFfit3.txt"
|
||||
}
|
||||
300
examples/edfa_model/amplifier_models_description.rst
Normal file
@@ -0,0 +1,300 @@
|
||||
*********************************************
|
||||
Amplifier models and configuration
|
||||
*********************************************
|
||||
|
||||
|
||||
1. Equipment configuration description
|
||||
#######################################
|
||||
|
||||
Equipment description defines equipment types and parameters.
|
||||
It resides in the default **eqpt_config.json** file.
|
||||
By default **transmission_main_example.py** uses the **eqpt_config.json** file; this
|
||||
can be changed with the **-e** or **--equipment** command line parameter.
|
||||
|
||||
2. Amplifier parameters and subtypes
|
||||
#######################################
|
||||
|
||||
GNpy can use several amplifiers, so they are defined as an array of equipment parameters in the **eqpt_config.json** file.
|
||||
|
||||
- *"type_variety"*:
|
||||
Each amplifier is identified by its unique *"type_variety"*, which is used in the input topology files to reference a specific amplifier. It is a free, user-defined id.
|
||||
|
||||
For each amplifier *type_variety*, specific parameters describe its attributes and performance:
|
||||
|
||||
- *"type_def"*:
|
||||
Sets the amplifier model that the simulation uses to calculate the ASE noise contribution. 5 models are defined with reserved words:
|
||||
|
||||
- *"advanced_model"*
|
||||
- *"variable_gain"*
|
||||
- *"fixed_gain"*
|
||||
- *"dual_stage"*
|
||||
- *"openroadm"*
|
||||
*see next section for a full description of these models*
|
||||
|
||||
- *"advanced_config_from_json"*:
|
||||
**This parameter is only applicable to the _"advanced_model"_ model**
|
||||
|
||||
Name of a JSON file describing:
|
||||
|
||||
- nf_fit_coeff
|
||||
- f_min/max
|
||||
- gain_ripple
|
||||
- nf_ripple
|
||||
- dgt
|
||||
|
||||
*see next section for a full description*
|
||||
|
||||
- *"gain_flatmax"*:
|
||||
amplifier maximum gain in dB before entering its extended gain range (flat or nominal-tilt output).
|
||||
|
||||
If gain > gain_flatmax, the amplifier will tilt, based on its dgt function
|
||||
|
||||
If gain > gain_flatmax + target_extended_gain, the amplifier output power is reduced to not exceed the extended gain range.
|
||||
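
A minimal sketch of this behaviour (illustration only, not the GNPy implementation; ``clamp_gain`` is a hypothetical helper):

.. code-block:: python

    def clamp_gain(gain_target, gain_flatmax, target_extended_gain):
        """Above gain_flatmax the amplifier tilts (per its dgt); above
        gain_flatmax + target_extended_gain the effective gain, and hence
        the output power, is capped at the end of the extended gain range."""
        return min(gain_target, gain_flatmax + target_extended_gain)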
|
||||
- *"gain_min"*:
|
||||
amplifier minimum gain in dB.
|
||||
|
||||
If gain < gain_min, the amplifier input is automatically padded, which results in
|
||||
|
||||
NF += gain_min - gain
|
||||
|
||||
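
A minimal sketch of the padding rule above (illustration only; ``nf_with_padding`` is a hypothetical helper, not GNPy's API):

.. code-block:: python

    def nf_with_padding(nf, gain, gain_min):
        """When the requested gain is below gain_min, the input is padded
        and the NF penalty equals the padding: NF += gain_min - gain."""
        if gain < gain_min:
            return nf + (gain_min - gain)
        return nf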
- *"p_max"*:
|
||||
amplifier max output power, full load
|
||||
|
||||
Total signal output power will not be allowed beyond this value
|
||||
|
||||
- *"nf_min/max"*:
|
||||
**These parameters are only applicable to the _"variable_gain"_ model**
|
||||
|
||||
min & max NF values in dB
|
||||
|
||||
NF_min is the amplifier NF @ gain_max
|
||||
|
||||
NF_max is the amplifier NF @ gain_min
|
||||
|
||||
- *"nf_coef"*:
|
||||
**This parameter is only applicable to the *"openroadm"* model**
|
||||
|
||||
[a, b, c, d] 3rd order polynomial coefficients list to define the incremental OSNR vs Pin
|
||||
|
||||
Incremental OSNR is the amplifier OSNR contribution
|
||||
|
||||
Pin is the amplifier channel input power defined in a 50GHz bandwidth
|
||||
|
||||
Incremental OSNR = a*Pin³ + b*Pin² + c*Pin + d
|
||||
|
||||
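
For illustration, the polynomial can be evaluated as follows (sketch only; the coefficients shown are the *"low_noise"* values from the example library):

.. code-block:: python

    import numpy as np

    def incremental_osnr_db(nf_coef, pin_dbm):
        """Evaluate a*Pin**3 + b*Pin**2 + c*Pin + d with nf_coef = [a, b, c, d]
        and Pin the per-channel input power in a 50GHz bandwidth (dBm)."""
        return np.polyval(nf_coef, pin_dbm)

    print(incremental_osnr_db([-8.104e-4, -6.221e-2, -5.889e-1, 37.62], -18))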
- *"preamp_variety"*:
|
||||
**This parameter is only applicable to the _"dual_stage"_ model**
|
||||
|
||||
1st stage type_variety
|
||||
|
||||
- *"booster_variety"*:
|
||||
**This parameter is only applicable to the *"dual_stage"* model**
|
||||
|
||||
2nd stage type_variety
|
||||
|
||||
- *"out_voa_auto"*: true/false
|
||||
**power_mode only**
|
||||
|
||||
**This parameter is only applicable to the *"advanced_model"* and *"variable_gain"* models**
|
||||
|
||||
If "out_voa_auto": true, auto_design will chose the output_VOA value that maximizes the amplifier gain within its power capability and therefore minimizes its NF.
|
||||
|
||||
- *"allowed_for_design"*: true/false
|
||||
**auto_design only**
|
||||
|
||||
Tells auto_design if this amplifier can be picked for the design (deactivates unwanted amplifiers)
|
||||
|
||||
It does not prevent the use of an amplifier if it is placed in the topology input.
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{"Edfa": [{
|
||||
"type_variety": "std_medium_gain",
|
||||
"type_def": "variable_gain",
|
||||
"gain_flatmax": 26,
|
||||
"gain_min": 15,
|
||||
"p_max": 23,
|
||||
"nf_min": 6,
|
||||
"nf_max": 10,
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": true
|
||||
},
|
||||
{
|
||||
"type_variety": "std_low_gain",
|
||||
"type_def": "variable_gain",
|
||||
"gain_flatmax": 16,
|
||||
"gain_min": 8,
|
||||
"p_max": 23,
|
||||
"nf_min": 6.5,
|
||||
"nf_max": 11,
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": true
|
||||
}
|
||||
]}
|
||||
|
||||
|
||||
3. Amplifier models
|
||||
#######################################
|
||||
|
||||
In an open-source and multi-vendor environment, different use cases and contexts must be supported. Therefore, several amplifier models are supported.
|
||||
|
||||
5 types of EDFA definition are possible and referenced by the *"type_def"* parameter with the following reserved words:
|
||||
|
||||
- *"advanced_model"*
|
||||
This model is referred to as a whitebox model because of the detailed level of knowledge that is required. The amplifier NF model and ripple definition are described by a JSON file referenced with *"advanced_config_from_json"*: json filename. This JSON file contains:
|
||||
|
||||
- nf_fit_coeff: [a,b,c,d]
|
||||
|
||||
3rd order polynomial NF = f(-dg) coefficients list (illustrated by the sketch after the JSON example below)
|
||||
|
||||
dg = gain - gain_max
|
||||
|
||||
- f_min/max: amplifier frequency range in Hz
|
||||
- gain_ripple : [...]
|
||||
|
||||
amplifier gain ripple excursion comb list in dB across the frequency range.
|
||||
- nf_ripple : [...]
|
||||
|
||||
amplifier nf ripple excursion comb list in dB across the frequency range.
|
||||
- dgt : [...]
|
||||
amplifier dynamic gain tilt comb list across the frequency range.
|
||||
|
||||
*See next section for the generation of this json file*
|
||||
|
||||
.. code-block:: json-object
|
||||
|
||||
"Edfa":[{
|
||||
"type_variety": "high_detail_model_example",
|
||||
"type_def": "advanced_model",
|
||||
"gain_flatmax": 25,
|
||||
"gain_min": 15,
|
||||
"p_max": 21,
|
||||
"advanced_config_from_json": "std_medium_gain_advanced_config.json",
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": false
|
||||
}
|
||||
]
|
||||
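
As referenced above, the whitebox NF polynomial can be evaluated like this (sketch only, following the NF = f(-dg) convention stated above; the coefficients are the ones from pNFfit3.txt in this directory):

.. code-block:: python

    import numpy as np

    def nf_from_fit(nf_fit_coeff, gain, gain_max):
        """NF = f(-dg) with dg = gain - gain_max and
        nf_fit_coeff = [a, b, c, d] (3rd order, highest degree first)."""
        dg = gain - gain_max
        return np.polyval(nf_fit_coeff, -dg)

    print(nf_from_fit([1.68241e-04, 4.69961e-02, 3.59549e-02, 5.82851e+00], 20, 25))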
|
||||
- *"variable_gain"*
|
||||
This model is referred to as an operator model because a lower level of knowledge is required. A full polynomial description of the NF across the gain range is not required. Instead, NF_min and NF_max values are required and used by the code to model a dual-stage amplifier with an internal mid-stage VOA. NF_min and NF_max values are typically available from equipment suppliers' data sheets. (A rough intuition sketch follows the JSON example below.)
|
||||
|
||||
There is a default JSON file *"default_edfa_config.json"* that enforces 0 tilt and ripple values because the GNpy core algorithm is a multi-carrier propagation.
|
||||
- gain_ripple =[0,...,0]
|
||||
- nf_ripple = [0,...,0]
|
||||
- dgt = [...] generic dgt comb
|
||||
|
||||
.. code-block:: json-object
|
||||
|
||||
"Edfa":[{
|
||||
"type_variety": "std_medium_gain",
|
||||
"type_def": "variable_gain",
|
||||
"gain_flatmax": 26,
|
||||
"gain_min": 15,
|
||||
"p_max": 23,
|
||||
"nf_min": 6,
|
||||
"nf_max": 10,
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": true
|
||||
}
|
||||
]
|
||||
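
For intuition only, the two datasheet points can be read as follows. This naive linear interpolation is NOT the GNpy *"variable_gain"* model (which derives a two-stage EDFA with mid-stage VOA from the same two values); it only illustrates how NF_min and NF_max bound the NF over the gain range:

.. code-block:: python

    def nf_intuition_db(gain, gain_min, gain_flatmax, nf_min, nf_max):
        """nf_min applies at maximum (flat) gain, nf_max at minimum gain;
        interpolate linearly in between, for illustration only."""
        slope = (nf_max - nf_min) / (gain_min - gain_flatmax)
        return nf_min + slope * (gain - gain_flatmax)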
|
||||
- *"fixed_gain"*
|
||||
This model is also an operator model with a single NF value that emulates basic single-coil amplifiers without an internal VOA.
|
||||
|
||||
if gain_min < gain < gain_max, NF == nf0
|
||||
|
||||
if gain < gain_min, the amplifier input is automatically padded, which results in
|
||||
|
||||
NF += gain_min - gain
|
||||
|
||||
.. code-block:: json-object
|
||||
|
||||
"Edfa":[{
|
||||
"type_variety": "std_fixed_gain",
|
||||
"type_def": "fixed_gain",
|
||||
"gain_flatmax": 21,
|
||||
"gain_min": 20,
|
||||
"p_max": 21,
|
||||
"nf0": 5.5,
|
||||
"allowed_for_design": false
|
||||
}
|
||||
]
|
||||
|
||||
- *"openroadm"*
|
||||
This model is a black-box model replicating the OpenROADM MSA specification for ILAs.
|
||||
|
||||
.. code-block:: json-object
|
||||
|
||||
"Edfa":[{
|
||||
"type_variety": "low_noise",
|
||||
"type_def": "openroadm",
|
||||
"gain_flatmax": 27,
|
||||
"gain_min": 12,
|
||||
"p_max": 22,
|
||||
"nf_coef": [-8.104e-4,-6.221e-2,-5.889e-1,37.62],
|
||||
"allowed_for_design": false
|
||||
}
|
||||
]
|
||||
|
||||
- *"dual_stage"*
|
||||
This model allows the cascade (pre-defined combination) of any 2 amplifiers already described in the eqpt_config.json library.
|
||||
|
||||
- preamp_variety defines the 1st stage type variety
|
||||
|
||||
- booster_variety defines the 2nd stage type variety
|
||||
|
||||
Both preamp and booster varieties must exist in the equipment library (see the sketch after the JSON example below).
|
||||
The resulting NF is the sum of the 2 amplifiers' NF contributions.
|
||||
The preamp is operated at its maximum gain.
|
||||
|
||||
- gain_min indicates to auto_design when this dual_stage should be used
|
||||
|
||||
But unlike other models, the 1st stage input will not be padded: it is always operated at its maximum gain and minimum NF. Therefore, if gain adaptation and padding are needed, they are performed by the 2nd stage.
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"type_variety": "medium+low_gain",
|
||||
"type_def": "dual_stage",
|
||||
"gain_min": 25,
|
||||
"preamp_variety": "std_medium_gain",
|
||||
"booster_variety": "std_low_gain",
|
||||
"allowed_for_design": true
|
||||
}
|
||||
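
As referenced above, a minimal sketch of how such an entry resolves against the library (``resolve_dual_stage`` is a hypothetical helper, not GNPy's API):

.. code-block:: python

    def resolve_dual_stage(eqpt, dual_stage):
        """Look up the two stages of a dual_stage entry; both varieties
        must exist in the Edfa library."""
        edfas = {amp["type_variety"]: amp for amp in eqpt["Edfa"]}
        preamp = edfas[dual_stage["preamp_variety"]]    # 1st stage, run at max gain / min NF
        booster = edfas[dual_stage["booster_variety"]]  # 2nd stage, handles gain adaptation and padding
        return preamp, booster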
|
||||
4. advanced_config_from_json
|
||||
#######################################
|
||||
|
||||
The build_oa_json.py script in examples/edfa_model can be used to build the JSON file required for the amplifier advanced_model type_def (a short usage sketch follows at the end of this section):
|
||||
|
||||
Update an existing json file with all the 96ch txt files for a given amplifier type
|
||||
amplifier type 'OA_type1' is hard coded but can be modified and other types added
|
||||
returns an updated amplifier json file: output_json_file_name = 'default_edfa_config.json'
|
||||
amplifier file names
|
||||
|
||||
Convert a set of amplifier files + input json definition file into a valid edfa_json_file:
|
||||
|
||||
nf_fit_coeff: NF 3rd order polynomial coefficients txt file
|
||||
|
||||
nf = f(dg) with dg = gain_operational - gain_max
|
||||
|
||||
nf_ripple: NF ripple excursion txt file
|
||||
|
||||
gain_ripple: gain ripple txt file
|
||||
|
||||
dgt: dynamic gain txt file
|
||||
|
||||
input json file in argument (default = 'OA.json')
|
||||
|
||||
the json input file should have the following fields:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"nf_fit_coeff": "nf_filename.txt",
|
||||
"nf_ripple": "nf_ripple_filename.txt",
|
||||
"gain_ripple": "DFG_filename.txt",
|
||||
"dgt": "DGT_filename.txt"
|
||||
}
|
||||
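
A minimal usage sketch (run from examples/edfa_model with GNpy installed, so the txt files referenced in OA.json are found; the script writes default_edfa_config.json in the current directory):

.. code-block:: python

    from build_oa_json import input_json

    input_json('OA.json')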
|
||||
87
examples/edfa_model/build_oa_json.py
Normal file
@@ -0,0 +1,87 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Tue Jan 30 12:32:00 2018
|
||||
|
||||
@author: jeanluc-auge
|
||||
|
||||
update an existing json file with all the 96ch txt files for a given amplifier type
|
||||
amplifier type 'OA_type1' is hard coded but can be modified and other types added
|
||||
returns an updated amplifier json file: output_json_file_name = 'default_edfa_config.json'
|
||||
"""
|
||||
import re
|
||||
import sys
|
||||
import json
|
||||
import numpy as np
|
||||
from gnpy.core.utils import lin2db, db2lin
|
||||
|
||||
"""amplifier file names
|
||||
convert a set of amplifier files + input json definition file into a valid edfa_json_file:
|
||||
nf_fit_coeff: NF 3rd order polynomial coefficients txt file
|
||||
nf = f(dg)
|
||||
with dg = gain_operational - gain_max
|
||||
nf_ripple: NF ripple excursion txt file
|
||||
gain_ripple: gain ripple txt file
|
||||
dgt: dynamic gain txt file
|
||||
input json file in argument (default = 'OA.json')
|
||||
|
||||
the json input file should have the following fields:
|
||||
{
|
||||
"nf_fit_coeff": "nf_filename.txt",
|
||||
"nf_ripple": "nf_ripple_filename.txt",
|
||||
"gain_ripple": "DFG_filename.txt",
|
||||
"dgt": "DGT_filename.txt",
|
||||
}
|
||||
|
||||
"""
|
||||
|
||||
input_json_file_name = "OA.json" #default path
|
||||
output_json_file_name = "default_edfa_config.json"
|
||||
gain_ripple_field = "gain_ripple"
|
||||
nf_ripple_field = "nf_ripple"
|
||||
nf_fit_coeff = "nf_fit_coeff"
|
||||
|
||||
def read_file(field, file_name):
|
||||
"""read and format the 96 channels txt files describing the amplifier NF and ripple
|
||||
convert dfg into gain ripple by removing the mean component
|
||||
"""
|
||||
|
||||
#with open(path + file_name,'r') as this_file:
|
||||
# data = this_file.read()
|
||||
#data.strip()
|
||||
#data = re.sub(r"([0-9])([ ]{1,3})([0-9-+])",r"\1,\3",data)
|
||||
#data = list(data.split(","))
|
||||
#data = [float(x) for x in data]
|
||||
data = np.loadtxt(file_name)
|
||||
print(len(data), file_name)
|
||||
if field == gain_ripple_field or field == nf_ripple_field:
|
||||
#consider ripple excursion only to avoid redundant information
|
||||
#because the max flat_gain is already given by the 'gain_flat' field in json
|
||||
#remove the mean component
|
||||
print(file_name, ', mean value =', data.mean(), ' is subtracted')
|
||||
data = data - data.mean()
|
||||
data = data.tolist()
|
||||
return data
|
||||
|
||||
def input_json(path):
|
||||
"""read the json input file and add all the 96 channels txt files
|
||||
create the output json file with output_json_file_name"""
|
||||
with open(path,'r') as edfa_json_file:
|
||||
amp_text = edfa_json_file.read()
|
||||
amp_dict = json.loads(amp_text)
|
||||
|
||||
for k, v in amp_dict.items():
|
||||
if re.search(r'.txt$',str(v)) :
|
||||
amp_dict[k] = read_file(k, v)
|
||||
|
||||
amp_text = json.dumps(amp_dict, indent=4)
|
||||
#print(amp_text)
|
||||
with open(output_json_file_name,'w') as edfa_json_file:
|
||||
edfa_json_file.write(amp_text)
|
||||
|
||||
if __name__ == '__main__':
|
||||
if len(sys.argv) == 2:
|
||||
path = sys.argv[1]
|
||||
else:
|
||||
path = input_json_file_name
|
||||
input_json(path)
|
||||
1
examples/edfa_model/pNFfit3.txt
Normal file
@@ -0,0 +1 @@
|
||||
1.6824099999999999e-04 4.6996099999999999e-02 3.5954899999999998e-02 5.8285099999999996e+00
|
||||
307
examples/eqpt_config.json
Normal file
@@ -0,0 +1,307 @@
|
||||
{ "Edfa":[{
|
||||
"type_variety": "high_detail_model_example",
|
||||
"type_def": "advanced_model",
|
||||
"gain_flatmax": 25,
|
||||
"gain_min": 15,
|
||||
"p_max": 21,
|
||||
"advanced_config_from_json": "std_medium_gain_advanced_config.json",
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": false
|
||||
}, {
|
||||
"type_variety": "Juniper_BoosterHG",
|
||||
"type_def": "advanced_model",
|
||||
"gain_flatmax": 25,
|
||||
"gain_min": 10,
|
||||
"p_max": 21,
|
||||
"advanced_config_from_json": "Juniper-BoosterHG.json",
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": false
|
||||
},
|
||||
{
|
||||
"type_variety": "operator_model_example",
|
||||
"type_def": "variable_gain",
|
||||
"gain_flatmax": 26,
|
||||
"gain_min": 15,
|
||||
"p_max": 23,
|
||||
"nf_min": 6,
|
||||
"nf_max": 10,
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": false
|
||||
},
|
||||
{
|
||||
"type_variety": "low_noise",
|
||||
"type_def": "openroadm",
|
||||
"gain_flatmax": 27,
|
||||
"gain_min": 12,
|
||||
"p_max": 22,
|
||||
"nf_coef": [-8.104e-4,-6.221e-2,-5.889e-1,37.62],
|
||||
"allowed_for_design": false
|
||||
},
|
||||
{
|
||||
"type_variety": "standard",
|
||||
"type_def": "openroadm",
|
||||
"gain_flatmax": 27,
|
||||
"gain_min": 12,
|
||||
"p_max": 22,
|
||||
"nf_coef": [-5.952e-4,-6.250e-2,-1.071,28.99],
|
||||
"allowed_for_design": false
|
||||
},
|
||||
{
|
||||
"type_variety": "std_high_gain",
|
||||
"type_def": "variable_gain",
|
||||
"gain_flatmax": 35,
|
||||
"gain_min": 25,
|
||||
"p_max": 21,
|
||||
"nf_min": 5.5,
|
||||
"nf_max": 7,
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": true
|
||||
},
|
||||
{
|
||||
"type_variety": "std_medium_gain",
|
||||
"type_def": "variable_gain",
|
||||
"gain_flatmax": 26,
|
||||
"gain_min": 15,
|
||||
"p_max": 23,
|
||||
"nf_min": 6,
|
||||
"nf_max": 10,
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": true
|
||||
},
|
||||
{
|
||||
"type_variety": "std_low_gain",
|
||||
"type_def": "variable_gain",
|
||||
"gain_flatmax": 16,
|
||||
"gain_min": 8,
|
||||
"p_max": 23,
|
||||
"nf_min": 6.5,
|
||||
"nf_max": 11,
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": true
|
||||
},
|
||||
{
|
||||
"type_variety": "high_power",
|
||||
"type_def": "variable_gain",
|
||||
"gain_flatmax": 16,
|
||||
"gain_min": 8,
|
||||
"p_max": 25,
|
||||
"nf_min": 9,
|
||||
"nf_max": 15,
|
||||
"out_voa_auto": false,
|
||||
"allowed_for_design": false
|
||||
},
|
||||
{
|
||||
"type_variety": "std_fixed_gain",
|
||||
"type_def": "fixed_gain",
|
||||
"gain_flatmax": 21,
|
||||
"gain_min": 20,
|
||||
"p_max": 21,
|
||||
"nf0": 5.5,
|
||||
"allowed_for_design": false
|
||||
},
|
||||
{
|
||||
"type_variety": "4pumps_raman",
|
||||
"type_def": "fixed_gain",
|
||||
"gain_flatmax": 12,
|
||||
"gain_min": 12,
|
||||
"p_max": 21,
|
||||
"nf0": -1,
|
||||
"allowed_for_design": false
|
||||
},
|
||||
{
|
||||
"type_variety": "hybrid_4pumps_lowgain",
|
||||
"type_def": "dual_stage",
|
||||
"raman": true,
|
||||
"gain_min": 25,
|
||||
"preamp_variety": "4pumps_raman",
|
||||
"booster_variety": "std_low_gain",
|
||||
"allowed_for_design": true
|
||||
},
|
||||
{
|
||||
"type_variety": "hybrid_4pumps_mediumgain",
|
||||
"type_def": "dual_stage",
|
||||
"raman": true,
|
||||
"gain_min": 25,
|
||||
"preamp_variety": "4pumps_raman",
|
||||
"booster_variety": "std_medium_gain",
|
||||
"allowed_for_design": true
|
||||
},
|
||||
{
|
||||
"type_variety": "medium+low_gain",
|
||||
"type_def": "dual_stage",
|
||||
"gain_min": 25,
|
||||
"preamp_variety": "std_medium_gain",
|
||||
"booster_variety": "std_low_gain",
|
||||
"allowed_for_design": true
|
||||
},
|
||||
{
|
||||
"type_variety": "medium+high_power",
|
||||
"type_def": "dual_stage",
|
||||
"gain_min": 25,
|
||||
"preamp_variety": "std_medium_gain",
|
||||
"booster_variety": "high_power",
|
||||
"allowed_for_design": false
|
||||
}
|
||||
],
|
||||
"Fiber":[{
|
||||
"type_variety": "SSMF",
|
||||
"dispersion": 1.67e-05,
|
||||
"gamma": 0.00127
|
||||
},
|
||||
{
|
||||
"type_variety": "NZDF",
|
||||
"dispersion": 0.5e-05,
|
||||
"gamma": 0.00146
|
||||
},
|
||||
{
|
||||
"type_variety": "LOF",
|
||||
"dispersion": 2.2e-05,
|
||||
"gamma": 0.000843
|
||||
}
|
||||
],
|
||||
"RamanFiber":[{
|
||||
"type_variety": "SSMF",
|
||||
"dispersion": 1.67e-05,
|
||||
"gamma": 0.00127,
|
||||
"raman_efficiency": {
|
||||
"cr":[
|
||||
0, 9.4E-06, 2.92E-05, 4.88E-05, 6.82E-05, 8.31E-05, 9.4E-05, 0.0001014, 0.0001069, 0.0001119,
|
||||
0.0001217, 0.0001268, 0.0001365, 0.000149, 0.000165, 0.000181, 0.0001977, 0.0002192, 0.0002469,
|
||||
0.0002749, 0.0002999, 0.0003206, 0.0003405, 0.0003592, 0.000374, 0.0003826, 0.0003841, 0.0003826,
|
||||
0.0003802, 0.0003756, 0.0003549, 0.0003795, 0.000344, 0.0002933, 0.0002024, 0.0001158, 8.46E-05,
|
||||
7.14E-05, 6.86E-05, 8.5E-05, 8.93E-05, 9.01E-05, 8.15E-05, 6.67E-05, 4.37E-05, 3.28E-05, 2.96E-05,
|
||||
2.65E-05, 2.57E-05, 2.81E-05, 3.08E-05, 3.67E-05, 5.85E-05, 6.63E-05, 6.36E-05, 5.5E-05, 4.06E-05,
|
||||
2.77E-05, 2.42E-05, 1.87E-05, 1.6E-05, 1.4E-05, 1.13E-05, 1.05E-05, 9.8E-06, 9.8E-06, 1.13E-05,
|
||||
1.64E-05, 1.95E-05, 2.38E-05, 2.26E-05, 2.03E-05, 1.48E-05, 1.09E-05, 9.8E-06, 1.05E-05, 1.17E-05,
|
||||
1.25E-05, 1.21E-05, 1.09E-05, 9.8E-06, 8.2E-06, 6.6E-06, 4.7E-06, 2.7E-06, 1.9E-06, 1.2E-06, 4E-07,
|
||||
2E-07, 1E-07
|
||||
],
|
||||
"frequency_offset":[
|
||||
0, 0.5e12, 1e12, 1.5e12, 2e12, 2.5e12, 3e12, 3.5e12, 4e12, 4.5e12, 5e12, 5.5e12, 6e12, 6.5e12, 7e12,
|
||||
7.5e12, 8e12, 8.5e12, 9e12, 9.5e12, 10e12, 10.5e12, 11e12, 11.5e12, 12e12, 12.5e12, 12.75e12,
|
||||
13e12, 13.25e12, 13.5e12, 14e12, 14.5e12, 14.75e12, 15e12, 15.5e12, 16e12, 16.5e12, 17e12,
|
||||
17.5e12, 18e12, 18.25e12, 18.5e12, 18.75e12, 19e12, 19.5e12, 20e12, 20.5e12, 21e12, 21.5e12,
|
||||
22e12, 22.5e12, 23e12, 23.5e12, 24e12, 24.5e12, 25e12, 25.5e12, 26e12, 26.5e12, 27e12, 27.5e12, 28e12,
|
||||
28.5e12, 29e12, 29.5e12, 30e12, 30.5e12, 31e12, 31.5e12, 32e12, 32.5e12, 33e12, 33.5e12, 34e12, 34.5e12,
|
||||
35e12, 35.5e12, 36e12, 36.5e12, 37e12, 37.5e12, 38e12, 38.5e12, 39e12, 39.5e12, 40e12, 40.5e12, 41e12,
|
||||
41.5e12, 42e12
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"Span":[{
|
||||
"power_mode":true,
|
||||
"delta_power_range_db": [-2,3,0.5],
|
||||
"max_fiber_lineic_loss_for_raman": 0.25,
|
||||
"target_extended_gain": 2.5,
|
||||
"max_length": 150,
|
||||
"length_units": "km",
|
||||
"max_loss": 28,
|
||||
"padding": 10,
|
||||
"EOL": 0,
|
||||
"con_in": 0,
|
||||
"con_out": 0
|
||||
}
|
||||
],
|
||||
"Roadm":[{
|
||||
"target_pch_out_db": -20,
|
||||
"add_drop_osnr": 38,
|
||||
"restrictions": {
|
||||
"preamp_variety_list":[],
|
||||
"booster_variety_list":[]
|
||||
}
|
||||
}],
|
||||
"SI":[{
|
||||
"f_min": 191.3e12,
|
||||
"baud_rate": 32e9,
|
||||
"f_max":195.1e12,
|
||||
"spacing": 50e9,
|
||||
"power_dbm": 0,
|
||||
"power_range_db": [0,0,1],
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"sys_margins": 2
|
||||
}],
|
||||
"Transceiver":[
|
||||
{
|
||||
"type_variety": "vendorA_trx-type1",
|
||||
"frequency":{
|
||||
"min": 191.35e12,
|
||||
"max": 196.1e12
|
||||
},
|
||||
"mode":[
|
||||
{
|
||||
|
||||
"format": "mode 1",
|
||||
"baud_rate": 32e9,
|
||||
"OSNR": 11,
|
||||
"bit_rate": 100e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 37.5e9,
|
||||
"cost":1
|
||||
},
|
||||
{
|
||||
"format": "mode 2",
|
||||
"baud_rate": 66e9,
|
||||
"OSNR": 15,
|
||||
"bit_rate": 200e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 75e9,
|
||||
"cost":1
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type_variety": "Voyager",
|
||||
"frequency":{
|
||||
"min": 191.35e12,
|
||||
"max": 196.1e12
|
||||
},
|
||||
"mode":[
|
||||
{
|
||||
"format": "mode 1",
|
||||
"baud_rate": 32e9,
|
||||
"OSNR": 12,
|
||||
"bit_rate": 100e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 37.5e9,
|
||||
"cost":1
|
||||
},
|
||||
{
|
||||
"format": "mode 3",
|
||||
"baud_rate": 44e9,
|
||||
"OSNR": 18,
|
||||
"bit_rate": 300e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 62.5e9,
|
||||
"cost":1
|
||||
},
|
||||
{
|
||||
"format": "mode 2",
|
||||
"baud_rate": 66e9,
|
||||
"OSNR": 21,
|
||||
"bit_rate": 400e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 75e9,
|
||||
"cost":1
|
||||
},
|
||||
{
|
||||
"format": "mode 4",
|
||||
"baud_rate": 66e9,
|
||||
"OSNR": 16,
|
||||
"bit_rate": 200e9,
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 40,
|
||||
"min_spacing": 75e9,
|
||||
"cost":1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
}
|
||||
196
examples/fused_roadm_example_network.json
Normal file
@@ -0,0 +1,196 @@
|
||||
{
|
||||
"elements": [
|
||||
{
|
||||
"uid": "trx Site_A",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_A",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Transceiver"
|
||||
},
|
||||
{
|
||||
"uid": "trx Site_C",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_C",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Transceiver"
|
||||
},
|
||||
{
|
||||
"uid": "roadm Site_A",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_A",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Roadm",
|
||||
"params": {
|
||||
"loss": 17
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "roadm Site_C",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_C",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Roadm"
|
||||
},
|
||||
{
|
||||
"uid": "ingress fused spans in Site_B",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_B",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Fused",
|
||||
"params": {
|
||||
"loss": 0.5
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "egress fused spans in Site_B",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"city": "Site_B",
|
||||
"region": "",
|
||||
"latitude": 0,
|
||||
"longitude": 0
|
||||
}
|
||||
},
|
||||
"type": "Fused"
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Site_A \u2192 Site_B)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 40.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Site_B \u2192 Site_C)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 50.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Site_B \u2192 Site_A)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 40.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "fiber (Site_C \u2192 Site_B)-",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.0,
|
||||
"longitude": 0.0
|
||||
}
|
||||
},
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"length": 50.0,
|
||||
"length_units": "km",
|
||||
"loss_coef": 0.2
|
||||
}
|
||||
}
|
||||
],
|
||||
"connections": [
|
||||
{
|
||||
"from_node": "roadm Site_A",
|
||||
"to_node": "fiber (Site_A \u2192 Site_B)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Site_B \u2192 Site_A)-",
|
||||
"to_node": "roadm Site_A"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Site_A \u2192 Site_B)-",
|
||||
"to_node": "ingress fused spans in Site_B"
|
||||
},
|
||||
{
|
||||
"from_node": "ingress fused spans in Site_B",
|
||||
"to_node": "fiber (Site_B \u2192 Site_C)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Site_C \u2192 Site_B)-",
|
||||
"to_node": "egress fused spans in Site_B"
|
||||
},
|
||||
{
|
||||
"from_node": "egress fused spans in Site_B",
|
||||
"to_node": "fiber (Site_B \u2192 Site_A)-"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Site_C",
|
||||
"to_node": "fiber (Site_C \u2192 Site_B)-"
|
||||
},
|
||||
{
|
||||
"from_node": "fiber (Site_B \u2192 Site_C)-",
|
||||
"to_node": "roadm Site_C"
|
||||
},
|
||||
{
|
||||
"from_node": "trx Site_A",
|
||||
"to_node": "roadm Site_A"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Site_A",
|
||||
"to_node": "trx Site_A"
|
||||
},
|
||||
{
|
||||
"from_node": "trx Site_C",
|
||||
"to_node": "roadm Site_C"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm Site_C",
|
||||
"to_node": "trx Site_C"
|
||||
}
|
||||
]
|
||||
}
|
||||
BIN
examples/juniperTopologyExampleV2.xls
Normal file
Binary file not shown.
1324
examples/meshTopologyExampleV2.json
Normal file
File diff suppressed because it is too large
BIN
examples/meshTopologyExampleV2.xls
Normal file
Binary file not shown.
268
examples/meshTopologyExampleV2_services.json
Normal file
@@ -0,0 +1,268 @@
|
||||
{
|
||||
"path-request": [
|
||||
{
|
||||
"request-id": "0",
|
||||
"source": "trx Lorient_KMA",
|
||||
"destination": "trx Vannes_KBE",
|
||||
"src-tp-id": "trx Lorient_KMA",
|
||||
"dst-tp-id": "trx Vannes_KBE",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": null,
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": 80,
|
||||
"output-power": 0.0012589254117941673,
|
||||
"path_bandwidth": 100000000000.0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "1",
|
||||
"source": "trx Brest_KLA",
|
||||
"destination": "trx Vannes_KBE",
|
||||
"src-tp-id": "trx Brest_KLA",
|
||||
"dst-tp-id": "trx Vannes_KBE",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": 0.0012589254117941673,
|
||||
"path_bandwidth": 200000000000.0
|
||||
}
|
||||
},
|
||||
"explicit-route-objects": {
|
||||
"route-object-include-exclude": [
|
||||
{
|
||||
"explicit-route-usage": "route-include-ero",
|
||||
"index": 0,
|
||||
"num-unnum-hop": {
|
||||
"node-id": "roadm Brest_KLA",
|
||||
"link-tp-id": "link-tp-id is not used",
|
||||
"hop-type": "LOOSE"
|
||||
}
|
||||
},
|
||||
{
|
||||
"explicit-route-usage": "route-include-ero",
|
||||
"index": 1,
|
||||
"num-unnum-hop": {
|
||||
"node-id": "roadm Lannion_CAS",
|
||||
"link-tp-id": "link-tp-id is not used",
|
||||
"hop-type": "LOOSE"
|
||||
}
|
||||
},
|
||||
{
|
||||
"explicit-route-usage": "route-include-ero",
|
||||
"index": 2,
|
||||
"num-unnum-hop": {
|
||||
"node-id": "roadm Lorient_KMA",
|
||||
"link-tp-id": "link-tp-id is not used",
|
||||
"hop-type": "LOOSE"
|
||||
}
|
||||
},
|
||||
{
|
||||
"explicit-route-usage": "route-include-ero",
|
||||
"index": 3,
|
||||
"num-unnum-hop": {
|
||||
"node-id": "roadm Vannes_KBE",
|
||||
"link-tp-id": "link-tp-id is not used",
|
||||
"hop-type": "LOOSE"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "3",
|
||||
"source": "trx Lannion_CAS",
|
||||
"destination": "trx Rennes_STA",
|
||||
"src-tp-id": "trx Lannion_CAS",
|
||||
"dst-tp-id": "trx Rennes_STA",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "vendorA_trx-type1",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": null,
|
||||
"path_bandwidth": 60000000000.0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "4",
|
||||
"source": "trx Rennes_STA",
|
||||
"destination": "trx Lannion_CAS",
|
||||
"src-tp-id": "trx Rennes_STA",
|
||||
"dst-tp-id": "trx Lannion_CAS",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "vendorA_trx-type1",
|
||||
"trx_mode": null,
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 75000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": 0.0019952623149688794,
|
||||
"path_bandwidth": 150000000000.0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "5",
|
||||
"source": "trx Rennes_STA",
|
||||
"destination": "trx Lannion_CAS",
|
||||
"src-tp-id": "trx Rennes_STA",
|
||||
"dst-tp-id": "trx Lannion_CAS",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "vendorA_trx-type1",
|
||||
"trx_mode": "mode 2",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 75000000000.0,
|
||||
"max-nb-of-channel": 63,
|
||||
"output-power": 0.0019952623149688794,
|
||||
"path_bandwidth": 20000000000.0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "6",
|
||||
"source": "trx Lannion_CAS",
|
||||
"destination": "trx Lorient_KMA",
|
||||
"src-tp-id": "trx Lannion_CAS",
|
||||
"dst-tp-id": "trx Lorient_KMA",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": 76,
|
||||
"output-power": 0.001,
|
||||
"path_bandwidth": 300000000000.0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "7",
|
||||
"source": "trx Lannion_CAS",
|
||||
"destination": "trx Lorient_KMA",
|
||||
"src-tp-id": "trx Lannion_CAS",
|
||||
"dst-tp-id": "trx Lorient_KMA",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": 76,
|
||||
"output-power": 0.001,
|
||||
"path_bandwidth": 400000000000.0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "7b",
|
||||
"source": "trx Lannion_CAS",
|
||||
"destination": "trx Lorient_KMA",
|
||||
"src-tp-id": "trx Lannion_CAS",
|
||||
"dst-tp-id": "trx Lorient_KMA",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 75000000000.0,
|
||||
"max-nb-of-channel": 50,
|
||||
"output-power": 0.001,
|
||||
"path_bandwidth": 400000000000.0
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"synchronization": [
|
||||
{
|
||||
"synchronization-id": "3",
|
||||
"svec": {
|
||||
"relaxable": "false",
|
||||
"disjointness": "node link",
|
||||
"request-id-number": [
|
||||
"3",
|
||||
"1"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"synchronization-id": "4",
|
||||
"svec": {
|
||||
"relaxable": "false",
|
||||
"disjointness": "node link",
|
||||
"request-id-number": [
|
||||
"4",
|
||||
"5"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
569
examples/path_requests_run.py
Executable file
@@ -0,0 +1,569 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
path_requests_run.py
|
||||
====================
|
||||
|
||||
Reads a JSON request file in accordance with the Yang model
|
||||
for requesting path computation and returns path results in terms
|
||||
of path and feasibility.
|
||||
|
||||
See: draft-ietf-teas-yang-path-computation-01.txt
|
||||
"""
|
||||
|
||||
from sys import exit
|
||||
from argparse import ArgumentParser
|
||||
from pathlib import Path
|
||||
from collections import namedtuple
|
||||
from logging import getLogger, basicConfig, CRITICAL, DEBUG, INFO
|
||||
from json import dumps, loads
|
||||
from numpy import mean
|
||||
from gnpy.core.service_sheet import convert_service_sheet, Request_element, Element
|
||||
from gnpy.core.utils import load_json
|
||||
from gnpy.core.network import load_network, build_network, save_network, network_from_json
|
||||
from gnpy.core.equipment import load_equipment, trx_mode_params, automatic_nch
|
||||
from gnpy.core.elements import Transceiver, Roadm
|
||||
from gnpy.core.utils import db2lin, lin2db
|
||||
from gnpy.core.request import (Path_request, Result_element,
|
||||
propagate, jsontocsv, Disjunction, compute_path_dsjctn,
|
||||
requests_aggregation, propagate_and_optimize_mode,
|
||||
BLOCKING_NOPATH, BLOCKING_NOMODE,
|
||||
find_reversed_path)
|
||||
from gnpy.core.exceptions import (ConfigurationError, EquipmentConfigError, NetworkTopologyError,
|
||||
ServiceError, DisjunctionError)
|
||||
import gnpy.core.ansi_escapes as ansi_escapes
|
||||
from gnpy.core.spectrum_assignment import (build_oms_list, pth_assign_spectrum)
|
||||
from copy import copy, deepcopy
|
||||
from textwrap import dedent
|
||||
from math import ceil
|
||||
|
||||
from flask import Flask, jsonify, make_response, request
|
||||
from flask_restful import Api, Resource, reqparse, fields
|
||||
|
||||
#EQPT_LIBRARY_FILENAME = Path(__file__).parent / 'eqpt_config.json'
|
||||
|
||||
LOGGER = getLogger(__name__)
|
||||
|
||||
PARSER = ArgumentParser(description='A function that computes performances for a list of ' +
|
||||
'services provided in a json file or an excel sheet.')
|
||||
PARSER.add_argument('network_filename', nargs='?', type=Path,\
|
||||
default=Path(__file__).parent / 'meshTopologyExampleV2.xls',\
|
||||
help='input topology file in xls or json')
|
||||
PARSER.add_argument('service_filename', nargs='?', type=Path,\
|
||||
default=Path(__file__).parent / 'meshTopologyExampleV2.xls',\
|
||||
help='input service file in xls or json')
|
||||
PARSER.add_argument('eqpt_filename', nargs='?', type=Path,\
|
||||
default=Path(__file__).parent / 'eqpt_config.json',\
|
||||
help='input equipment library in json. Default is eqpt_config.json')
|
||||
PARSER.add_argument('-bi', '--bidir', action='store_true',\
|
||||
help='considers that all demands are bidir')
|
||||
PARSER.add_argument('-v', '--verbose', action='count', default=0,\
|
||||
help='increases verbosity for each occurence')
|
||||
PARSER.add_argument('-o', '--output', type=Path)
|
||||
PARSER.add_argument('-r', '--rest', action='count', default=0, help='use the REST API')
|
||||
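# Example invocation using the sample files shipped in this directory (the file
# names below are just the defaults/examples; the output name is arbitrary):
#   python path_requests_run.py meshTopologyExampleV2.xls meshTopologyExampleV2_services.json \
#          eqpt_config.json -o path_results.json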
|
||||
NETWORK_FILENAME = 'topoDemov1.json' #'disagregatedTopoDemov1.json' #
|
||||
|
||||
APP = Flask(__name__, static_url_path="")
|
||||
API = Api(APP)
|
||||
|
||||
def requests_from_json(json_data, equipment):
|
||||
""" converts the json data into a list of requests elements
|
||||
"""
|
||||
requests_list = []
|
||||
|
||||
for req in json_data['path-request']:
|
||||
# init all params from request
|
||||
params = {}
|
||||
params['request_id'] = req['request-id']
|
||||
params['source'] = req['source']
|
||||
params['bidir'] = req['bidirectional']
|
||||
params['destination'] = req['destination']
|
||||
params['trx_type'] = req['path-constraints']['te-bandwidth']['trx_type']
|
||||
params['trx_mode'] = req['path-constraints']['te-bandwidth']['trx_mode']
|
||||
params['format'] = params['trx_mode']
|
||||
params['spacing'] = req['path-constraints']['te-bandwidth']['spacing']
|
||||
try:
|
||||
nd_list = req['explicit-route-objects']['route-object-include-exclude']
|
||||
except KeyError:
|
||||
nd_list = []
|
||||
params['nodes_list'] = [n['num-unnum-hop']['node-id'] for n in nd_list]
|
||||
params['loose_list'] = [n['num-unnum-hop']['hop-type'] for n in nd_list]
|
||||
# recover trx physical param (baudrate, ...) from type and mode
|
||||
# in trx_mode_params optical power is read from equipment['SI']['default'] and
|
||||
# nb_channel is computed based on min max frequency and spacing
|
||||
trx_params = trx_mode_params(equipment, params['trx_type'], params['trx_mode'], True)
|
||||
params.update(trx_params)
|
||||
# print(trx_params['min_spacing'])
|
||||
# optical power might be set differently in the request. if it is indicated then the
|
||||
# params['power'] is updated
|
||||
try:
|
||||
if req['path-constraints']['te-bandwidth']['output-power']:
|
||||
params['power'] = req['path-constraints']['te-bandwidth']['output-power']
|
||||
except KeyError:
|
||||
pass
|
||||
# same process for nb-channel
|
||||
f_min = params['f_min']
|
||||
f_max_from_si = params['f_max']
|
||||
try:
|
||||
if req['path-constraints']['te-bandwidth']['max-nb-of-channel'] is not None:
|
||||
nch = req['path-constraints']['te-bandwidth']['max-nb-of-channel']
|
||||
params['nb_channel'] = nch
|
||||
spacing = params['spacing']
|
||||
params['f_max'] = f_min + nch*spacing
|
||||
else:
|
||||
params['nb_channel'] = automatic_nch(f_min, f_max_from_si, params['spacing'])
|
||||
except KeyError:
|
||||
params['nb_channel'] = automatic_nch(f_min, f_max_from_si, params['spacing'])
|
||||
consistency_check(params, f_max_from_si)
|
||||
|
||||
try:
|
||||
params['path_bandwidth'] = req['path-constraints']['te-bandwidth']['path_bandwidth']
|
||||
except KeyError:
|
||||
pass
|
||||
requests_list.append(Path_request(**params))
|
||||
return requests_list
|
||||
|
||||
def consistency_check(params, f_max_from_si):
|
||||
""" checks that the requested parameters are consistant (spacing vs nb channel,
|
||||
vs transponder mode...)
|
||||
"""
|
||||
f_min = params['f_min']
|
||||
f_max = params['f_max']
|
||||
max_recommanded_nb_channels = automatic_nch(f_min, f_max, params['spacing'])
|
||||
if params['baud_rate'] is not None:
|
||||
# implicitly means that a mode is defined with min_spacing
|
||||
if params['min_spacing'] > params['spacing']:
|
||||
msg = f'Request {params["request_id"]} has spacing below transponder ' +\
|
||||
f'{params["trx_type"]} {params["trx_mode"]} min spacing value ' +\
|
||||
f'{params["min_spacing"]*1e-9}GHz.\nComputation stopped'
|
||||
print(msg)
|
||||
LOGGER.critical(msg)
|
||||
raise ServiceError(msg)
|
||||
if f_max > f_max_from_si:
|
||||
msg = dedent(f'''
|
||||
Requested channel number {params["nb_channel"]}, baud rate {params["baud_rate"]} GHz and requested spacing {params["spacing"]*1e-9}GHz
|
||||
is not consistent with frequency range {f_min*1e-12} THz, {f_max*1e-12} THz, min recommended spacing {params["min_spacing"]*1e-9}GHz.
|
||||
max recommended nb of channels is {max_recommanded_nb_channels}
|
||||
Computation stopped.''')
|
||||
LOGGER.critical(msg)
|
||||
raise ServiceError(msg)
|
||||
|
||||
|
||||
def disjunctions_from_json(json_data):
|
||||
""" reads the disjunction requests from the json dict and create the list
|
||||
of requested disjunctions for this set of requests
|
||||
"""
|
||||
disjunctions_list = []
|
||||
try:
|
||||
temp_test = json_data['synchronization']
|
||||
except KeyError:
|
||||
temp_test = []
|
||||
if temp_test:
|
||||
for snc in json_data['synchronization']:
|
||||
params = {}
|
||||
params['disjunction_id'] = snc['synchronization-id']
|
||||
params['relaxable'] = snc['svec']['relaxable']
|
||||
params['link_diverse'] = 'link' in snc['svec']['disjointness']
|
||||
params['node_diverse'] = 'node' in snc['svec']['disjointness']
|
||||
params['disjunctions_req'] = snc['svec']['request-id-number']
|
||||
disjunctions_list.append(Disjunction(**params))
|
||||
|
||||
return disjunctions_list
|
||||
|
||||
|
||||
def load_requests(filename, eqpt_filename, bidir):
|
||||
""" loads the requests from a json or an excel file into a data string
|
||||
"""
|
||||
if filename.suffix.lower() == '.xls':
|
||||
LOGGER.info('Automatically converting requests from XLS to JSON')
|
||||
try:
|
||||
json_data = convert_service_sheet(filename, eqpt_filename, bidir=bidir)
|
||||
except ServiceError as this_e:
|
||||
print(f'{ansi_escapes.red}Service error:{ansi_escapes.reset} {this_e}')
|
||||
exit(1)
|
||||
else:
|
||||
with open(filename, encoding='utf-8') as my_f:
|
||||
json_data = loads(my_f.read())
|
||||
return json_data
|
||||
|
||||
def compute_path_with_disjunction(network, equipment, pathreqlist, pathlist):
|
||||
""" use a list but a dictionnary might be helpful to find path based on request_id
|
||||
TODO change all these req, dsjct, res lists into dict !
|
||||
"""
|
||||
path_res_list = []
|
||||
reversed_path_res_list = []
|
||||
propagated_reversed_path_res_list = []
|
||||
|
||||
for i, pathreq in enumerate(pathreqlist):
|
||||
|
||||
# use the power specified in the request; it might differ from the one
|
||||
# specified for design. Power is an optional parameter in the request
|
||||
# definition; if omitted, use the one defined in eqpt_config.json
|
||||
p_db = lin2db(pathreq.power*1e3)
|
||||
p_total_db = p_db + lin2db(pathreq.nb_channel)
|
||||
print(f'request {pathreq.request_id}')
|
||||
print(f'Computing path from {pathreq.source} to {pathreq.destination}')
|
||||
# adding first node to be clearer on the output
|
||||
print(f'with path constraint: {[pathreq.source] + pathreq.nodes_list}')
|
||||
|
||||
# pathlist[i] contains the whole path information for request i
|
||||
# last element is a transceiver, where the result of the propagation is
|
||||
# recorded.
|
||||
# Important Note: since transceivers attached to roadms are actually logical
|
||||
# elements to simulate performance, several demands having the same destination
|
||||
# may use the same transponder for the performance simulation. This is why
|
||||
# we use deepcopy: to ensure that each propagation is recorded and not overwritten
|
||||
total_path = deepcopy(pathlist[i])
|
||||
print(f'Computed path (roadms):{[e.uid for e in total_path if isinstance(e, Roadm)]}')
|
||||
# for debug
|
||||
# print(f'{pathreq.baud_rate} {pathreq.power} {pathreq.spacing} {pathreq.nb_channel}')
|
||||
if total_path:
|
||||
if pathreq.baud_rate is not None:
|
||||
# means that at this point the mode was entered/forced by user and thus a
|
||||
# baud_rate was defined
|
||||
total_path = propagate(total_path, pathreq, equipment)
|
||||
temp_snr01nm = round(mean(total_path[-1].snr+lin2db(pathreq.baud_rate/(12.5e9))), 2)
|
||||
if temp_snr01nm < pathreq.OSNR:
|
||||
msg = f'\tWarning! Request {pathreq.request_id} computed path from' +\
|
||||
f' {pathreq.source} to {pathreq.destination} does not pass with' +\
|
||||
f' {pathreq.tsp_mode}\n\tcomputedSNR in 0.1nm = {temp_snr01nm} ' +\
|
||||
f'- required osnr {pathreq.OSNR}'
|
||||
print(msg)
|
||||
LOGGER.warning(msg)
|
||||
pathreq.blocking_reason = 'MODE_NOT_FEASIBLE'
|
||||
else:
|
||||
total_path, mode = propagate_and_optimize_mode(total_path, pathreq, equipment)
|
||||
# if no baud rate satisfies the spacing, no mode is returned; for the last explored mode
|
||||
# a warning is shown in the propagate_and_optimize_mode
|
||||
# propagate_and_optimize_mode function returns the mode with the highest bitrate
|
||||
# that passes. if no mode passes, then an attribute blocking_reason is added on
|
||||
# pathreq that contains the reason for blocking: 'NO_PATH', 'NO_FEASIBLE_MODE', ...
|
||||
try:
|
||||
if pathreq.blocking_reason in BLOCKING_NOPATH:
|
||||
total_path = []
|
||||
elif pathreq.blocking_reason in BLOCKING_NOMODE:
|
||||
pathreq.baud_rate = mode['baud_rate']
|
||||
pathreq.tsp_mode = mode['format']
|
||||
pathreq.format = mode['format']
|
||||
pathreq.OSNR = mode['OSNR']
|
||||
pathreq.tx_osnr = mode['tx_osnr']
|
||||
pathreq.bit_rate = mode['bit_rate']
|
||||
# other blocking reason should not appear at this point
|
||||
except AttributeError:
|
||||
pathreq.baud_rate = mode['baud_rate']
|
||||
pathreq.tsp_mode = mode['format']
|
||||
pathreq.format = mode['format']
|
||||
pathreq.OSNR = mode['OSNR']
|
||||
pathreq.tx_osnr = mode['tx_osnr']
|
||||
pathreq.bit_rate = mode['bit_rate']
|
||||
|
||||
# reversed path is needed for correct spectrum assignment
|
||||
reversed_path = find_reversed_path(pathlist[i])
|
||||
if pathreq.bidir:
|
||||
# only propagate if bidir is true, but needs the reversed path anyway for
|
||||
# correct spectrum assignment
|
||||
rev_p = deepcopy(reversed_path)
|
||||
|
||||
print(f'\n\tPropagating Z to A direction {pathreq.destination} to {pathreq.source}')
|
||||
print(f'\tPath (roadms) {[r.uid for r in rev_p if isinstance(r,Roadm)]}\n')
|
||||
propagated_reversed_path = propagate(rev_p, pathreq, equipment)
|
||||
temp_snr01nm = round(mean(propagated_reversed_path[-1].snr +\
|
||||
lin2db(pathreq.baud_rate/(12.5e9))), 2)
|
||||
if temp_snr01nm < pathreq.OSNR:
|
||||
msg = f'\tWarning! Request {pathreq.request_id} computed path from' +\
|
||||
f' {pathreq.source} to {pathreq.destination} does not pass with' +\
|
||||
f' {pathreq.tsp_mode}\n' +\
|
||||
f'\tcomputedSNR in 0.1nm = {temp_snr01nm} - required osnr {pathreq.OSNR}'
|
||||
print(msg)
|
||||
LOGGER.warning(msg)
|
||||
# TODO selection of mode should also be on reversed direction !!
|
||||
pathreq.blocking_reason = 'MODE_NOT_FEASIBLE'
|
||||
else:
|
||||
propagated_reversed_path = []
|
||||
else:
|
||||
msg = 'Total path is empty. No propagation'
|
||||
print(msg)
|
||||
LOGGER.info(msg)
|
||||
reversed_path = []
|
||||
propagated_reversed_path = []
|
||||
|
||||
path_res_list.append(total_path)
|
||||
reversed_path_res_list.append(reversed_path)
|
||||
propagated_reversed_path_res_list.append(propagated_reversed_path)
|
||||
# print to have a nice output
|
||||
print('')
|
||||
return path_res_list, reversed_path_res_list, propagated_reversed_path_res_list
|
||||
|
||||
def correct_route_list(network, pathreqlist):
|
||||
""" prepares the format of route list of nodes to be consistant
|
||||
remove wrong names, remove endpoints
|
||||
also correct source and destination
|
||||
"""
|
||||
anytype = [n.uid for n in network.nodes()]
|
||||
# TODO there is a problem of identification of fibers in case of parallel fibers
|
||||
# between two adjacent roadms so fiber constraint is not supported
|
||||
transponders = [n.uid for n in network.nodes() if isinstance(n, Transceiver)]
|
||||
for pathreq in pathreqlist:
|
||||
for i, n_id in enumerate(pathreq.nodes_list):
|
||||
# replace possibly wrong name with a formatted roadm name
|
||||
# print(n_id)
|
||||
if n_id not in anytype:
|
||||
# find nodes name that include constraint among all possible names except
|
||||
# transponders (not yet supported as constraints).
|
||||
nodes_suggestion = [uid for uid in anytype \
|
||||
if n_id.lower() in uid.lower() and uid not in transponders]
|
||||
if pathreq.loose_list[i] == 'LOOSE':
|
||||
if len(nodes_suggestion) > 0:
|
||||
new_n = nodes_suggestion[0]
|
||||
print(f'invalid route node specified:\
|
||||
\n\'{n_id}\', replaced with \'{new_n}\'')
|
||||
pathreq.nodes_list[i] = new_n
|
||||
else:
|
||||
print(f'\x1b[1;33;40m'+f'invalid route node specified \'{n_id}\',' +\
|
||||
f' could not use it as constraint, skipped!'+'\x1b[0m')
|
||||
pathreq.nodes_list.remove(n_id)
|
||||
pathreq.loose_list.pop(i)
|
||||
else:
|
||||
msg = f'\x1b[1;33;40m'+f'could not find node: {n_id} in network topology.' +\
|
||||
f' Strict constraint can not be applied.' + '\x1b[0m'
|
||||
LOGGER.critical(msg)
|
||||
raise ValueError(msg)
|
||||
if pathreq.source not in transponders:
|
||||
msg = f'\x1b[1;31;40m' + f'Request: {pathreq.request_id}: could not find' +\
|
||||
f' transponder source: {pathreq.source}.'+'\x1b[0m'
|
||||
LOGGER.critical(msg)
|
||||
print(f'{msg}\nComputation stopped.')
|
||||
raise ServiceError(msg)
|
||||
|
||||
if pathreq.destination not in transponders:
|
||||
msg = f'\x1b[1;31;40m'+f'Request: {pathreq.request_id}: could not find' +\
|
||||
f' transponder destination: {pathreq.destination}.'+'\x1b[0m'
|
||||
LOGGER.critical(msg)
|
||||
print(f'{msg}\nComputation stopped.')
|
||||
raise ServiceError(msg)
|
||||
|
||||
# TODO remove endpoints from this list in case they were added by the user
|
||||
# in the xls or json files
|
||||
return pathreqlist
|
||||
|
||||
def correct_disjn(disjn):
|
||||
""" clean disjunctions to remove possible repetition
|
||||
"""
|
||||
local_disjn = disjn.copy()
|
||||
for elem in local_disjn:
|
||||
for dis_elem in local_disjn:
|
||||
if set(elem.disjunctions_req) == set(dis_elem.disjunctions_req) and\
|
||||
elem.disjunction_id != dis_elem.disjunction_id:
|
||||
local_disjn.remove(dis_elem)
|
||||
return local_disjn
|
||||
|
||||
|
||||
def path_result_json(pathresult):
|
||||
""" create the response dictionnary
|
||||
"""
|
||||
data = {
|
||||
'response': [n.json for n in pathresult]
|
||||
}
|
||||
return data
|
||||
|
||||
def compute_requests(network, data, equipment):
|
||||
""" Main program calling functions
|
||||
"""
|
||||
# Build the network once using the default power defined in SI in eqpt config
|
||||
# TODO power density: db2linp(ower_dbm": 0)/power_dbm": 0 * nb channels as defined by
|
||||
# spacing, f_min and f_max
|
||||
p_db = equipment['SI']['default'].power_dbm
|
||||
|
||||
p_total_db = p_db + lin2db(automatic_nch(equipment['SI']['default'].f_min,\
|
||||
equipment['SI']['default'].f_max, equipment['SI']['default'].spacing))
|
||||
build_network(network, equipment, p_db, p_total_db)
|
||||
save_network(ARGS.network_filename, network)
|
||||
|
||||
oms_list = build_oms_list(network, equipment)
|
||||
|
||||
try:
|
||||
rqs = requests_from_json(data, equipment)
|
||||
except ServiceError as this_e:
|
||||
print(f'{ansi_escapes.red}Service error:{ansi_escapes.reset} {this_e}')
|
||||
raise this_e
|
||||
# check that request ids are unique. Non-unique ids may
|
||||
# mess up the computation: better to stop the computation
|
||||
all_ids = [r.request_id for r in rqs]
|
||||
if len(all_ids) != len(set(all_ids)):
|
||||
for item in list(set(all_ids)):
|
||||
all_ids.remove(item)
|
||||
msg = f'Requests id {all_ids} are not unique'
|
||||
LOGGER.critical(msg)
|
||||
raise ServiceError(msg)
|
||||
try:
|
||||
rqs = correct_route_list(network, rqs)
|
||||
except ServiceError as this_e:
|
||||
print(f'{ansi_escapes.red}Service error:{ansi_escapes.reset} {this_e}')
|
||||
raise this_e
|
||||
#exit(1)
|
||||
# pths = compute_path(network, equipment, rqs)
|
||||
dsjn = disjunctions_from_json(data)
|
||||
|
||||
print('\x1b[1;34;40m' + f'List of disjunctions' + '\x1b[0m')
|
||||
print(dsjn)
|
||||
# need to warn or correct in case of wrong disjunction form
|
||||
# disjunction must not be repeated with same or different ids
|
||||
dsjn = correct_disjn(dsjn)
|
||||
|
||||
# Aggregate demands with same exact constraints
|
||||
print('\x1b[1;34;40m' + f'Aggregating similar requests' + '\x1b[0m')
|
||||
|
||||
rqs, dsjn = requests_aggregation(rqs, dsjn)
|
||||
# TODO export novel set of aggregated demands in a json file
|
||||
|
||||
print('\x1b[1;34;40m' + 'The following services have been requested:' + '\x1b[0m')
|
||||
print(rqs)
|
||||
|
||||
print('\x1b[1;34;40m' + f'Computing all paths with constraints' + '\x1b[0m')
|
||||
try:
|
||||
pths = compute_path_dsjctn(network, equipment, rqs, dsjn)
|
||||
except DisjunctionError as this_e:
|
||||
print(f'{ansi_escapes.red}Disjunction error:{ansi_escapes.reset} {this_e}')
|
||||
raise this_e
|
||||
|
||||
print('\x1b[1;34;40m' + f'Propagating on selected path' + '\x1b[0m')
|
||||
propagatedpths, reversed_pths, reversed_propagatedpths = \
|
||||
compute_path_with_disjunction(network, equipment, rqs, pths)
|
||||
# Note that the deepcopy used in compute_path_with_disjunction returns
# a list of nodes that do not belong to network (they are copies of the node objects),
# so no further propagation can be performed on these nodes.
|
||||
|
||||
pth_assign_spectrum(pths, rqs, oms_list, reversed_pths)
|
||||
|
||||
print('\x1b[1;34;40m'+f'Result summary'+ '\x1b[0m')
|
||||
header = ['req id', ' demand', ' snr@bandwidth A-Z (Z-A)', ' snr@0.1nm A-Z (Z-A)',\
|
||||
' Receiver minOSNR', ' mode', ' Gbit/s', ' nb of tsp pairs',\
|
||||
'N,M or blocking reason']
|
||||
data = []
|
||||
data.append(header)
|
||||
for i, this_p in enumerate(propagatedpths):
|
||||
rev_pth = reversed_propagatedpths[i]
|
||||
if rev_pth and this_p:
|
||||
psnrb = f'{round(mean(this_p[-1].snr),2)} ({round(mean(rev_pth[-1].snr),2)})'
|
||||
psnr = f'{round(mean(this_p[-1].snr_01nm), 2)}' +\
|
||||
f' ({round(mean(rev_pth[-1].snr_01nm),2)})'
|
||||
elif this_p:
|
||||
psnrb = f'{round(mean(this_p[-1].snr),2)}'
|
||||
psnr = f'{round(mean(this_p[-1].snr_01nm),2)}'
|
||||
|
||||
try :
|
||||
if rqs[i].blocking_reason in BLOCKING_NOPATH:
|
||||
line = [f'{rqs[i].request_id}', f' {rqs[i].source} to {rqs[i].destination} :',\
|
||||
f'-', f'-', f'-', f'{rqs[i].tsp_mode}', f'{round(rqs[i].path_bandwidth * 1e-9,2)}',\
|
||||
f'-', f'{rqs[i].blocking_reason}']
|
||||
else:
|
||||
line = [f'{rqs[i].request_id}', f' {rqs[i].source} to {rqs[i].destination} : ', psnrb,\
|
||||
psnr, f'-', f'{rqs[i].tsp_mode}', f'{round(rqs[i].path_bandwidth * 1e-9, 2)}',\
|
||||
f'-', f'{rqs[i].blocking_reason}']
|
||||
except AttributeError:
|
||||
line = [f'{rqs[i].request_id}', f' {rqs[i].source} to {rqs[i].destination} : ', psnrb,\
|
||||
psnr, f'{rqs[i].OSNR}', f'{rqs[i].tsp_mode}', f'{round(rqs[i].path_bandwidth * 1e-9,2)}',\
|
||||
f'{ceil(rqs[i].path_bandwidth / rqs[i].bit_rate) }', f'({rqs[i].N},{rqs[i].M})']
|
||||
data.append(line)
|
||||
|
||||
col_width = max(len(word) for row in data for word in row[2:]) # padding
|
||||
firstcol_width = max(len(row[0]) for row in data) # padding
|
||||
secondcol_width = max(len(row[1]) for row in data) # padding
|
||||
for row in data:
|
||||
firstcol = ''.join(row[0].ljust(firstcol_width))
|
||||
secondcol = ''.join(row[1].ljust(secondcol_width))
|
||||
remainingcols = ''.join(word.center(col_width, ' ') for word in row[2:])
|
||||
print(f'{firstcol} {secondcol} {remainingcols}')
|
||||
print('\x1b[1;33;40m'+f'Result summary shows mean SNR and OSNR (average over all channels)' +\
|
||||
'\x1b[0m')
|
||||
|
||||
return propagatedpths, reversed_propagatedpths, rqs
|
||||
|
||||
|
||||
def launch_cli(network, data, equipment):
|
||||
""" Compute requests using network, data and equipment with client line interface
|
||||
"""
|
||||
propagatedpths, reversed_propagatedpths, rqs = compute_requests(network, data, equipment)
|
||||
#Generate the output
|
||||
if ARGS.output :
|
||||
result = []
|
||||
# assumes that the list of rqs and the list of propagatedpths have the same order
|
||||
for i, pth in enumerate(propagatedpths):
|
||||
result.append(Result_element(rqs[i], pth, reversed_propagatedpths[i]))
|
||||
temp = path_result_json(result)
|
||||
fnamecsv = f'{str(ARGS.output)[0:len(str(ARGS.output))-len(str(ARGS.output.suffix))]}.csv'
|
||||
fnamejson = f'{str(ARGS.output)[0:len(str(ARGS.output))-len(str(ARGS.output.suffix))]}.json'
|
||||
with open(fnamejson, 'w', encoding='utf-8') as fjson:
|
||||
fjson.write(dumps(path_result_json(result), indent=2, ensure_ascii=False))
|
||||
with open(fnamecsv, "w", encoding='utf-8') as fcsv:
|
||||
jsontocsv(temp, equipment, fcsv)
|
||||
print('\x1b[1;34;40m'+f'saving in {ARGS.output} and {fnamecsv}'+ '\x1b[0m')
|
||||
|
||||
class GnpyAPI(Resource):
|
||||
""" Compute requests using network, data and equipment with rest api
|
||||
"""
|
||||
def get(self):
|
||||
return {"ping": True}, 200
|
||||
|
||||
def post(self):
|
||||
data = request.get_json()
|
||||
equipment = load_equipment('examples/2019-demo-equipment.json')
|
||||
topo_json = load_json('examples/2019-demo-topology.json')
|
||||
network = network_from_json(topo_json, equipment)
|
||||
try:
|
||||
propagatedpths, reversed_propagatedpths, rqs = compute_requests(network, data, equipment)
|
||||
# Generate the output
|
||||
result = []
|
||||
# assumes that the list of rqs and the list of propagatedpths have the same order
|
||||
for i, pth in enumerate(propagatedpths):
|
||||
result.append(Result_element(rqs[i], pth, reversed_propagatedpths[i]))
|
||||
|
||||
return {"result":path_result_json(result)}, 201
|
||||
except ServiceError as this_e:
|
||||
msg = f'Service error: {this_e}'
|
||||
return {"result": msg}, 400
|
||||
|
||||
API.add_resource(GnpyAPI, '/gnpy-experimental')
|
||||
|
||||
def main(args):
|
||||
""" main function that calls all functions
|
||||
"""
|
||||
LOGGER.info(f'Computing path requests {args.service_filename} into JSON format')
|
||||
print('\x1b[1;34;40m' +\
|
||||
f'Computing path requests {args.service_filename} into JSON format'+ '\x1b[0m')
|
||||
# for debug
|
||||
# print( args.eqpt_filename)
|
||||
|
||||
try:
|
||||
data = load_requests(args.service_filename, args.eqpt_filename, args.bidir)
|
||||
equipment = load_equipment(args.eqpt_filename)
|
||||
network = load_network(args.network_filename, equipment)
|
||||
except EquipmentConfigError as this_e:
|
||||
print(f'{ansi_escapes.red}Configuration error in the equipment library:{ansi_escapes.reset} {this_e}')
|
||||
exit(1)
|
||||
except NetworkTopologyError as this_e:
|
||||
print(f'{ansi_escapes.red}Invalid network definition:{ansi_escapes.reset} {this_e}')
|
||||
exit(1)
|
||||
except ConfigurationError as this_e:
|
||||
print(f'{ansi_escapes.red}Configuration error:{ansi_escapes.reset} {this_e}')
|
||||
exit(1)
|
||||
except ServiceError as this_e:
|
||||
print(f'{ansi_escapes.red}Service error:{ansi_escapes.reset} {this_e}')
|
||||
exit(1)
|
||||
# input_str = raw_input("How will you use your program: c:[cli] , a:[api] ?")
|
||||
# print(input_str)
|
||||
#
|
||||
if ((args.rest == 1) and (args.output is None)):
|
||||
print('you have chosen the rest mode')
|
||||
APP.run(host='0.0.0.0', port=5000, debug=True)
|
||||
elif ((args.rest > 1) or ((args.rest == 1) and (args.output is not None))):
|
||||
print('command is not well formed')
|
||||
else:
|
||||
launch_cli(network, data, equipment)
|
||||
|
||||
if __name__ == '__main__':
|
||||
ARGS = PARSER.parse_args()
|
||||
basicConfig(level={2: DEBUG, 1: INFO, 0: CRITICAL}.get(ARGS.verbose, DEBUG))
|
||||
main(ARGS)
|
||||
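The script above can also serve results over HTTP via the GnpyAPI resource registered on '/gnpy-experimental'. A minimal client sketch, assuming the script was started in REST mode (APP.run on port 5000 in main()) and that the posted service JSON matches the demo topology loaded by the POST handler; the `requests` package and the example file name are illustrative assumptions, not part of this diff:

    import requests  # third-party HTTP client, for illustration only

    with open('examples/serviceDemov1.json', encoding='utf-8') as f:
        payload = f.read()

    resp = requests.post('http://localhost:5000/gnpy-experimental',
                         data=payload,
                         headers={'Content-Type': 'application/json'})
    print(resp.status_code)  # 201 on success, 400 on ServiceError
    print(resp.json())       # {"result": {"response": [...]}}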
98
examples/raman_edfa_example_network.json
Normal file
@@ -0,0 +1,98 @@
|
||||
{
|
||||
"elements": [
|
||||
{
|
||||
"uid": "Site_A",
|
||||
"type": "Transceiver",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0,
|
||||
"longitude": 0,
|
||||
"city": "Site A",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Span1",
|
||||
"type": "RamanFiber",
|
||||
"type_variety": "SSMF",
|
||||
"operational": {
|
||||
"temperature": 283,
|
||||
"raman_pumps": [
|
||||
{
|
||||
"power": 200e-3,
|
||||
"frequency": 205e12,
|
||||
"propagation_direction": "counterprop"
|
||||
},
|
||||
{
|
||||
"power": 206e-3,
|
||||
"frequency": 201e12,
|
||||
"propagation_direction": "counterprop"
|
||||
}
|
||||
]
|
||||
},
|
||||
"params": {
|
||||
"type_variety": "SSMF",
|
||||
"length": 80.0,
|
||||
"loss_coef": 0.2,
|
||||
"length_units": "km",
|
||||
"att_in": 0,
|
||||
"con_in": 0.5,
|
||||
"con_out": 0.5
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1,
|
||||
"longitude": 0,
|
||||
"city": null,
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Edfa1",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_low_gain",
|
||||
"operational": {
|
||||
"gain_target": 15.0,
|
||||
"delta_p": -2,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2,
|
||||
"longitude": 0,
|
||||
"city": null,
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Site_B",
|
||||
"type": "Transceiver",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2,
|
||||
"longitude": 0,
|
||||
"city": "Site B",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"connections": [
|
||||
{
|
||||
"from_node": "Site_A",
|
||||
"to_node": "Span1"
|
||||
},
|
||||
{
|
||||
"from_node": "Span1",
|
||||
"to_node": "Edfa1"
|
||||
},
|
||||
{
|
||||
"from_node": "Edfa1",
|
||||
"to_node": "Site_B"
|
||||
}
|
||||
]
|
||||
}
|
||||
180
examples/serviceDemov1.json
Normal file
@@ -0,0 +1,180 @@
|
||||
{
|
||||
"path-request": [
|
||||
{
|
||||
"request-id": "0",
|
||||
"source": "trx site_a",
|
||||
"destination": "trx site_b",
|
||||
"src-tp-id": "trx site_a",
|
||||
"dst-tp-id": "trx site_b",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": null,
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": null,
|
||||
"path_bandwidth": 100000000000.0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "1",
|
||||
"source": "trx site_a",
|
||||
"destination": "trx site_b",
|
||||
"src-tp-id": "trx site_a",
|
||||
"dst-tp-id": "trx site_b",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": null,
|
||||
"path_bandwidth": 200000000000.0
|
||||
}
|
||||
},
|
||||
"explicit-route-objects": {
|
||||
"route-object-include-exclude": [
|
||||
{
|
||||
"explicit-route-usage": "route-include-ero",
|
||||
"index": 0,
|
||||
"num-unnum-hop": {
|
||||
"node-id": "Span1ab",
|
||||
"link-tp-id": "link-tp-id is not used",
|
||||
"hop-type": "STRICT"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "2",
|
||||
"source": "trx site_a",
|
||||
"destination": "trx site_b",
|
||||
"src-tp-id": "trx site_a",
|
||||
"dst-tp-id": "trx site_b",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": "mode 1",
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": null,
|
||||
"path_bandwidth": 200000000000.0
|
||||
}
|
||||
},
|
||||
"explicit-route-objects": {
|
||||
"route-object-include-exclude": [
|
||||
{
|
||||
"explicit-route-usage": "route-include-ero",
|
||||
"index": 0,
|
||||
"num-unnum-hop": {
|
||||
"node-id": "roadm site_c",
|
||||
"link-tp-id": "link-tp-id is not used",
|
||||
"hop-type": "STRICT"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "3",
|
||||
"source": "trx site_a",
|
||||
"destination": "trx site_b",
|
||||
"src-tp-id": "trx site_a",
|
||||
"dst-tp-id": "trx site_b",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": null,
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": null,
|
||||
"path_bandwidth": 100000000000.0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"request-id": "4",
|
||||
"source": "trx site_a",
|
||||
"destination": "trx site_b",
|
||||
"src-tp-id": "trx site_a",
|
||||
"dst-tp-id": "trx site_b",
|
||||
"bidirectional": false,
|
||||
"path-constraints": {
|
||||
"te-bandwidth": {
|
||||
"technology": "flexi-grid",
|
||||
"trx_type": "Voyager",
|
||||
"trx_mode": null,
|
||||
"effective-freq-slot": [
|
||||
{
|
||||
"N": "null",
|
||||
"M": "null"
|
||||
}
|
||||
],
|
||||
"spacing": 50000000000.0,
|
||||
"max-nb-of-channel": null,
|
||||
"output-power": null,
|
||||
"path_bandwidth": 100000000000.0
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"synchronization": [
|
||||
{
|
||||
"synchronization-id": "x",
|
||||
"svec": {
|
||||
"relaxable": "false",
|
||||
"disjointness": "node link",
|
||||
"request-id-number": [
|
||||
"3",
|
||||
"0"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"synchronization-id": "y",
|
||||
"svec": {
|
||||
"relaxable": "false",
|
||||
"disjointness": "node link",
|
||||
"request-id-number": [
|
||||
"4",
|
||||
"3",
|
||||
"0"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
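For illustration only (standard library, no gnpy imports): a quick way to inspect a service file like the one above, showing the same fields that requests_from_json and disjunctions_from_json consume in compute_requests earlier in this diff:

    from json import load

    with open('examples/serviceDemov1.json', encoding='utf-8') as f:
        data = load(f)

    for rq in data['path-request']:
        bw = rq['path-constraints']['te-bandwidth']
        print(rq['request-id'], rq['source'], '->', rq['destination'],
              bw['trx_type'], bw['trx_mode'], bw['path_bandwidth'])

    for sync in data.get('synchronization', []):
        print('disjunction', sync['synchronization-id'],
              sync['svec']['request-id-number'])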
14
examples/sim_params.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"raman_computed_channels": [1, 18, 37, 56, 75],
|
||||
"raman_parameters": {
|
||||
"flag_raman": true,
|
||||
"space_resolution": 10e3,
|
||||
"tolerance": 1e-8
|
||||
},
|
||||
"nli_parameters": {
|
||||
"nli_method_name": "ggn_spectrally_separated",
|
||||
"wdm_grid_size": 50e9,
|
||||
"dispersion_tolerance": 1,
|
||||
"phase_shift_tollerance": 0.1
|
||||
}
|
||||
}
|
||||
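A minimal sketch of how these simulation parameters are meant to be consumed, based on transmission_main_example.py later in this diff (load_sim_params and configure_network); the file names are taken from the examples added here and are otherwise an assumption:

    from gnpy.core.equipment import load_equipment
    from gnpy.core.network import load_network, load_sim_params, configure_network

    equipment = load_equipment('examples/eqpt_config.json')
    network = load_network('examples/raman_edfa_example_network.json', equipment)

    # RamanFiber spans require the simulation parameters above (--sim-params)
    sim_params = load_sim_params('examples/sim_params.json')
    configure_network(network, sim_params)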
303
examples/std_medium_gain_advanced_config.json
Normal file
@@ -0,0 +1,303 @@
|
||||
{ "nf_fit_coeff": [
|
||||
0.000168241,
|
||||
0.0469961,
|
||||
0.0359549,
|
||||
5.82851
|
||||
],
|
||||
"f_min": 191.35e12,
|
||||
"f_max": 196.1e12,
|
||||
"nf_ripple": [
|
||||
-0.3110761646066259,
|
||||
-0.3110761646066259,
|
||||
-0.31110274831665313,
|
||||
-0.31419329378173544,
|
||||
-0.3172854168606314,
|
||||
-0.32037911876162584,
|
||||
-0.3233255190215882,
|
||||
-0.31624321721895354,
|
||||
-0.30915729645781326,
|
||||
-0.30206775396360075,
|
||||
-0.2949045115165272,
|
||||
-0.26632156113294336,
|
||||
-0.23772399031437283,
|
||||
-0.20911178784023846,
|
||||
-0.18048410390821285,
|
||||
-0.14379944379052215,
|
||||
-0.10709599992470213,
|
||||
-0.07037375788020579,
|
||||
-0.03372858157230583,
|
||||
-0.015660302006048,
|
||||
0.0024172385953583004,
|
||||
0.020504047353947653,
|
||||
0.03860013139908377,
|
||||
0.05670549786742816,
|
||||
0.07482015390297145,
|
||||
0.0838762040768461,
|
||||
0.09284481475528361,
|
||||
0.1018180306253394,
|
||||
0.11079585523492333,
|
||||
0.1020395478432815,
|
||||
0.09310160456603413,
|
||||
0.08415906712621996,
|
||||
0.07521193198077789,
|
||||
0.0676340601339394,
|
||||
0.06005437964543287,
|
||||
0.052470799141237305,
|
||||
0.044883315610536455,
|
||||
0.037679759069084225,
|
||||
0.03047647598902483,
|
||||
0.02326948274513522,
|
||||
0.01605877647020772,
|
||||
0.021248462316134083,
|
||||
0.02657315875107553,
|
||||
0.03190060058247842,
|
||||
0.03723078993416436,
|
||||
0.04256372893215024,
|
||||
0.047899419704645264,
|
||||
0.03915515813685565,
|
||||
0.030289222542492025,
|
||||
0.021418708618354456,
|
||||
0.012573926129294415,
|
||||
0.006240488799898697,
|
||||
-9.622162373026585e-05,
|
||||
-0.006436207679519103,
|
||||
-0.012779471908040341,
|
||||
-0.02038153550619876,
|
||||
-0.027999803010447587,
|
||||
-0.035622012697103154,
|
||||
-0.043236398934156144,
|
||||
-0.04493583574805963,
|
||||
-0.04663615264317309,
|
||||
-0.048337350303318156,
|
||||
-0.050039429413028365,
|
||||
-0.051742390657545205,
|
||||
-0.05342028484370278,
|
||||
-0.05254242298580185,
|
||||
-0.05166410580536087,
|
||||
-0.05078533294804249,
|
||||
-0.04990610405914272,
|
||||
-0.05409792133358102,
|
||||
-0.05832916277634124,
|
||||
-0.06256260169582961,
|
||||
-0.06660356886269536,
|
||||
-0.04779792991567815,
|
||||
-0.028982516728038848,
|
||||
-0.010157321677553965,
|
||||
0.00861320615127981,
|
||||
0.01913736978785662,
|
||||
0.029667009055877668,
|
||||
0.04020212822983975,
|
||||
0.050742731588695494,
|
||||
0.061288823415841555,
|
||||
0.07184040799914815,
|
||||
0.1043252636301016,
|
||||
0.13687829834471027,
|
||||
0.1694483010211072,
|
||||
0.202035284929368,
|
||||
0.23624619427167134,
|
||||
0.27048596623174515,
|
||||
0.30474360397422756,
|
||||
0.3390191214858807,
|
||||
0.36358851509924695,
|
||||
0.38814205928193013,
|
||||
0.41270842850729195,
|
||||
0.4372876328262819,
|
||||
0.4372876328262819
|
||||
],
|
||||
"dgt": [
|
||||
2.714526681131686,
|
||||
2.705443819238505,
|
||||
2.6947834587664494,
|
||||
2.6841217449620203,
|
||||
2.6681935771243177,
|
||||
2.6521732021128046,
|
||||
2.630396440815385,
|
||||
2.602860350286428,
|
||||
2.5696460593920065,
|
||||
2.5364027376452056,
|
||||
2.499446286796604,
|
||||
2.4587748041127506,
|
||||
2.414398437185221,
|
||||
2.3699990328716107,
|
||||
2.322373696229342,
|
||||
2.271520771371253,
|
||||
2.2174389328192197,
|
||||
2.16337565384239,
|
||||
2.1183028432496016,
|
||||
2.082225099873648,
|
||||
2.055100772005235,
|
||||
2.0279625371819305,
|
||||
2.0008103857988204,
|
||||
1.9736443063300082,
|
||||
1.9482128147680253,
|
||||
1.9245345552113182,
|
||||
1.9026104247588487,
|
||||
1.8806927939516411,
|
||||
1.862235672444246,
|
||||
1.847275503201129,
|
||||
1.835814081380705,
|
||||
1.824381436842932,
|
||||
1.8139629377087627,
|
||||
1.8045606557581335,
|
||||
1.7961751115773796,
|
||||
1.7877868031023945,
|
||||
1.7793941781790852,
|
||||
1.7709972329654864,
|
||||
1.7625959636196327,
|
||||
1.7541903672600494,
|
||||
1.7459181197626403,
|
||||
1.737780757913635,
|
||||
1.7297783508684146,
|
||||
1.7217732861435076,
|
||||
1.7137640932265894,
|
||||
1.7057507692361864,
|
||||
1.6918150918099673,
|
||||
1.6719047669939942,
|
||||
1.6460167077689267,
|
||||
1.6201194134191075,
|
||||
1.5986915141218316,
|
||||
1.5817353179379183,
|
||||
1.569199764184379,
|
||||
1.5566577309558969,
|
||||
1.545374152761467,
|
||||
1.5353620432989845,
|
||||
1.5266220576235803,
|
||||
1.5178910621476225,
|
||||
1.5097346239790443,
|
||||
1.502153039909686,
|
||||
1.495145456062699,
|
||||
1.488134243479226,
|
||||
1.48111939735681,
|
||||
1.474100442252211,
|
||||
1.4670307626366115,
|
||||
1.4599103316162523,
|
||||
1.45273959485914,
|
||||
1.445565137158368,
|
||||
1.4340878115214444,
|
||||
1.418273806730323,
|
||||
1.3981208704326855,
|
||||
1.3779439775587023,
|
||||
1.3598972673004606,
|
||||
1.3439818461440451,
|
||||
1.3301807335621048,
|
||||
1.316383926863083,
|
||||
1.3040618749785347,
|
||||
1.2932153453410835,
|
||||
1.2838336236692311,
|
||||
1.2744470198196236,
|
||||
1.2650555289898042,
|
||||
1.2556591482982988,
|
||||
1.2428104897182262,
|
||||
1.2264996957264114,
|
||||
1.2067249615595257,
|
||||
1.1869318618366975,
|
||||
1.1672278304018044,
|
||||
1.1476135933863398,
|
||||
1.1280891949729075,
|
||||
1.108555289615659,
|
||||
1.0895983485572227,
|
||||
1.0712204022764056,
|
||||
1.0534217504465226,
|
||||
1.0356155337864215,
|
||||
1.017807767853702,
|
||||
1.0
|
||||
],
|
||||
"gain_ripple": [
|
||||
0.1359703369791596,
|
||||
0.11822862697916037,
|
||||
0.09542181697916163,
|
||||
0.06245819697916133,
|
||||
0.02602813697916062,
|
||||
-0.0036199830208403228,
|
||||
-0.018326963020840026,
|
||||
-0.0246928330208398,
|
||||
-0.016792253020838643,
|
||||
-0.0028138630208403015,
|
||||
0.017572956979162058,
|
||||
0.038328296979159404,
|
||||
0.054956336979159914,
|
||||
0.0670723869791594,
|
||||
0.07091459697916136,
|
||||
0.07094413697916124,
|
||||
0.07114372697916238,
|
||||
0.07533675697916209,
|
||||
0.08731066697916035,
|
||||
0.10313984697916112,
|
||||
0.12276252697916235,
|
||||
0.14239527697916188,
|
||||
0.15945681697916214,
|
||||
0.1739275269791598,
|
||||
0.1767381569791624,
|
||||
0.17037189697916233,
|
||||
0.15216302697916007,
|
||||
0.13114358697916018,
|
||||
0.10802383697916085,
|
||||
0.08548825697916129,
|
||||
0.06916723697916183,
|
||||
0.05848224697916038,
|
||||
0.05447361697916264,
|
||||
0.05154489697916276,
|
||||
0.04946107697915991,
|
||||
0.04717897697916129,
|
||||
0.04551704697916037,
|
||||
0.04467697697916151,
|
||||
0.04072968697916224,
|
||||
0.03285456697916089,
|
||||
0.023488786979161347,
|
||||
0.01659282697915998,
|
||||
0.013321846979160057,
|
||||
0.011234826979162449,
|
||||
0.01030063697916006,
|
||||
0.00936596697916059,
|
||||
0.00874012697916271,
|
||||
0.00842583697916055,
|
||||
0.006965146979162284,
|
||||
0.0040435869791615175,
|
||||
0.0007104669791608842,
|
||||
-0.0015763130208377163,
|
||||
-0.006936193020838033,
|
||||
-0.016475303020840215,
|
||||
-0.028748483020837767,
|
||||
-0.039618433020837784,
|
||||
-0.051112303020840244,
|
||||
-0.06468462302083822,
|
||||
-0.07868024302083754,
|
||||
-0.09101254302083817,
|
||||
-0.10103437302083762,
|
||||
-0.11041488302083735,
|
||||
-0.11916081302083725,
|
||||
-0.12789859302083784,
|
||||
-0.1353792530208402,
|
||||
-0.14160178302083892,
|
||||
-0.1455411330208385,
|
||||
-0.1484450830208388,
|
||||
-0.14823350302084037,
|
||||
-0.14591937302083835,
|
||||
-0.1409032730208395,
|
||||
-0.13525493302083902,
|
||||
-0.1279646530208396,
|
||||
-0.11963431302083904,
|
||||
-0.11089282302084058,
|
||||
-0.1027863830208382,
|
||||
-0.09717347302083823,
|
||||
-0.09343261302083761,
|
||||
-0.0913487130208388,
|
||||
-0.08906007302083907,
|
||||
-0.0865687230208394,
|
||||
-0.08407607302083875,
|
||||
-0.07844600302084004,
|
||||
-0.06968090302083851,
|
||||
-0.05947139302083926,
|
||||
-0.05095282302083959,
|
||||
-0.042428283020839785,
|
||||
-0.03218106302083967,
|
||||
-0.01819858302084043,
|
||||
-0.0021726530208390216,
|
||||
0.01393231697916164,
|
||||
0.028098946979159933,
|
||||
0.040326236979161934,
|
||||
0.05257029697916238,
|
||||
0.06479749697916048,
|
||||
0.07704745697916238
|
||||
]
|
||||
}
|
||||
703
examples/topoDemov1.json
Normal file
@@ -0,0 +1,703 @@
|
||||
{
|
||||
"elements": [
|
||||
{
|
||||
"uid": "trx site_a",
|
||||
"type": "Transceiver",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0,
|
||||
"longitude": 0,
|
||||
"city": "Site a",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "roadm site_a",
|
||||
"type": "Roadm",
|
||||
"params": {
|
||||
"target_pch_out_db": -20,
|
||||
"restrictions": {
|
||||
"preamp_variety_list": [],
|
||||
"booster_variety_list": []
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0,
|
||||
"longitude": 0,
|
||||
"city": "Site a",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Span1ab",
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"type_variety": "SSMF",
|
||||
"length": 100.0,
|
||||
"loss_coef": 0.2,
|
||||
"length_units": "km",
|
||||
"att_in": 0,
|
||||
"con_in": 0.5,
|
||||
"con_out": 0.5
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1,
|
||||
"longitude": 0,
|
||||
"city": null,
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Span1ba",
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"type_variety": "SSMF",
|
||||
"length": 100.0,
|
||||
"loss_coef": 0.2,
|
||||
"length_units": "km",
|
||||
"att_in": 0,
|
||||
"con_in": 0.5,
|
||||
"con_out": 0.5
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1,
|
||||
"longitude": 0,
|
||||
"city": null,
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Span2ab",
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"type_variety": "SSMF",
|
||||
"length": 80.0,
|
||||
"loss_coef": 0.2,
|
||||
"length_units": "km",
|
||||
"att_in": 0,
|
||||
"con_in": 0.5,
|
||||
"con_out": 0.5
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1,
|
||||
"longitude": 0,
|
||||
"city": null,
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Span2ba",
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"type_variety": "SSMF",
|
||||
"length": 80.0,
|
||||
"loss_coef": 0.2,
|
||||
"length_units": "km",
|
||||
"att_in": 0,
|
||||
"con_in": 0.5,
|
||||
"con_out": 0.5
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1,
|
||||
"longitude": 0,
|
||||
"city": null,
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "roadm site_b",
|
||||
"type": "Roadm",
|
||||
"params": {
|
||||
"target_pch_out_db": -20,
|
||||
"restrictions": {
|
||||
"preamp_variety_list": [],
|
||||
"booster_variety_list": []
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0,
|
||||
"longitude": 0,
|
||||
"city": "Site b",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "trx site_b",
|
||||
"type": "Transceiver",
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 2,
|
||||
"longitude": 0,
|
||||
"city": "Site b",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "booster1 site_a",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_medium_gain",
|
||||
"operational": {
|
||||
"gain_target": 19.0,
|
||||
"delta_p": -1.0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site a",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "preamp site_b",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_low_gain",
|
||||
"operational": {
|
||||
"gain_target": 18.0,
|
||||
"delta_p": 0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site b",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "booster1 site_b",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_medium_gain",
|
||||
"operational": {
|
||||
"gain_target": 19.0,
|
||||
"delta_p": -1.0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site b",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "preamp1 site_a",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_low_gain",
|
||||
"operational": {
|
||||
"gain_target": 18.0,
|
||||
"delta_p": 0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site_a",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "booster2 site_a",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_medium_gain",
|
||||
"operational": {
|
||||
"gain_target": 19.0,
|
||||
"delta_p": -1.0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site a",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "preamp2 site_b",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_low_gain",
|
||||
"operational": {
|
||||
"gain_target": 18.0,
|
||||
"delta_p": 0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site_b",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "booster2 site_b",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_medium_gain",
|
||||
"operational": {
|
||||
"gain_target": 19.0,
|
||||
"delta_p": -1.0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site b",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "preamp2 site_a",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_low_gain",
|
||||
"operational": {
|
||||
"gain_target": 18.0,
|
||||
"delta_p": 0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site_a",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "booster3 site_a",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_medium_gain",
|
||||
"operational": {
|
||||
"gain_target": 19.0,
|
||||
"delta_p": -1.0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site a",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "preamp3 site_b",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_low_gain",
|
||||
"operational": {
|
||||
"gain_target": 18.0,
|
||||
"delta_p": 0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site_b",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "booster3 site_b",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_medium_gain",
|
||||
"operational": {
|
||||
"gain_target": 19.0,
|
||||
"delta_p": -1.0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site b",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "preamp3 site_a",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_low_gain",
|
||||
"operational": {
|
||||
"gain_target": 18.0,
|
||||
"delta_p": 0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site_a",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "roadm site_c",
|
||||
"type": "Roadm",
|
||||
"params": {
|
||||
"target_pch_out_db": -20,
|
||||
"restrictions": {
|
||||
"preamp_variety_list": [],
|
||||
"booster_variety_list": []
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0,
|
||||
"longitude": 0,
|
||||
"city": "Site c",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "booster1 site_c",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_medium_gain",
|
||||
"operational": {
|
||||
"gain_target": 19.0,
|
||||
"delta_p": -1.0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site c",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "preamp1 site_c",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_low_gain",
|
||||
"operational": {
|
||||
"gain_target": 18.0,
|
||||
"delta_p": 0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site_c",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "booster2 site_c",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_medium_gain",
|
||||
"operational": {
|
||||
"gain_target": 19.0,
|
||||
"delta_p": -1.0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site c",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "preamp2 site_c",
|
||||
"type": "Edfa",
|
||||
"type_variety": "std_low_gain",
|
||||
"operational": {
|
||||
"gain_target": 18.0,
|
||||
"delta_p": 0,
|
||||
"tilt_target": 0,
|
||||
"out_voa": 0
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 0.5,
|
||||
"longitude": 0.0,
|
||||
"city": "Site_c",
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Span1ac",
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"type_variety": "SSMF",
|
||||
"length": 80.0,
|
||||
"loss_coef": 0.2,
|
||||
"length_units": "km",
|
||||
"att_in": 0,
|
||||
"con_in": 0.5,
|
||||
"con_out": 0.5
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1,
|
||||
"longitude": 0,
|
||||
"city": null,
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Span1ca",
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"type_variety": "SSMF",
|
||||
"length": 80.0,
|
||||
"loss_coef": 0.2,
|
||||
"length_units": "km",
|
||||
"att_in": 0,
|
||||
"con_in": 0.5,
|
||||
"con_out": 0.5
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1,
|
||||
"longitude": 0,
|
||||
"city": null,
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Span1bc",
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"type_variety": "SSMF",
|
||||
"length": 80.0,
|
||||
"loss_coef": 0.2,
|
||||
"length_units": "km",
|
||||
"att_in": 0,
|
||||
"con_in": 0.5,
|
||||
"con_out": 0.5
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1,
|
||||
"longitude": 0,
|
||||
"city": null,
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"uid": "Span1cb",
|
||||
"type": "Fiber",
|
||||
"type_variety": "SSMF",
|
||||
"params": {
|
||||
"type_variety": "SSMF",
|
||||
"length": 80.0,
|
||||
"loss_coef": 0.2,
|
||||
"length_units": "km",
|
||||
"att_in": 0,
|
||||
"con_in": 0.5,
|
||||
"con_out": 0.5
|
||||
},
|
||||
"metadata": {
|
||||
"location": {
|
||||
"latitude": 1,
|
||||
"longitude": 0,
|
||||
"city": null,
|
||||
"region": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"connections": [
|
||||
{
|
||||
"from_node": "trx site_a",
|
||||
"to_node": "roadm site_a"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm site_a",
|
||||
"to_node": "booster1 site_a"
|
||||
},
|
||||
{
|
||||
"from_node": "booster1 site_a",
|
||||
"to_node": "Span1ab"
|
||||
},
|
||||
{
|
||||
"from_node": "Span1ab",
|
||||
"to_node": "preamp site_b"
|
||||
},
|
||||
{
|
||||
"from_node": "preamp site_b",
|
||||
"to_node": "roadm site_b"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm site_b",
|
||||
"to_node": "trx site_b"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm site_a",
|
||||
"to_node": "booster2 site_a"
|
||||
},
|
||||
{
|
||||
"from_node": "booster2 site_a",
|
||||
"to_node": "Span2ab"
|
||||
},
|
||||
{
|
||||
"from_node": "Span2ab",
|
||||
"to_node": "preamp2 site_b"
|
||||
},
|
||||
{
|
||||
"from_node": "preamp2 site_b",
|
||||
"to_node": "roadm site_b"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm site_b",
|
||||
"to_node": "booster1 site_b"
|
||||
},
|
||||
{
|
||||
"from_node": "booster1 site_b",
|
||||
"to_node": "Span1ba"
|
||||
},
|
||||
{
|
||||
"from_node": "Span1ba",
|
||||
"to_node": "preamp1 site_a"
|
||||
},
|
||||
{
|
||||
"from_node": "preamp1 site_a",
|
||||
"to_node": "roadm site_a"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm site_b",
|
||||
"to_node": "booster2 site_b"
|
||||
},
|
||||
{
|
||||
"from_node": "booster2 site_b",
|
||||
"to_node": "Span2ba"
|
||||
},
|
||||
{
|
||||
"from_node": "Span2ba",
|
||||
"to_node": "preamp2 site_a"
|
||||
},
|
||||
{
|
||||
"from_node": "preamp2 site_a",
|
||||
"to_node": "roadm site_a"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm site_a",
|
||||
"to_node": "booster3 site_a"
|
||||
},
|
||||
{
|
||||
"from_node": "booster3 site_a",
|
||||
"to_node": "Span1ac"
|
||||
},
|
||||
{
|
||||
"from_node": "Span1ac",
|
||||
"to_node": "preamp1 site_c"
|
||||
},
|
||||
{
|
||||
"from_node": "preamp1 site_c",
|
||||
"to_node": "roadm site_c"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm site_c",
|
||||
"to_node": "booster1 site_c"
|
||||
},
|
||||
{
|
||||
"from_node": "booster1 site_c",
|
||||
"to_node": "Span1cb"
|
||||
},
|
||||
{
|
||||
"from_node": "Span1cb",
|
||||
"to_node": "preamp3 site_b"
|
||||
},
|
||||
{
|
||||
"from_node": "preamp3 site_b",
|
||||
"to_node": "roadm site_b"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm site_b",
|
||||
"to_node": "booster3 site_b"
|
||||
},
|
||||
{
|
||||
"from_node": "booster3 site_b",
|
||||
"to_node": "Span1bc"
|
||||
},
|
||||
{
|
||||
"from_node": "Span1bc",
|
||||
"to_node": "preamp2 site_c"
|
||||
},
|
||||
{
|
||||
"from_node": "preamp2 site_c",
|
||||
"to_node": "roadm site_c"
|
||||
},
|
||||
{
|
||||
"from_node": "roadm site_c",
|
||||
"to_node": "booster2 site_c"
|
||||
},
|
||||
{
|
||||
"from_node": "booster2 site_c",
|
||||
"to_node": "Span1ca"
|
||||
},
|
||||
{
|
||||
"from_node": "Span1ca",
|
||||
"to_node": "preamp3 site_a"
|
||||
},
|
||||
{
|
||||
"from_node": "preamp3 site_a",
|
||||
"to_node": "roadm site_a"
|
||||
}
|
||||
]
|
||||
}
|
||||
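For illustration (standard library only): the explicit-route hops used in serviceDemov1.json above ('Span1ab', 'roadm site_c') must exist as element uids in this topology; a quick sanity check:

    from json import load

    with open('examples/topoDemov1.json', encoding='utf-8') as f:
        topo = load(f)

    uids = {el['uid'] for el in topo['elements']}
    for hop in ('trx site_a', 'trx site_b', 'Span1ab', 'roadm site_c'):
        print(hop, 'found' if hop in uids else 'MISSING')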
319
examples/transmission_main_example.py
Executable file
@@ -0,0 +1,319 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
transmission_main_example.py
|
||||
============================
|
||||
|
||||
Main example for transmission simulation.
|
||||
|
||||
Reads from network JSON (by default, `edfa_example_network.json`)
|
||||
'''
|
||||
|
||||
from gnpy.core.equipment import load_equipment, trx_mode_params
|
||||
from gnpy.core.utils import db2lin, lin2db, write_csv
|
||||
from argparse import ArgumentParser
|
||||
from sys import exit
|
||||
from pathlib import Path
|
||||
from json import loads
|
||||
from collections import Counter
|
||||
from logging import getLogger, basicConfig, INFO, ERROR, DEBUG
|
||||
from numpy import linspace, mean, log10
|
||||
from matplotlib.pyplot import show, axis, figure, title, text
|
||||
from networkx import (draw_networkx_nodes, draw_networkx_edges,
|
||||
draw_networkx_labels, dijkstra_path)
|
||||
from gnpy.core.network import load_network, build_network, save_network, load_sim_params, configure_network
|
||||
from gnpy.core.elements import Transceiver, Fiber, RamanFiber, Edfa, Roadm
|
||||
from gnpy.core.info import create_input_spectral_information, SpectralInformation, Channel, Power, Pref
|
||||
from gnpy.core.request import Path_request, RequestParams, compute_constrained_path, propagate2
|
||||
from gnpy.core.exceptions import ConfigurationError, EquipmentConfigError, NetworkTopologyError
|
||||
import gnpy.core.ansi_escapes as ansi_escapes
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
def plot_baseline(network):
|
||||
edges = set(network.edges())
|
||||
pos = {n: (n.lng, n.lat) for n in network.nodes()}
|
||||
labels = {n: n.location.city for n in network.nodes() if isinstance(n, Transceiver)}
|
||||
city_labels = set(labels.values())
|
||||
for n in network.nodes():
|
||||
if n.location.city and n.location.city not in city_labels:
|
||||
labels[n] = n.location.city
|
||||
city_labels.add(n.location.city)
|
||||
label_pos = pos
|
||||
|
||||
fig = figure()
|
||||
kwargs = {'figure': fig, 'pos': pos}
|
||||
plot = draw_networkx_nodes(network, nodelist=network.nodes(), node_color='#ababab', **kwargs)
|
||||
draw_networkx_edges(network, edgelist=edges, edge_color='#ababab', **kwargs)
|
||||
draw_networkx_labels(network, labels=labels, font_size=14, **{**kwargs, 'pos': label_pos})
|
||||
axis('off')
|
||||
show()
|
||||
|
||||
def plot_results(network, path, source, destination, infos):
|
||||
path_edges = set(zip(path[:-1], path[1:]))
|
||||
edges = set(network.edges()) - path_edges
|
||||
pos = {n: (n.lng, n.lat) for n in network.nodes()}
|
||||
nodes = {}
|
||||
for k, (x, y) in pos.items():
|
||||
nodes.setdefault((round(x, 1), round(y, 1)), []).append(k)
|
||||
labels = {n: n.location.city for n in network.nodes() if isinstance(n, Transceiver)}
|
||||
city_labels = set(labels.values())
|
||||
for n in network.nodes():
|
||||
if n.location.city and n.location.city not in city_labels:
|
||||
labels[n] = n.location.city
|
||||
city_labels.add(n.location.city)
|
||||
label_pos = pos
|
||||
|
||||
fig = figure()
|
||||
kwargs = {'figure': fig, 'pos': pos}
|
||||
all_nodes = [n for n in network.nodes() if n not in path]
|
||||
plot = draw_networkx_nodes(network, nodelist=all_nodes, node_color='#ababab', node_size=50, **kwargs)
|
||||
draw_networkx_nodes(network, nodelist=path, node_color='#ff0000', node_size=55, **kwargs)
|
||||
draw_networkx_edges(network, edgelist=edges, edge_color='#ababab', **kwargs)
|
||||
draw_networkx_edges(network, edgelist=path_edges, edge_color='#ff0000', **kwargs)
|
||||
draw_networkx_labels(network, labels=labels, font_size=14, **{**kwargs, 'pos': label_pos})
|
||||
title(f'Propagating from {source.loc.city} to {destination.loc.city}')
|
||||
axis('off')
|
||||
|
||||
heading = 'Spectral Information\n\n'
|
||||
textbox = text(0.85, 0.20, heading, fontsize=14, fontname='Ubuntu Mono',
|
||||
verticalalignment='top', transform=fig.axes[0].transAxes,
|
||||
bbox={'boxstyle': 'round', 'facecolor': 'wheat', 'alpha': 0.5})
|
||||
|
||||
msgs = {(x, y): heading + '\n\n'.join(str(n) for n in ns if n in path)
|
||||
for (x, y), ns in nodes.items()}
|
||||
|
||||
def hover(event):
|
||||
if event.xdata is None or event.ydata is None:
|
||||
return
|
||||
if fig.contains(event):
|
||||
x, y = round(event.xdata, 1), round(event.ydata, 1)
|
||||
if (x, y) in msgs:
|
||||
textbox.set_text(msgs[x, y])
|
||||
else:
|
||||
textbox.set_text(heading)
|
||||
fig.canvas.draw_idle()
|
||||
|
||||
fig.canvas.mpl_connect('motion_notify_event', hover)
|
||||
show()
|
||||
|
||||
|
||||
def main(network, equipment, source, destination, sim_params, req=None):
|
||||
result_dicts = {}
|
||||
network_data = [{
|
||||
'network_name' : str(args.filename),
|
||||
'source' : source.uid,
|
||||
'destination' : destination.uid
|
||||
}]
|
||||
result_dicts.update({'network': network_data})
|
||||
design_data = [{
|
||||
'power_mode' : equipment['Span']['default'].power_mode,
|
||||
'span_power_range' : equipment['Span']['default'].delta_power_range_db,
|
||||
'design_pch' : equipment['SI']['default'].power_dbm,
|
||||
'baud_rate' : equipment['SI']['default'].baud_rate
|
||||
}]
|
||||
result_dicts.update({'design': design_data})
|
||||
simulation_data = []
|
||||
result_dicts.update({'simulation results': simulation_data})
|
||||
|
||||
power_mode = equipment['Span']['default'].power_mode
|
||||
print('\n'.join([f'Power mode is set to {power_mode}',
|
||||
f'=> it can be modified in eqpt_config.json - Span']))
|
||||
|
||||
pref_ch_db = lin2db(req.power*1e3) #reference channel power / span (SL=20dB)
|
||||
pref_total_db = pref_ch_db + lin2db(req.nb_channel) #reference total power / span (SL=20dB)
|
||||
build_network(network, equipment, pref_ch_db, pref_total_db)
|
||||
path = compute_constrained_path(network, req)
|
||||
|
||||
if len([s.length for s in path if isinstance(s, RamanFiber)]):
|
||||
if sim_params is None:
|
||||
print(f'{ansi_escapes.red}Invocation error:{ansi_escapes.reset} RamanFiber requires passing simulation params via --sim-params')
|
||||
exit(1)
|
||||
configure_network(network, sim_params)
|
||||
|
||||
spans = [s.length for s in path if isinstance(s, RamanFiber) or isinstance(s, Fiber)]
|
||||
print(f'\nThere are {len(spans)} fiber spans over {sum(spans)/1000:.0f} km between {source.uid} and {destination.uid}')
|
||||
print(f'\nNow propagating between {source.uid} and {destination.uid}:')
|
||||
|
||||
try:
|
||||
p_start, p_stop, p_step = equipment['SI']['default'].power_range_db
|
||||
p_num = abs(int(round((p_stop - p_start)/p_step))) + 1 if p_step != 0 else 1
|
||||
power_range = list(linspace(p_start, p_stop, p_num))
|
||||
except TypeError:
|
||||
print('invalid power range definition in eqpt_config, should be power_range_db: [lower, upper, step]')
|
||||
power_range = [0]
|
||||
|
||||
if not power_mode:
|
||||
#power cannot be changed in gain mode
|
||||
power_range = [0]
|
||||
for dp_db in power_range:
|
||||
req.power = db2lin(pref_ch_db + dp_db)*1e-3
|
||||
if power_mode:
|
||||
print(f'\nPropagating with input power = {ansi_escapes.cyan}{lin2db(req.power*1e3):.2f} dBm{ansi_escapes.reset}:')
|
||||
else:
|
||||
print(f'\nPropagating in {ansi_escapes.cyan}gain mode{ansi_escapes.reset}: power cannot be set manually')
|
||||
infos = propagate2(path, req, equipment)
|
||||
if len(power_range) == 1:
|
||||
for elem in path:
|
||||
print(elem)
|
||||
if power_mode:
|
||||
print(f'\nTransmission result for input power = {lin2db(req.power*1e3):.2f} dBm:')
|
||||
else:
|
||||
print(f'\nTransmission results:')
|
||||
print(f' Final SNR total (0.1 nm): {ansi_escapes.cyan}{mean(destination.snr_01nm):.02f} dB{ansi_escapes.reset}')
|
||||
else:
|
||||
print(path[-1])
|
||||
|
||||
#print(f'\n !!!!!!!!!!!!!!!!! TEST POINT !!!!!!!!!!!!!!!!!!!!!')
|
||||
#print(f'carriers ase output of {path[1]} =\n {list(path[1].carriers("out", "nli"))}')
|
||||
# => use "in" or "out" parameter
|
||||
# => use "nli" or "ase" or "signal" or "total" parameter
|
||||
if power_mode:
|
||||
simulation_data.append({
|
||||
'Pch_dBm' : pref_ch_db + dp_db,
|
||||
'OSNR_ASE_0.1nm' : round(mean(destination.osnr_ase_01nm),2),
|
||||
'OSNR_ASE_signal_bw' : round(mean(destination.osnr_ase),2),
|
||||
'SNR_nli_signal_bw' : round(mean(destination.osnr_nli),2),
|
||||
'SNR_total_signal_bw' : round(mean(destination.snr),2)
|
||||
})
|
||||
else:
|
||||
simulation_data.append({
|
||||
'gain_mode' : 'power cannot be set',
|
||||
'OSNR_ASE_0.1nm' : round(mean(destination.osnr_ase_01nm),2),
|
||||
'OSNR_ASE_signal_bw' : round(mean(destination.osnr_ase),2),
|
||||
'SNR_nli_signal_bw' : round(mean(destination.osnr_nli),2),
|
||||
'SNR_total_signal_bw' : round(mean(destination.snr),2)
|
||||
})
|
||||
write_csv(result_dicts, 'simulation_result.csv')
|
||||
return path, infos
|
||||
|
||||
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('-e', '--equipment', type=Path,
|
||||
default=Path(__file__).parent / 'eqpt_config.json')
|
||||
parser.add_argument('--sim-params', type=Path,
|
||||
default=None, help='Path to the JSON containing simulation parameters (required for Raman)')
|
||||
parser.add_argument('--show-channels', action='store_true', help='Show final per-channel OSNR summary')
|
||||
parser.add_argument('-pl', '--plot', action='store_true')
|
||||
parser.add_argument('-v', '--verbose', action='count', default=0, help='increases verbosity for each occurrence')
|
||||
parser.add_argument('-l', '--list-nodes', action='store_true', help='list all transceiver nodes')
|
||||
parser.add_argument('-po', '--power', default=0, help='channel ref power in dBm')
|
||||
parser.add_argument('-names', '--names-matching', action='store_true', help='display network names that are close matches')
|
||||
parser.add_argument('filename', nargs='?', type=Path,
|
||||
default=Path(__file__).parent / 'edfa_example_network.json')
|
||||
parser.add_argument('source', nargs='?', help='source node')
|
||||
parser.add_argument('destination', nargs='?', help='destination node')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = parser.parse_args()
|
||||
basicConfig(level={0: ERROR, 1: INFO, 2: DEBUG}.get(args.verbose, DEBUG))
|
||||
|
||||
try:
|
||||
equipment = load_equipment(args.equipment)
|
||||
network = load_network(args.filename, equipment, args.names_matching)
|
||||
sim_params = load_sim_params(args.sim_params) if args.sim_params is not None else None
|
||||
except EquipmentConfigError as e:
|
||||
print(f'{ansi_escapes.red}Configuration error in the equipment library:{ansi_escapes.reset} {e}')
|
||||
exit(1)
|
||||
except NetworkTopologyError as e:
|
||||
print(f'{ansi_escapes.red}Invalid network definition:{ansi_escapes.reset} {e}')
|
||||
exit(1)
|
||||
except ConfigurationError as e:
|
||||
print(f'{ansi_escapes.red}Configuration error:{ansi_escapes.reset} {e}')
|
||||
exit(1)
|
||||
|
||||
if args.plot:
|
||||
plot_baseline(network)
|
||||
|
||||
transceivers = {n.uid: n for n in network.nodes() if isinstance(n, Transceiver)}
|
||||
|
||||
if not transceivers:
|
||||
exit('Network has no transceivers!')
|
||||
if len(transceivers) < 2:
|
||||
exit('Network has only one transceiver!')
|
||||
|
||||
if args.list_nodes:
|
||||
for uid in transceivers:
|
||||
print(uid)
|
||||
exit()
|
||||
|
||||
#First try to find exact match if source/destination provided
|
||||
if args.source:
|
||||
source = transceivers.pop(args.source, None)
|
||||
valid_source = True if source else False
|
||||
else:
|
||||
source = None
|
||||
logger.info('No source node specified: picking random transceiver')
|
||||
|
||||
if args.destination:
|
||||
destination = transceivers.pop(args.destination, None)
|
||||
valid_destination = True if destination else False
|
||||
else:
|
||||
destination = None
|
||||
logger.info('No destination node specified: picking random transceiver')
|
||||
|
||||
#If no exact match try to find partial match
|
||||
if args.source and not source:
|
||||
#TODO code a more advanced regex to find nodes match
|
||||
source = next((transceivers.pop(uid) for uid in transceivers \
|
||||
if args.source.lower() in uid.lower()), None)
|
||||
|
||||
if args.destination and not destination:
|
||||
#TODO code a more advanced regex to find nodes match
|
||||
destination = next((transceivers.pop(uid) for uid in transceivers \
|
||||
if args.destination.lower() in uid.lower()), None)
|
||||
|
||||
#If no partial match or no source/destination provided pick random
|
||||
if not source:
|
||||
source = list(transceivers.values())[0]
|
||||
del transceivers[source.uid]
|
||||
|
||||
if not destination:
|
||||
destination = list(transceivers.values())[0]
|
||||
|
||||
logger.info(f'source = {args.source!r}')
|
||||
logger.info(f'destination = {args.destination!r}')
|
||||
|
||||
params = {}
|
||||
params['request_id'] = 0
|
||||
params['trx_type'] = ''
|
||||
params['trx_mode'] = ''
|
||||
params['source'] = source.uid
|
||||
params['destination'] = destination.uid
|
||||
params['bidir'] = False
|
||||
params['nodes_list'] = [destination.uid]
|
||||
params['loose_list'] = ['strict']
|
||||
params['format'] = ''
|
||||
params['path_bandwidth'] = 0
|
||||
trx_params = trx_mode_params(equipment)
|
||||
if args.power:
|
||||
trx_params['power'] = db2lin(float(args.power))*1e-3
|
||||
params.update(trx_params)
|
||||
req = Path_request(**params)
|
||||
path, infos = main(network, equipment, source, destination, sim_params, req)
|
||||
save_network(args.filename, network)
|
||||
|
||||
if args.show_channels:
|
||||
print('\nThe total SNR per channel at the end of the line is:')
|
||||
print('{:>5}{:>26}{:>26}{:>28}{:>28}{:>28}' \
|
||||
.format('Ch. #', 'Channel frequency (THz)', 'Channel power (dBm)', 'OSNR ASE (signal bw, dB)', 'SNR NLI (signal bw, dB)', 'SNR total (signal bw, dB)'))
|
||||
for final_carrier, ch_osnr, ch_snr_nl, ch_snr in zip(infos[path[-1]][1].carriers, path[-1].osnr_ase, path[-1].osnr_nli, path[-1].snr):
|
||||
ch_freq = final_carrier.frequency * 1e-12
|
||||
ch_power = lin2db(final_carrier.power.signal*1e3)
|
||||
print('{:5}{:26.2f}{:26.2f}{:28.2f}{:28.2f}{:28.2f}' \
|
||||
.format(final_carrier.channel_number, round(ch_freq, 2), round(ch_power, 2), round(ch_osnr, 2), round(ch_snr_nl, 2), round(ch_snr, 2)))
|
||||
|
||||
if not args.source:
|
||||
print(f'\n(No source node specified: picked {source.uid})')
|
||||
elif not valid_source:
|
||||
print(f'\n(Invalid source node {args.source!r} replaced with {source.uid})')
|
||||
|
||||
if not args.destination:
|
||||
print(f'\n(No destination node specified: picked {destination.uid})')
|
||||
elif not valid_destination:
|
||||
print(f'\n(Invalid destination node {args.destination!r} replaced with {destination.uid})')
|
||||
|
||||
if args.plot:
|
||||
plot_results(network, path, source, destination, infos)
|
||||
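An illustrative invocation combining the example files added in this diff (paths and node names are assumptions based on the defaults above; Site_A and Site_B are the transceiver uids of raman_edfa_example_network.json, and --sim-params is needed because that topology contains a RamanFiber):

    python examples/transmission_main_example.py examples/raman_edfa_example_network.json Site_A Site_B --sim-params examples/sim_params.json --show-channels -v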
36
examples/write_path_jsontocsv.py
Normal file
@@ -0,0 +1,36 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
write_path_jsontocsv.py
|
||||
========================
|
||||
|
||||
Reads JSON path result file in accordance with the Yang model for requesting
|
||||
path computation and writes results to a CSV file.
|
||||
|
||||
See: draft-ietf-teas-yang-path-computation-01.txt
|
||||
"""
|
||||
|
||||
from argparse import ArgumentParser
|
||||
from pathlib import Path
|
||||
from json import loads
|
||||
from gnpy.core.equipment import load_equipment
|
||||
from gnpy.core.request import jsontocsv
|
||||
|
||||
|
||||
parser = ArgumentParser(description='A script that writes JSON path results into a CSV file.')
|
||||
parser.add_argument('filename', nargs='?', type = Path)
|
||||
parser.add_argument('output_filename', nargs='?', type = Path)
|
||||
parser.add_argument('eqpt_filename', nargs='?', type = Path, default=Path(__file__).parent / 'eqpt_config.json')
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = parser.parse_args()
|
||||
|
||||
with open(args.output_filename, 'w', encoding='utf-8') as file:
|
||||
with open(args.filename, encoding='utf-8') as f:
|
||||
print(f'Reading {args.filename}')
|
||||
json_data = loads(f.read())
|
||||
equipment = load_equipment(args.eqpt_filename)
|
||||
print(f'Writing in {args.output_filename}')
|
||||
jsontocsv(json_data,equipment,file)
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from .gnpy import (raised_cosine_comb, analytic_formula, compute_psi, fwm_eff,
|
||||
get_f_computed_interp, get_freqarray, gn_analytic, gn_model,
|
||||
interpolate_in_range, GN_integral)
|
||||
|
||||
__all__ = ['gnpy']
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
import gnpy as gn
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
import time
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
# Accuracy parameters
|
||||
flag_analytic = True
|
||||
num_computed_values = 2
|
||||
interp_method = 'linear'
|
||||
threshold_fwm = 50
|
||||
n_points = 500
|
||||
n_points_min = 4
|
||||
accuracy_param = {'is_analytic': flag_analytic, 'points_not_interp': num_computed_values, 'kind_interp': interp_method,
|
||||
'th_fwm': threshold_fwm, 'n_points': n_points, 'n_points_min': n_points_min}
|
||||
|
||||
# Parallelization Parameters
|
||||
n_cores = 1
|
||||
|
||||
# Spectrum parameters
|
||||
num_ch = 95
|
||||
rs = np.ones(num_ch) * 0.032
|
||||
b_ch = rs # For root raised cosine shapes, the -3 dB band is equal to the symbol rate
|
||||
roll_off = np.ones(num_ch) * 0.05
|
||||
power = np.ones(num_ch) * 0.001
|
||||
central_freq = 193.5
|
||||
if num_ch % 2 == 1: # odd number of channels
|
||||
fch = np.arange(-(num_ch // 2), (num_ch // 2) + 1, 1) * 0.05 # noqa: E501
|
||||
else:
|
||||
fch = (np.arange(0, num_ch) - (num_ch / 2.0) + 0.5) * 0.05
|
||||
spectrum_param = {'num_ch': num_ch, 'f_ch': fch, 'b_ch': b_ch, 'roll_off': roll_off, 'power': power}
|
||||
|
||||
# Fiber Parameters
|
||||
beta2 = 21.27
|
||||
l_span = 100.0
|
||||
loss = 0.2
|
||||
gam = 1.27
|
||||
fiber_param = {'alpha': loss, 'span_length': l_span, 'beta_2': beta2, 'gamma': gam}
|
||||
|
||||
# EDFA Parameters
|
||||
noise_fig = 5.5
|
||||
gain_zero = 25.0
|
||||
gain_tilting = 0.5
|
||||
|
||||
# Compute the GN model
|
||||
t = time.time()
|
||||
nli_cmp, f_nli_cmp, nli_int, f_nli_int = gn.gn_model(spectrum_param, fiber_param, accuracy_param, n_cores) # noqa: E501
|
||||
print('Elapsed: %s' % (time.time() - t))
|
||||
|
||||
# Compute the EDFA profile
|
||||
gain, g_ase = gn.compute_edfa_profile(gain_zero, gain_tilting, noise_fig, central_freq, fch)
|
||||
|
||||
# Compute the raised cosine comb
|
||||
f1_array = np.linspace(np.amin(fch), np.amax(fch), 1e3)
|
||||
gtx = gn.raised_cosine_comb(f1_array, rs, roll_off, fch, power)
|
||||
gtx = gtx + 10 ** -6 # To avoid log10 issues.
|
||||
|
||||
# Plot the results
|
||||
plt.figure(1)
|
||||
plt.plot(f1_array, 10 * np.log10(gtx), '-b', label='WDM comb')
|
||||
plt.plot(f_nli_cmp, 10 * np.log10(nli_cmp), 'ro', label='GNLI computed')
|
||||
plt.plot(f_nli_int, 10 * np.log10(nli_int), 'g+', label='GNLI interpolated')
|
||||
plt.plot(fch, 10 * np.log10(g_ase), 'yo', label='GASE')
|
||||
plt.ylabel('PSD [dB(W/THz)]')
|
||||
plt.xlabel('f [THz]')
|
||||
plt.legend(loc='upper left')
|
||||
plt.grid()
|
||||
plt.draw()
|
||||
plt.show()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,84 +0,0 @@
|
||||
import os
|
||||
import gnpy as gn
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
import time
|
||||
|
||||
|
||||
def main_ole():
|
||||
|
||||
# String indicating the folder in which outputs will be saved
|
||||
string_date_time = time.strftime("%Y-%m-%d") + '_' + time.strftime("%H-%M-%S")
|
||||
output_path = './output/' + string_date_time + '/'
|
||||
|
||||
# Creates the directory if it doesn't exist
|
||||
if not os.path.isdir(output_path):
|
||||
os.makedirs(output_path)
|
||||
|
||||
from configuration.fiber_parameters import fibers
|
||||
from configuration.general_parameters import sys_param, control_param
|
||||
from configuration.link_description import link
|
||||
from input.spectrum_in import spectrum
|
||||
|
||||
# adapt the laser position to the grid
|
||||
if len(spectrum['laser_position']) < sys_param['ns']:
|
||||
n = sys_param['ns'] - len(spectrum['laser_position'])
|
||||
missing_zeros = [0 for _ in range(n)]
|
||||
spectrum['laser_position'] += missing_zeros
|
||||
elif len(spectrum['laser_position']) > sys_param['ns']:
|
||||
print('Error: the spectrum definition requires a larger number of slots ns in the spectrum grid')
|
||||
|
||||
delta_f = 6.25E-3
|
||||
f_0 = sys_param['f0']
|
||||
f_cent = f_0 + ((sys_param['ns'] // 2.0) * delta_f)
|
||||
|
||||
n_ch = spectrum['laser_position'].count(1)
|
||||
# Get comb parameters
|
||||
f_ch = np.zeros(n_ch)
|
||||
count = 0
|
||||
for index, bool_laser in enumerate(spectrum['laser_position']):
|
||||
if bool_laser:
|
||||
f_ch[count] = delta_f * index + (f_0 - f_cent)
|
||||
count += 1
|
||||
|
||||
t = time.time()
|
||||
# It runs the OLE
|
||||
osnr_nl_db, osnr_lin_db = gn.ole(spectrum, link, fibers, sys_param, control_param, output_path=output_path)
|
||||
print('Elapsed: %s' % (time.time() - t))
|
||||
|
||||
# Compute the raised cosine comb
|
||||
power, rs, roll_off, p_ase, p_nli, n_ch = gn.get_spectrum_param(spectrum)
|
||||
f1_array = np.linspace(np.amin(f_ch), np.amax(f_ch), 1000)
|
||||
gtx = gn.raised_cosine_comb(f1_array, rs, roll_off, f_ch, power)
|
||||
gtx = gtx + 10 ** -6 # To avoid log10 issues.
|
||||
|
||||
# OSNR in the central channel
|
||||
ind_c = n_ch // 2
|
||||
osnr_lin_central_db = osnr_lin_db[ind_c]
|
||||
osnr_nl_central_db = osnr_nl_db[ind_c]
|
||||
print('The linear OSNR in the central channel is: ' + str(osnr_lin_central_db) + ' dB')
|
||||
print('The non-linear OSNR in the central channel is: ' + str(osnr_nl_central_db) + ' dB')
|
||||
|
||||
# Plot the results
|
||||
plt.figure(1)
|
||||
plt.plot(f1_array, 10 * np.log10(gtx), '-b', label='WDM comb PSD [dB(W/THz)]')
|
||||
plt.plot(f_ch, 10 * np.log10(p_nli), 'ro', label='NLI [dBw]')
|
||||
plt.plot(f_ch, 10 * np.log10(p_ase), 'g+', label='ASE noise [dBw]')
|
||||
plt.ylabel('')
|
||||
plt.xlabel('f [THz]')
|
||||
plt.legend(loc='upper right')
|
||||
plt.grid()
|
||||
plt.draw()
|
||||
|
||||
plt.figure(2)
|
||||
plt.plot(f_ch, osnr_nl_db, 'ro', label='non-linear OSNR')
|
||||
plt.plot(f_ch, osnr_lin_db, 'g+', label='linear OSNR')
|
||||
plt.ylabel('OSNR [dB]')
|
||||
plt.xlabel('f [THz]')
|
||||
plt.legend(loc='lower left')
|
||||
plt.grid()
|
||||
plt.show()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main_ole()
|
||||
gnpy/cli.py (deleted, 17 lines)
@@ -1,17 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""Console script for gnpy."""
|
||||
|
||||
import click
|
||||
|
||||
|
||||
@click.command()
|
||||
def main(args=None):
|
||||
"""Console script for gnpy."""
|
||||
click.echo("Replace this message by putting your code into "
|
||||
"gnpy.cli.main")
|
||||
click.echo("See click documentation at http://click.pocoo.org/")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1 +0,0 @@
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
# coding=utf-8
|
||||
""" fiber_parameters.py describes the fiber parameters.
|
||||
fibers is a dictionary containing a dictionary for each kind of fiber
|
||||
each dictionary has to report:
|
||||
reference_frequency: the frequency at which the parameters are evaluated [THz]
|
||||
alpha: the attenuation coefficient [dB/km]
|
||||
alpha_1st: the first derivative of alpha indicating the alpha slope [dB/km/THz]
|
||||
set it to zero if you assume a flat attenuation with respect to frequency
|
||||
beta_2: the dispersion coefficient [ps^2/km]
|
||||
n_2: second-order nonlinear refractive index [m^2/W]
|
||||
a typical value is 2.5E-20 m^2/W
|
||||
a_eff: the effective area of the fiber [um^2]
|
||||
"""
|
||||
|
||||
fibers = {
|
||||
'SMF': {
|
||||
'reference_frequency': 193.5,
|
||||
'alpha': 0.2,
|
||||
'alpha_1st': 0,
|
||||
'beta_2': 21.27,
|
||||
'n_2': 2.5E-20,
|
||||
'a_eff': 77.77,
|
||||
},
|
||||
'NZDF': {
|
||||
'reference_frequency': 193.5,
|
||||
'alpha': 0.22,
|
||||
'alpha_1st': 0,
|
||||
'beta_2': 21,
|
||||
'n_2': 2.5E-20,
|
||||
'a_eff': 70,
|
||||
}
|
||||
}
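# Hypothetical additional entry, shown only to illustrate the schema documented above;
# the values below are placeholders, not measured fiber data.
fibers['LEAF'] = {
    'reference_frequency': 193.5,   # THz
    'alpha': 0.21,                  # dB/km
    'alpha_1st': 0,                 # dB/km/THz, flat attenuation assumed
    'beta_2': 5.5,                  # ps^2/km
    'n_2': 2.5E-20,                 # m^2/W
    'a_eff': 72,                    # um^2
}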
|
||||
@@ -1,40 +0,0 @@
|
||||
# -*- coding: utf-8 -*
|
||||
"""general_parameters.py contains the general configuration settings
|
||||
|
||||
The settings are subdivided into two dictionaries:
|
||||
sys_param: a dictionary containing the general system parameters:
|
||||
f0: the starting frequency of the laser grid used to describe the WDM system [THz]
|
||||
ns: the number of 6.25 GHz slots in the grid
|
||||
|
||||
control_param:
|
||||
save_each_comp: a boolean flag. If true, it saves in output folder one spectrum file at the output of each
|
||||
component, otherwise it saves just the last spectrum
|
||||
is_linear: a boolean flag. If true, OLE doesn't compute the NLI; if false, OLE will consider the NLI
|
||||
is_analytic: a boolean flag. If true, the NLI is computed through the analytic formula, otherwise it uses
|
||||
the double integral. Warning: the double integral is very slow.
|
||||
points_not_interp: if the double integral is used, it indicates how many points are calculated; the others will
|
||||
be interpolated
|
||||
kind_interp: a string indicating the interpolation method for the double integral
|
||||
th_fwm: the threshold of the four wave mixing efficiency for the double integral
|
||||
n_points: number of points in which the double integral is computed in the high FWM efficiency region
|
||||
n_points_min: number of points in which the double integral is computed in the low FWM efficiency region
|
||||
n_cores: number of cores for parallel computation [not yet implemented]
|
||||
"""
|
||||
# System parameters
|
||||
sys_param = {
|
||||
'f0': 192.075,
|
||||
'ns': 328
|
||||
}
|
||||
|
||||
# control parameters
|
||||
control_param = {
|
||||
'save_each_comp': True,
|
||||
'is_linear': False,
|
||||
'is_analytic': True,
|
||||
'points_not_interp': 2,
|
||||
'kind_interp': 'linear',
|
||||
'th_fwm': 50,
|
||||
'n_points': 500,
|
||||
'n_points_min': 4,
|
||||
'n_cores': 1
|
||||
}
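# Illustrative sketch: switching from the analytic formula to the exact double integral amounts to
# flipping 'is_analytic'; the interpolation knobs documented above then become relevant (values here
# are example choices, not recommendations).
control_param_exact = dict(control_param, is_analytic=False, points_not_interp=4, n_points=1000)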
|
||||
@@ -1,59 +0,0 @@
|
||||
# coding=utf-8
|
||||
""" link_description.py contains the full description of that OLE has to emulate.
|
||||
It contains a list of dictionaries following the structure of the link; each element of the list describes one
|
||||
component.
|
||||
|
||||
'comp_cat': the kind of link component:
|
||||
PC: a passive component defined by a loss at a certain frequency and a loss tilt
|
||||
OA: an optical amplifier defined by a gain at a certain frequency, a gain tilt and a noise figure
|
||||
fiber: a span of fiber described by the type and the length
|
||||
'comp_id': is an id identifying the component. It has to be unique for each component!
|
||||
|
||||
extra fields for PC:
|
||||
'ref_freq': the frequency at which the 'loss' parameter is evaluated [THz]
|
||||
'loss': the loss at the frequency 'ref_freq' [dB]
|
||||
'loss_tlt': the frequency dependent loss [dB/THz]
|
||||
extra fields for OA:
|
||||
'ref_freq': the frequency at which the 'gain' parameter is evaluated [THz]
|
||||
'gain': the gain at the frequency 'ref_freq' [dB]
|
||||
'gain_tlt': the frequency dependent gain [dB/THz]
|
||||
'noise_figure': the noise figure of the optical amplifier [dB]
|
||||
extra fields for fiber:
|
||||
'fiber_type': a string calling the type of fiber described in the file fiber_parameters.py
|
||||
'length': the fiber length [km]
|
||||
|
||||
"""
|
||||
smf = {
|
||||
'comp_cat': 'fiber',
|
||||
'comp_id': '',
|
||||
'fiber_type': 'SMF',
|
||||
'length': 100
|
||||
}
|
||||
|
||||
oa = {
|
||||
'comp_cat': 'OA',
|
||||
'comp_id': '',
|
||||
'ref_freq': 193.5,
|
||||
'gain': 20,
|
||||
'gain_tlt': 0.0,
|
||||
'noise_figure': 5
|
||||
}
|
||||
|
||||
pc = {
|
||||
'comp_cat': 'PC',
|
||||
'comp_id': '04',
|
||||
'ref_freq': 193.,
|
||||
'loss': 2.0,
|
||||
'loss_tlt': 0.0
|
||||
}
|
||||
|
||||
link = []
|
||||
|
||||
for index in range(20):
|
||||
smf['comp_id'] = '%03d' % (2 * index)
|
||||
oa['comp_id'] = '%03d' % (2 * index + 1)
|
||||
link += [dict(smf)]
|
||||
link += [dict(oa)]
|
||||
|
||||
pc['comp_id'] = '%03d' % 40
|
||||
link += [dict(pc)]
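# For illustration, the same pattern builds a shorter 3-span link (one fiber and one amplifier per span):
short_link = []
for index in range(3):
    smf['comp_id'] = '%03d' % (2 * index)
    oa['comp_id'] = '%03d' % (2 * index + 1)
    short_link += [dict(smf), dict(oa)]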
|
||||
gnpy/core/__init__.py (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
########################################################################
|
||||
# _____ ___ ____ ____ ____ _____ #
|
||||
# |_ _|_ _| _ \ | _ \/ ___|| ____| #
|
||||
# | | | || |_) | | |_) \___ \| _| #
|
||||
# | | | || __/ | __/ ___) | |___ #
|
||||
# |_| |___|_| |_| |____/|_____| #
|
||||
# #
|
||||
# == Physical Simulation Environment == #
|
||||
# #
|
||||
########################################################################
|
||||
|
||||
|
||||
'''
|
||||
gnpy route planning and optimization library
|
||||
============================================
|
||||
|
||||
gnpy is a route planning and optimization library, written in Python, for
|
||||
operators of large-scale mesh optical networks.
|
||||
|
||||
:copyright: © 2018, Telecom Infra Project
|
||||
:license: BSD 3-Clause, see LICENSE for more details.
|
||||
'''
|
||||
|
||||
from . import elements
|
||||
from .execute import *
|
||||
from .network import *
|
||||
from .utils import *
|
||||
gnpy/core/ansi_escapes.py (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.ansi_escapes
|
||||
======================
|
||||
|
||||
A random subset of ANSI terminal escape codes for colored messages
|
||||
'''
|
||||
|
||||
red = '\x1b[1;31;40m'
|
||||
cyan = '\x1b[1;36;40m'
|
||||
reset = '\x1b[0m'
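# Illustrative usage sketch: wrap a message in a color code and always restore the terminal
# with `reset` (the message text is an arbitrary example):
#
#     from gnpy.core.ansi_escapes import red, reset
#     print(red + 'CRITICAL: something went wrong' + reset)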
|
||||
gnpy/core/convert.py (new executable file, 631 lines)
@@ -0,0 +1,631 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
gnpy.core.convert
|
||||
=================
|
||||
|
||||
This module contains utilities for converting between XLS and JSON.
|
||||
|
||||
The input XLS file must contain sheets named "Nodes" and "Links".
|
||||
It may optionally contain a sheet named "Eqpt".
|
||||
|
||||
In the "Nodes" sheet, only the "City" column is mandatory. The column "Type"
|
||||
can be determined automatically given the topology (e.g., if degree 2, ILA;
|
||||
otherwise, ROADM.) Incorrectly specified types (e.g., ILA for node of
|
||||
degree ≠ 2) will be automatically corrected.
|
||||
|
||||
In the "Links" sheet, only the first three columns ("Node A", "Node Z" and
|
||||
"east Distance (km)") are mandatory. Missing "west" information is copied from
|
||||
the "east" information so that it is possible to input undirected data.
|
||||
"""
|
||||
|
||||
from sys import exit
|
||||
try:
|
||||
from xlrd import open_workbook
|
||||
except ModuleNotFoundError:
|
||||
exit('Required: `pip install xlrd`')
|
||||
from argparse import ArgumentParser
|
||||
from collections import namedtuple, Counter, defaultdict
|
||||
from itertools import chain
|
||||
from json import dumps
|
||||
from pathlib import Path
|
||||
from difflib import get_close_matches
|
||||
from gnpy.core.utils import silent_remove
|
||||
from gnpy.core.exceptions import NetworkTopologyError
|
||||
import time
|
||||
|
||||
all_rows = lambda sh, start=0: (sh.row(x) for x in range(start, sh.nrows))
|
||||
|
||||
class Node(object):
|
||||
def __init__(self, **kwargs):
|
||||
super(Node, self).__init__()
|
||||
self.update_attr(kwargs)
|
||||
|
||||
def update_attr(self, kwargs):
|
||||
clean_kwargs = {k:v for k,v in kwargs.items() if v !=''}
|
||||
for k,v in self.default_values.items():
|
||||
v = clean_kwargs.get(k,v)
|
||||
setattr(self, k, v)
|
||||
|
||||
default_values = \
|
||||
{
|
||||
'city': '',
|
||||
'state': '',
|
||||
'country': '',
|
||||
'region': '',
|
||||
'latitude': 0,
|
||||
'longitude': 0,
|
||||
'node_type': 'ILA',
|
||||
'booster_restriction' : '',
|
||||
'preamp_restriction' : ''
|
||||
}
|
||||
|
||||
class Link(object):
|
||||
"""attribtes from west parse_ept_headers dict
|
||||
+node_a, node_z, west_fiber_con_in, east_fiber_con_in
|
||||
"""
|
||||
def __init__(self, **kwargs):
|
||||
super(Link, self).__init__()
|
||||
self.update_attr(kwargs)
|
||||
self.distance_units = 'km'
|
||||
|
||||
def update_attr(self, kwargs):
|
||||
clean_kwargs = {k:v for k,v in kwargs.items() if v !=''}
|
||||
for k,v in self.default_values.items():
|
||||
v = clean_kwargs.get(k,v)
|
||||
setattr(self, k, v)
|
||||
k = 'west' + k.split('east')[-1]
|
||||
v = clean_kwargs.get(k,v)
|
||||
setattr(self, k, v)
|
||||
|
||||
def __eq__(self, link):
|
||||
return (self.from_city == link.from_city and self.to_city == link.to_city) \
|
||||
or (self.from_city == link.to_city and self.to_city == link.from_city)
|
||||
|
||||
default_values = \
|
||||
{
|
||||
'from_city': '',
|
||||
'to_city': '',
|
||||
'east_distance': 80,
|
||||
'east_fiber': 'SSMF',
|
||||
'east_lineic': 0.2,
|
||||
'east_con_in': None,
|
||||
'east_con_out': None,
|
||||
'east_pmd': 0.1,
|
||||
'east_cable': ''
|
||||
}
|
||||
|
||||
|
||||
class Eqpt(object):
|
||||
def __init__(self, **kwargs):
|
||||
super(Eqpt, self).__init__()
|
||||
self.update_attr(kwargs)
|
||||
|
||||
def update_attr(self, kwargs):
|
||||
clean_kwargs = {k:v for k,v in kwargs.items() if v !=''}
|
||||
for k,v in self.default_values.items():
|
||||
v_east = clean_kwargs.get(k,v)
|
||||
setattr(self, k, v_east)
|
||||
k = 'west' + k.split('east')[-1]
|
||||
v_west = clean_kwargs.get(k,v)
|
||||
setattr(self, k, v_west)
|
||||
|
||||
default_values = \
|
||||
{
|
||||
'from_city': '',
|
||||
'to_city': '',
|
||||
'east_amp_type': '',
|
||||
'east_att_in': 0,
|
||||
'east_amp_gain': None,
|
||||
'east_amp_dp': None,
|
||||
'east_tilt': 0,
|
||||
'east_att_out': None
|
||||
}
|
||||
|
||||
|
||||
def read_header(my_sheet, line, slice_):
|
||||
""" return the list of headers !:= ''
|
||||
header_i = [(header, header_column_index), ...]
|
||||
in a {line, slice_x, slice_y} range
|
||||
"""
|
||||
Param_header = namedtuple('Param_header', 'header colindex')
|
||||
try:
|
||||
header = [x.value.strip() for x in my_sheet.row_slice(line, slice_[0], slice_[1])]
|
||||
header_i = [Param_header(header,i+slice_[0]) for i, header in enumerate(header) if header != '']
|
||||
except Exception:
|
||||
header_i = []
|
||||
if header_i != [] and header_i[-1].colindex != slice_[1]:
|
||||
header_i.append(Param_header('',slice_[1]))
|
||||
return header_i
|
||||
|
||||
def read_slice(my_sheet, line, slice_, header):
|
||||
"""return the slice range of a given header
|
||||
in a defined range {line, slice_x, slice_y}"""
|
||||
header_i = read_header(my_sheet, line, slice_)
|
||||
slice_range = (-1,-1)
|
||||
if header_i != []:
|
||||
try:
|
||||
slice_range = next((h.colindex,header_i[i+1].colindex) \
|
||||
for i,h in enumerate(header_i) if header in h.header)
|
||||
except Exception:
|
||||
pass
|
||||
return slice_range
|
||||
|
||||
|
||||
def parse_headers(my_sheet, input_headers_dict, headers, start_line, slice_in):
|
||||
"""return a dict of header_slice
|
||||
key = column index
|
||||
value = header name"""
|
||||
|
||||
|
||||
for h0 in input_headers_dict:
|
||||
slice_out = read_slice(my_sheet, start_line, slice_in, h0)
|
||||
iteration = 1
|
||||
while slice_out == (-1,-1) and iteration < 10:
|
||||
#try next lines
|
||||
#print(h0, iteration)
|
||||
slice_out = read_slice(my_sheet, start_line+iteration, slice_in, h0)
|
||||
iteration += 1
|
||||
if slice_out == (-1, -1):
|
||||
if h0 in ('east', 'Node A', 'Node Z', 'City') :
|
||||
print(f'\x1b[1;31;40m'+f'CRITICAL: missing _{h0}_ header: EXECUTION ENDS'+ '\x1b[0m')
|
||||
exit()
|
||||
else:
|
||||
print(f'missing header {h0}')
|
||||
elif not isinstance(input_headers_dict[h0], dict):
|
||||
headers[slice_out[0]] = input_headers_dict[h0]
|
||||
else:
|
||||
headers = parse_headers(my_sheet, input_headers_dict[h0], headers, start_line+1, slice_out)
|
||||
if headers == {}:
|
||||
print(f'\x1b[1;31;40m'+f'CRITICAL ERROR: could not find any header to read _ ABORT'+ '\x1b[0m')
|
||||
exit()
|
||||
return headers
|
||||
|
||||
def parse_row(row, headers):
|
||||
#print([label for label in ept.values()])
|
||||
#print([i for i in ept.keys()])
|
||||
#print(row[i for i in ept.keys()])
|
||||
return {f: r.value for f, r in \
|
||||
zip([label for label in headers.values()], [row[i] for i in headers])}
|
||||
#if r.ctype != XL_CELL_EMPTY}
|
||||
|
||||
def parse_sheet(my_sheet, input_headers_dict, header_line, start_line, column):
|
||||
headers = parse_headers(my_sheet, input_headers_dict, {}, header_line, (0,column))
|
||||
for row in all_rows(my_sheet, start=start_line):
|
||||
yield parse_row(row[0: column], headers)
|
||||
|
||||
def sanity_check(nodes, links, nodes_by_city, links_by_city, eqpts_by_city):
|
||||
|
||||
duplicate_links = []
|
||||
for l1 in links:
|
||||
for l2 in links:
|
||||
if l1 is not l2 and l1 == l2 and l2 not in duplicate_links:
|
||||
print(f'\nWARNING\n \
|
||||
link {l1.from_city}-{l1.to_city} is duplicate \
|
||||
\nthe 1st duplicate link will be removed but you should check Links sheet input')
|
||||
duplicate_links.append(l1)
|
||||
#if duplicate_links != []:
|
||||
#time.sleep(3)
|
||||
for l in duplicate_links:
|
||||
links.remove(l)
|
||||
|
||||
try :
|
||||
test_nodes = [n for n in nodes_by_city if not n in links_by_city]
|
||||
test_links = [n for n in links_by_city if not n in nodes_by_city]
|
||||
test_eqpts = [n for n in eqpts_by_city if not n in nodes_by_city]
|
||||
assert (test_nodes == [] or test_nodes == [''])\
|
||||
and (test_links == [] or test_links ==[''])\
|
||||
and (test_eqpts == [] or test_eqpts ==[''])
|
||||
except AssertionError:
|
||||
print(f'CRITICAL error: \nNames in Nodes and Links sheets do not match, check:\
|
||||
\n{test_nodes} in Nodes sheet\
|
||||
\n{test_links} in Links sheet\
|
||||
\n{test_eqpts} in Eqpt sheet')
|
||||
exit(1)
|
||||
|
||||
for city,link in links_by_city.items():
|
||||
if nodes_by_city[city].node_type.lower()=='ila' and len(link) != 2:
|
||||
#wrong input: ILA sites can only be Degree 2
|
||||
# => correct to make it a ROADM and remove entry in links_by_city
|
||||
#TODO : put in log rather than print
|
||||
print(f'invalid node type ({nodes_by_city[city].node_type})\
|
||||
specified in {city}, replaced by ROADM')
|
||||
nodes_by_city[city].node_type = 'ROADM'
|
||||
for n in nodes:
|
||||
if n.city==city:
|
||||
n.node_type='ROADM'
|
||||
return nodes, links
|
||||
|
||||
def convert_file(input_filename, names_matching=False, filter_region=[]):
|
||||
nodes, links, eqpts = parse_excel(input_filename)
|
||||
if filter_region:
|
||||
nodes = [n for n in nodes if n.region.lower() in filter_region]
|
||||
cities = {n.city for n in nodes}
|
||||
links = [lnk for lnk in links if lnk.from_city in cities and
|
||||
lnk.to_city in cities]
|
||||
cities = {lnk.from_city for lnk in links} | {lnk.to_city for lnk in links}
|
||||
nodes = [n for n in nodes if n.city in cities]
|
||||
|
||||
global nodes_by_city
|
||||
nodes_by_city = {n.city: n for n in nodes}
|
||||
#create matching dictionary for node name mismatch analysis
|
||||
|
||||
cities = {''.join(c.strip() for c in n.city.split('C+L')).lower(): n.city for n in nodes}
|
||||
cities_to_match = [k for k in cities]
|
||||
city_match_dic = defaultdict(list)
|
||||
for city in cities:
|
||||
if city in cities_to_match:
|
||||
cities_to_match.remove(city)
|
||||
matches = get_close_matches(city, cities_to_match, 4, 0.85)
|
||||
for m in matches:
|
||||
city_match_dic[cities[city]].append(cities[m])
|
||||
#check lower case/upper case
|
||||
for city in nodes_by_city:
|
||||
for match_city in nodes_by_city:
|
||||
if match_city.lower() == city.lower() and match_city != city:
|
||||
city_match_dic[city].append(match_city)
|
||||
|
||||
if names_matching:
|
||||
print('\ncity match dictionary:',city_match_dic)
|
||||
with open('name_match_dictionary.json', 'w', encoding='utf-8') as city_match_dic_file:
|
||||
city_match_dic_file.write(dumps(city_match_dic, indent=2, ensure_ascii=False))
|
||||
|
||||
global links_by_city
|
||||
links_by_city = defaultdict(list)
|
||||
for link in links:
|
||||
links_by_city[link.from_city].append(link)
|
||||
links_by_city[link.to_city].append(link)
|
||||
|
||||
global eqpts_by_city
|
||||
eqpts_by_city = defaultdict(list)
|
||||
for eqpt in eqpts:
|
||||
eqpts_by_city[eqpt.from_city].append(eqpt)
|
||||
|
||||
nodes, links = sanity_check(nodes, links, nodes_by_city, links_by_city, eqpts_by_city)
|
||||
|
||||
data = {
|
||||
'elements':
|
||||
[{'uid': f'trx {x.city}',
|
||||
'metadata': {'location': {'city': x.city,
|
||||
'region': x.region,
|
||||
'latitude': x.latitude,
|
||||
'longitude': x.longitude}},
|
||||
'type': 'Transceiver'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower() == 'roadm'] +
|
||||
[{'uid': f'roadm {x.city}',
|
||||
'metadata': {'location': {'city': x.city,
|
||||
'region': x.region,
|
||||
'latitude': x.latitude,
|
||||
'longitude': x.longitude}},
|
||||
'type': 'Roadm'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower() == 'roadm' \
|
||||
and x.booster_restriction == '' and x.preamp_restriction == ''] +
|
||||
[{'uid': f'roadm {x.city}',
|
||||
'params' : {
|
||||
'restrictions': {
|
||||
'preamp_variety_list': silent_remove(x.preamp_restriction.split(' | '),''),
|
||||
'booster_variety_list': silent_remove(x.booster_restriction.split(' | '),'')
|
||||
}
|
||||
},
|
||||
'metadata': {'location': {'city': x.city,
|
||||
'region': x.region,
|
||||
'latitude': x.latitude,
|
||||
'longitude': x.longitude}},
|
||||
'type': 'Roadm'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower() == 'roadm' and \
|
||||
(x.booster_restriction != '' or x.preamp_restriction != '')] +
|
||||
[{'uid': f'west fused spans in {x.city}',
|
||||
'metadata': {'location': {'city': x.city,
|
||||
'region': x.region,
|
||||
'latitude': x.latitude,
|
||||
'longitude': x.longitude}},
|
||||
'type': 'Fused'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower() == 'fused'] +
|
||||
[{'uid': f'east fused spans in {x.city}',
|
||||
'metadata': {'location': {'city': x.city,
|
||||
'region': x.region,
|
||||
'latitude': x.latitude,
|
||||
'longitude': x.longitude}},
|
||||
'type': 'Fused'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower() == 'fused'] +
|
||||
[{'uid': f'fiber ({x.from_city} \u2192 {x.to_city})-{x.east_cable}',
|
||||
'metadata': {'location': midpoint(nodes_by_city[x.from_city],
|
||||
nodes_by_city[x.to_city])},
|
||||
'type': 'Fiber',
|
||||
'type_variety': x.east_fiber,
|
||||
'params': {'length': round(x.east_distance, 3),
|
||||
'length_units': x.distance_units,
|
||||
'loss_coef': x.east_lineic,
|
||||
'con_in':x.east_con_in,
|
||||
'con_out':x.east_con_out}
|
||||
}
|
||||
for x in links] +
|
||||
[{'uid': f'fiber ({x.to_city} \u2192 {x.from_city})-{x.west_cable}',
|
||||
'metadata': {'location': midpoint(nodes_by_city[x.from_city],
|
||||
nodes_by_city[x.to_city])},
|
||||
'type': 'Fiber',
|
||||
'type_variety': x.west_fiber,
|
||||
'params': {'length': round(x.west_distance, 3),
|
||||
'length_units': x.distance_units,
|
||||
'loss_coef': x.west_lineic,
|
||||
'con_in':x.west_con_in,
|
||||
'con_out':x.west_con_out}
|
||||
} # missing ILA construction
|
||||
for x in links] +
|
||||
[{'uid': f'east edfa in {e.from_city} to {e.to_city}',
|
||||
'metadata': {'location': {'city': nodes_by_city[e.from_city].city,
|
||||
'region': nodes_by_city[e.from_city].region,
|
||||
'latitude': nodes_by_city[e.from_city].latitude,
|
||||
'longitude': nodes_by_city[e.from_city].longitude}},
|
||||
'type': 'Edfa',
|
||||
'type_variety': e.east_amp_type,
|
||||
'operational': {'gain_target': e.east_amp_gain,
|
||||
'delta_p': e.east_amp_dp,
|
||||
'tilt_target': e.east_tilt,
|
||||
'out_voa' : e.east_att_out}
|
||||
}
|
||||
for e in eqpts if (e.east_amp_type.lower() != '' and \
|
||||
e.east_amp_type.lower() != 'fused')] +
|
||||
[{'uid': f'west edfa in {e.from_city} to {e.to_city}',
|
||||
'metadata': {'location': {'city': nodes_by_city[e.from_city].city,
|
||||
'region': nodes_by_city[e.from_city].region,
|
||||
'latitude': nodes_by_city[e.from_city].latitude,
|
||||
'longitude': nodes_by_city[e.from_city].longitude}},
|
||||
'type': 'Edfa',
|
||||
'type_variety': e.west_amp_type,
|
||||
'operational': {'gain_target': e.west_amp_gain,
|
||||
'delta_p': e.west_amp_dp,
|
||||
'tilt_target': e.west_tilt,
|
||||
'out_voa' : e.west_att_out}
|
||||
}
|
||||
for e in eqpts if (e.west_amp_type.lower() != '' and \
|
||||
e.west_amp_type.lower() != 'fused')] +
|
||||
# fused edfa variety is a hack to indicate that there should not be
|
||||
# booster amplifier out of the roadm.
|
||||
# If the user specifies ILA in the Nodes sheet and fused in the Eqpt sheet, then assume that
|
||||
# this is a fused node.
|
||||
[{'uid': f'east edfa in {e.from_city} to {e.to_city}',
|
||||
'metadata': {'location': {'city': nodes_by_city[e.from_city].city,
|
||||
'region': nodes_by_city[e.from_city].region,
|
||||
'latitude': nodes_by_city[e.from_city].latitude,
|
||||
'longitude': nodes_by_city[e.from_city].longitude}},
|
||||
'type': 'Fused',
|
||||
'params': {'loss': 0}
|
||||
}
|
||||
for e in eqpts if e.east_amp_type.lower() == 'fused'] +
|
||||
[{'uid': f'west edfa in {e.from_city} to {e.to_city}',
|
||||
'metadata': {'location': {'city': nodes_by_city[e.from_city].city,
|
||||
'region': nodes_by_city[e.from_city].region,
|
||||
'latitude': nodes_by_city[e.from_city].latitude,
|
||||
'longitude': nodes_by_city[e.from_city].longitude}},
|
||||
'type': 'Fused',
|
||||
'params': {'loss': 0}
|
||||
}
|
||||
for e in eqpts if e.west_amp_type.lower() == 'fused'],
|
||||
'connections':
|
||||
list(chain.from_iterable([eqpt_connection_by_city(n.city)
|
||||
for n in nodes]))
|
||||
+
|
||||
list(chain.from_iterable(zip(
|
||||
[{'from_node': f'trx {x.city}',
|
||||
'to_node': f'roadm {x.city}'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower()=='roadm'],
|
||||
[{'from_node': f'roadm {x.city}',
|
||||
'to_node': f'trx {x.city}'}
|
||||
for x in nodes_by_city.values() if x.node_type.lower()=='roadm'])))
|
||||
}
|
||||
|
||||
suffix_filename = str(input_filename.suffixes[0])
|
||||
full_input_filename = str(input_filename)
|
||||
split_filename = [full_input_filename[0:len(full_input_filename)-len(suffix_filename)] , suffix_filename[1:]]
|
||||
output_json_file_name = split_filename[0]+'.json'
|
||||
with open(output_json_file_name, 'w', encoding='utf-8') as edfa_json_file:
|
||||
edfa_json_file.write(dumps(data, indent=2, ensure_ascii=False))
|
||||
return output_json_file_name
|
||||
|
||||
def parse_excel(input_filename):
|
||||
link_headers = \
|
||||
{ 'Node A': 'from_city',
|
||||
'Node Z': 'to_city',
|
||||
'east':{
|
||||
'Distance (km)': 'east_distance',
|
||||
'Fiber type': 'east_fiber',
|
||||
'lineic att': 'east_lineic',
|
||||
'Con_in': 'east_con_in',
|
||||
'Con_out': 'east_con_out',
|
||||
'PMD': 'east_pmd',
|
||||
'Cable id': 'east_cable'
|
||||
},
|
||||
'west':{
|
||||
'Distance (km)': 'west_distance',
|
||||
'Fiber type': 'west_fiber',
|
||||
'lineic att': 'west_lineic',
|
||||
'Con_in': 'west_con_in',
|
||||
'Con_out': 'west_con_out',
|
||||
'PMD': 'west_pmd',
|
||||
'Cable id': 'west_cable'
|
||||
}
|
||||
}
|
||||
node_headers = \
|
||||
{ 'City': 'city',
|
||||
'State': 'state',
|
||||
'Country': 'country',
|
||||
'Region': 'region',
|
||||
'Latitude': 'latitude',
|
||||
'Longitude': 'longitude',
|
||||
'Type': 'node_type',
|
||||
'Booster_restriction': 'booster_restriction',
|
||||
'Preamp_restriction': 'preamp_restriction'
|
||||
}
|
||||
eqpt_headers = \
|
||||
{ 'Node A': 'from_city',
|
||||
'Node Z': 'to_city',
|
||||
'east':{
|
||||
'amp type': 'east_amp_type',
|
||||
'att_in': 'east_att_in',
|
||||
'amp gain': 'east_amp_gain',
|
||||
'delta p': 'east_amp_dp',
|
||||
'tilt': 'east_tilt',
|
||||
'att_out': 'east_att_out'
|
||||
},
|
||||
'west':{
|
||||
'amp type': 'west_amp_type',
|
||||
'att_in': 'west_att_in',
|
||||
'amp gain': 'west_amp_gain',
|
||||
'delta p': 'west_amp_dp',
|
||||
'tilt': 'west_tilt',
|
||||
'att_out': 'west_att_out'
|
||||
}
|
||||
}
|
||||
|
||||
with open_workbook(input_filename) as wb:
|
||||
nodes_sheet = wb.sheet_by_name('Nodes')
|
||||
links_sheet = wb.sheet_by_name('Links')
|
||||
try:
|
||||
eqpt_sheet = wb.sheet_by_name('Eqpt')
|
||||
except Exception:
|
||||
#eqpt_sheet is optional
|
||||
eqpt_sheet = None
|
||||
|
||||
nodes = []
|
||||
for node in parse_sheet(nodes_sheet, node_headers, NODES_LINE, NODES_LINE+1, NODES_COLUMN):
|
||||
nodes.append(Node(**node))
|
||||
expected_node_types = {'ROADM', 'ILA', 'FUSED'}
|
||||
for n in nodes:
|
||||
if n.node_type not in expected_node_types:
|
||||
n.node_type = 'ILA'
|
||||
|
||||
links = []
|
||||
for link in parse_sheet(links_sheet, link_headers, LINKS_LINE, LINKS_LINE+2, LINKS_COLUMN):
|
||||
links.append(Link(**link))
|
||||
#print('\n', [l.__dict__ for l in links])
|
||||
|
||||
eqpts = []
|
||||
if eqpt_sheet != None:
|
||||
for eqpt in parse_sheet(eqpt_sheet, eqpt_headers, EQPTS_LINE, EQPTS_LINE+2, EQPTS_COLUMN):
|
||||
eqpts.append(Eqpt(**eqpt))
|
||||
|
||||
# sanity check
|
||||
all_cities = Counter(n.city for n in nodes)
|
||||
if len(all_cities) != len(nodes):
|
||||
raise ValueError(f'Duplicate city: {all_cities}')
|
||||
bad_links = []
|
||||
for lnk in links:
|
||||
if lnk.from_city not in all_cities or lnk.to_city not in all_cities:
|
||||
bad_links.append([lnk.from_city, lnk.to_city])
|
||||
if bad_links:
|
||||
raise NetworkTopologyError(f'Bad link(s): {bad_links}.')
|
||||
|
||||
return nodes, links, eqpts
|
||||
|
||||
|
||||
def eqpt_connection_by_city(city_name):
|
||||
other_cities = fiber_dest_from_source(city_name)
|
||||
subdata = []
|
||||
if nodes_by_city[city_name].node_type.lower() in {'ila', 'fused'}:
|
||||
# Then len(other_cities) == 2
|
||||
direction = ['west', 'east']
|
||||
for i in range(2):
|
||||
from_ = fiber_link(other_cities[i], city_name)
|
||||
in_ = eqpt_in_city_to_city(city_name, other_cities[0],direction[i])
|
||||
to_ = fiber_link(city_name, other_cities[1-i])
|
||||
subdata += connect_eqpt(from_, in_, to_)
|
||||
elif nodes_by_city[city_name].node_type.lower() == 'roadm':
|
||||
for other_city in other_cities:
|
||||
from_ = f'roadm {city_name}'
|
||||
in_ = eqpt_in_city_to_city(city_name, other_city)
|
||||
to_ = fiber_link(city_name, other_city)
|
||||
subdata += connect_eqpt(from_, in_, to_)
|
||||
|
||||
from_ = fiber_link(other_city, city_name)
|
||||
in_ = eqpt_in_city_to_city(city_name, other_city, "west")
|
||||
to_ = f'roadm {city_name}'
|
||||
subdata += connect_eqpt(from_, in_, to_)
|
||||
return subdata
|
||||
|
||||
|
||||
def connect_eqpt(from_, in_, to_):
|
||||
connections = []
|
||||
if in_ !='':
|
||||
connections = [{'from_node': from_, 'to_node': in_},
|
||||
{'from_node': in_, 'to_node': to_}]
|
||||
else:
|
||||
connections = [{'from_node': from_, 'to_node': to_}]
|
||||
return connections
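# For example (hypothetical node names), connect_eqpt('roadm A', 'east edfa in A to B', 'fiber (A → B)-cable1')
# returns
#   [{'from_node': 'roadm A', 'to_node': 'east edfa in A to B'},
#    {'from_node': 'east edfa in A to B', 'to_node': 'fiber (A → B)-cable1'}]
# while an empty in_ collapses to a single direct from_ -> to_ connection.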
|
||||
|
||||
|
||||
def eqpt_in_city_to_city(in_city, to_city, direction='east'):
|
||||
rev_direction = 'west' if direction == 'east' else 'east'
|
||||
amp_direction = f'{direction}_amp_type'
|
||||
amp_rev_direction = f'{rev_direction}_amp_type'
|
||||
return_eqpt = ''
|
||||
if in_city in eqpts_by_city:
|
||||
for e in eqpts_by_city[in_city]:
|
||||
if nodes_by_city[in_city].node_type.lower() == 'roadm':
|
||||
if e.to_city == to_city and getattr(e, amp_direction) != '':
|
||||
return_eqpt = f'{direction} edfa in {e.from_city} to {e.to_city}'
|
||||
elif nodes_by_city[in_city].node_type.lower() == 'ila':
|
||||
if e.to_city != to_city:
|
||||
direction = rev_direction
|
||||
amp_direction = amp_rev_direction
|
||||
if getattr(e, amp_direction) != '':
|
||||
return_eqpt = f'{direction} edfa in {e.from_city} to {e.to_city}'
|
||||
if nodes_by_city[in_city].node_type.lower() == 'fused':
|
||||
return_eqpt = f'{direction} fused spans in {in_city}'
|
||||
return return_eqpt
|
||||
|
||||
|
||||
def fiber_dest_from_source(city_name):
|
||||
destinations = []
|
||||
links_from_city = links_by_city[city_name]
|
||||
for l in links_from_city:
|
||||
if l.from_city == city_name:
|
||||
destinations.append(l.to_city)
|
||||
else:
|
||||
destinations.append(l.from_city)
|
||||
return destinations
|
||||
|
||||
|
||||
def fiber_link(from_city, to_city):
|
||||
source_dest = (from_city, to_city)
|
||||
link = links_by_city[from_city]
|
||||
l = next(l for l in link if l.from_city in source_dest and l.to_city in source_dest)
|
||||
if l.from_city == from_city:
|
||||
fiber = f'fiber ({l.from_city} \u2192 {l.to_city})-{l.east_cable}'
|
||||
else:
|
||||
fiber = f'fiber ({l.to_city} \u2192 {l.from_city})-{l.west_cable}'
|
||||
return fiber
|
||||
|
||||
|
||||
def midpoint(city_a, city_b):
|
||||
lats = city_a.latitude, city_b.latitude
|
||||
longs = city_a.longitude, city_b.longitude
|
||||
try:
|
||||
result = {
|
||||
'latitude': sum(lats) / 2,
|
||||
'longitude': sum(longs) / 2
|
||||
}
|
||||
except :
|
||||
result = {
|
||||
'latitude': 0,
|
||||
'longitude': 0
|
||||
}
|
||||
return result
|
||||
|
||||
#output_json_file_name = 'coronet_conus_example.json'
|
||||
#TODO get column size automatically from tuple size
|
||||
|
||||
NODES_COLUMN = 10
|
||||
NODES_LINE = 4
|
||||
LINKS_COLUMN = 16
|
||||
LINKS_LINE = 3
|
||||
EQPTS_LINE = 3
|
||||
EQPTS_COLUMN = 14
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('workbook', nargs='?', type=Path , default='meshTopologyExampleV2.xls')
|
||||
parser.add_argument('-f', '--filter-region', action='append', default=[])
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = parser.parse_args()
|
||||
convert_file(args.workbook, filter_region=args.filter_region)
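# Illustrative sketch of programmatic use of convert_file() defined above; the workbook name
# mirrors the parser default and is assumed, not guaranteed, to be present:
#
#     from pathlib import Path
#     from gnpy.core.convert import convert_file
#
#     json_topology = convert_file(Path('meshTopologyExampleV2.xls'))
#     print(f'JSON topology written to {json_topology}')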
|
||||
gnpy/core/elements.py (new file, 909 lines)
@@ -0,0 +1,909 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.elements
|
||||
==================
|
||||
|
||||
This module contains standard network elements.
|
||||
|
||||
A network element is a Python callable. It takes a :class:`.info.SpectralInformation`
|
||||
object and returns a copy with appropriate fields affected. This structure
|
||||
represents spectral information that is "propagated" by this network element.
|
||||
Network elements must have only a local "view" of the network and propagate
|
||||
:class:`.info.SpectralInformation` using only this information. They should be independent and
|
||||
self-contained.
|
||||
|
||||
Network elements MUST implement two attributes .uid and .name representing a
|
||||
unique identifier and a printable name.
|
||||
'''
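# Call-protocol sketch (illustrative only): propagation simply chains element calls on a
# SpectralInformation object, e.g.
#
#     for element in path:
#         spectral_info = element(spectral_info)
#
# A Roadm additionally takes the egress degree: spectral_info = roadm(spectral_info, degree).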
|
||||
|
||||
from numpy import abs, arange, array, exp, divide, errstate
|
||||
from numpy import interp, log10, mean, pi, polyfit, polyval, sum
|
||||
from scipy.constants import c, h
|
||||
from collections import namedtuple
|
||||
|
||||
from gnpy.core.node import Node
|
||||
from gnpy.core.units import UNITS
|
||||
from gnpy.core.utils import lin2db, db2lin, arrange_frequencies, snr_sum
|
||||
from gnpy.core.science_utils import propagate_raman_fiber, _psi
|
||||
|
||||
class Transceiver(Node):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.osnr_ase_01nm = None
|
||||
self.osnr_ase = None
|
||||
self.osnr_nli = None
|
||||
self.snr = None
|
||||
self.passive = False
|
||||
self.baud_rate = None
|
||||
|
||||
def _calc_snr(self, spectral_info):
|
||||
with errstate(divide='ignore'):
|
||||
self.baud_rate = [c.baud_rate for c in spectral_info.carriers]
|
||||
ratio_01nm = [lin2db(12.5e9/b_rate) for b_rate in self.baud_rate]
|
||||
#set raw values to record original calculation, before update_snr()
|
||||
self.raw_osnr_ase = [lin2db(divide(c.power.signal, c.power.ase))
|
||||
for c in spectral_info.carriers]
|
||||
self.raw_osnr_ase_01nm = [ase - ratio for ase, ratio
|
||||
in zip(self.raw_osnr_ase, ratio_01nm)]
|
||||
self.raw_osnr_nli = [lin2db(divide(c.power.signal, c.power.nli))
|
||||
for c in spectral_info.carriers]
|
||||
self.raw_snr = [lin2db(divide(c.power.signal, c.power.nli+c.power.ase))
|
||||
for c in spectral_info.carriers]
|
||||
self.raw_snr_01nm = [snr - ratio for snr, ratio
|
||||
in zip(self.raw_snr, ratio_01nm)]
|
||||
|
||||
self.osnr_ase = self.raw_osnr_ase
|
||||
self.osnr_ase_01nm = self.raw_osnr_ase_01nm
|
||||
self.osnr_nli = self.raw_osnr_nli
|
||||
self.snr = self.raw_snr
|
||||
self.snr_01nm = self.raw_snr_01nm
|
||||
|
||||
def update_snr(self, *args):
|
||||
"""
|
||||
snr_added in 0.1nm
|
||||
compute SNR penalties such as transponder Tx_osnr or Roadm add_drop_osnr
|
||||
only applied in request.py / propagate on the last Transceiver node of the path
|
||||
all penalties are added in a single call to avoid uncontrolled accumulation
|
||||
"""
|
||||
#use raw_values so that the added snr penalties are not cumulated
|
||||
snr_added = 0
|
||||
for s in args:
|
||||
snr_added += db2lin(-s)
|
||||
snr_added = -lin2db(snr_added)
|
||||
self.osnr_ase = list(map(lambda x,y:snr_sum(x,y,snr_added),
|
||||
self.raw_osnr_ase, self.baud_rate))
|
||||
self.snr = list(map(lambda x,y:snr_sum(x,y,snr_added),
|
||||
self.raw_snr, self.baud_rate))
|
||||
self.osnr_ase_01nm = list(map(lambda x:snr_sum(x,12.5e9,snr_added),
|
||||
self.raw_osnr_ase_01nm))
|
||||
self.snr_01nm = list(map(lambda x:snr_sum(x,12.5e9,snr_added),
|
||||
self.raw_snr_01nm))
|
||||
|
||||
@property
|
||||
def to_json(self):
|
||||
return {'uid' : self.uid,
|
||||
'type' : type(self).__name__,
|
||||
'metadata' : {
|
||||
'location': self.metadata['location']._asdict()
|
||||
}
|
||||
}
|
||||
|
||||
def __repr__(self):
|
||||
return (f'{type(self).__name__}('
|
||||
f'uid={self.uid!r}, '
|
||||
f'osnr_ase_01nm={self.osnr_ase_01nm!r}, '
|
||||
f'osnr_ase={self.osnr_ase!r}, '
|
||||
f'osnr_nli={self.osnr_nli!r}, '
|
||||
f'snr={self.snr!r})')
|
||||
|
||||
def __str__(self):
|
||||
if self.snr is None or self.osnr_ase is None:
|
||||
return f'{type(self).__name__} {self.uid}'
|
||||
|
||||
snr = round(mean(self.snr),2)
|
||||
osnr_ase = round(mean(self.osnr_ase),2)
|
||||
osnr_ase_01nm = round(mean(self.osnr_ase_01nm), 2)
|
||||
snr_01nm = round(mean(self.snr_01nm),2)
|
||||
|
||||
return '\n'.join([f'{type(self).__name__} {self.uid}',
|
||||
|
||||
f' OSNR ASE (0.1nm, dB): {osnr_ase_01nm:.2f}',
|
||||
f' OSNR ASE (signal bw, dB): {osnr_ase:.2f}',
|
||||
f' SNR total (signal bw, dB): {snr:.2f}',
|
||||
f' SNR total (0.1nm, dB): {snr_01nm:.2f}'])
|
||||
|
||||
def __call__(self, spectral_info):
|
||||
self._calc_snr(spectral_info)
|
||||
return spectral_info
|
||||
|
||||
RoadmParams = namedtuple('RoadmParams', 'target_pch_out_db add_drop_osnr restrictions per_degree_target_pch_out_db')
|
||||
|
||||
class Roadm(Node):
|
||||
def __init__(self, *args, params, **kwargs):
|
||||
if 'per_degree_target_pch_out_db' not in params.keys():
|
||||
params['per_degree_target_pch_out_db'] = []
|
||||
super().__init__(*args, params=RoadmParams(**params), **kwargs)
|
||||
self.loss = 0 #auto-design interest
|
||||
self.effective_loss = None
|
||||
self.effective_pch_out_db = self.params.target_pch_out_db
|
||||
self.passive = True
|
||||
self.restrictions = self.params.restrictions
|
||||
self.per_degree_target_pch_out_db = self.params.per_degree_target_pch_out_db
|
||||
|
||||
@property
|
||||
def to_json(self):
|
||||
return {'uid' : self.uid,
|
||||
'type' : type(self).__name__,
|
||||
'params' : {
|
||||
'target_pch_out_db' : self.effective_pch_out_db,
|
||||
'restrictions' : self.restrictions,
|
||||
'per_degree_target_pch_out_db': self.per_degree_target_pch_out_db
|
||||
},
|
||||
'metadata' : {
|
||||
'location': self.metadata['location']._asdict()
|
||||
}
|
||||
}
|
||||
|
||||
def __repr__(self):
|
||||
return f'{type(self).__name__}(uid={self.uid!r}, loss={self.loss!r})'
|
||||
|
||||
def __str__(self):
|
||||
return '\n'.join([f'{type(self).__name__} {self.uid}',
|
||||
f' effective loss (dB): {self.effective_loss:.2f}',
|
||||
f' pch out (dBm): {self.effective_pch_out_db!r}'])
|
||||
|
||||
def propagate(self, pref, *carriers, degree):
|
||||
#pin_target and loss are read from eqpt_config.json['Roadm']
|
||||
#all ingress channels in xpress are set to this power level
|
||||
#but add channels are not, so we define an effective loss
|
||||
#in the case of add channels
|
||||
if self.per_degree_target_pch_out_db:
|
||||
# find the target power on this degree
|
||||
try:
|
||||
temp = next(el['target_pch_out_db'] \
|
||||
for el in self.per_degree_target_pch_out_db if el['to_node']==degree)
|
||||
except StopIteration:
|
||||
# if no target power is defined on this degree use the global one
|
||||
temp = self.params.target_pch_out_db
|
||||
else:
|
||||
# if no per degree target power are defined, use the global one
|
||||
temp = self.params.target_pch_out_db
|
||||
self.effective_pch_out_db = min(pref.p_spani, temp)
|
||||
self.effective_loss = pref.p_spani - self.effective_pch_out_db
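# Worked example (illustrative numbers): with an incoming reference per-channel power
# p_spani = 2 dBm and a target of -20 dBm, effective_pch_out_db = -20 dBm and the
# effective_loss applied by the ROADM is 22 dB.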
|
||||
carriers_power = array([c.power.signal +c.power.nli+c.power.ase for c in carriers])
|
||||
carriers_att = list(map(lambda x : lin2db(x*1e3)-self.effective_pch_out_db, carriers_power))
|
||||
exceeding_att = -min(list(filter(lambda x: x < 0, carriers_att)), default = 0)
|
||||
carriers_att = list(map(lambda x: db2lin(x+exceeding_att), carriers_att))
|
||||
for carrier_att, carrier in zip(carriers_att, carriers) :
|
||||
pwr = carrier.power
|
||||
pwr = pwr._replace( signal = pwr.signal/carrier_att,
|
||||
nli = pwr.nli/carrier_att,
|
||||
ase = pwr.ase/carrier_att)
|
||||
yield carrier._replace(power=pwr)
|
||||
|
||||
def update_pref(self, pref):
|
||||
return pref._replace(p_span0=pref.p_span0, p_spani=self.effective_pch_out_db)
|
||||
|
||||
def __call__(self, spectral_info, degree):
|
||||
carriers = tuple(self.propagate(spectral_info.pref, *spectral_info.carriers, degree=degree))
|
||||
pref = self.update_pref(spectral_info.pref)
|
||||
return spectral_info._replace(carriers=carriers, pref=pref)
|
||||
|
||||
FusedParams = namedtuple('FusedParams', 'loss')
|
||||
|
||||
class Fused(Node):
|
||||
def __init__(self, *args, params=None, **kwargs):
|
||||
if params is None:
|
||||
# default loss value if not mentioned in loaded network json
|
||||
params = {'loss':1}
|
||||
super().__init__(*args, params=FusedParams(**params), **kwargs)
|
||||
self.loss = self.params.loss
|
||||
self.passive = True
|
||||
|
||||
@property
|
||||
def to_json(self):
|
||||
return {'uid' : self.uid,
|
||||
'type' : type(self).__name__,
|
||||
'params' :{
|
||||
'loss': self.loss
|
||||
},
|
||||
'metadata' : {
|
||||
'location': self.metadata['location']._asdict()
|
||||
}
|
||||
}
|
||||
|
||||
def __repr__(self):
|
||||
return f'{type(self).__name__}(uid={self.uid!r}, loss={self.loss!r})'
|
||||
|
||||
def __str__(self):
|
||||
return '\n'.join([f'{type(self).__name__} {self.uid}',
|
||||
f' loss (dB): {self.loss:.2f}'])
|
||||
|
||||
def propagate(self, *carriers):
|
||||
attenuation = db2lin(self.loss)
|
||||
|
||||
for carrier in carriers:
|
||||
pwr = carrier.power
|
||||
pwr = pwr._replace(signal=pwr.signal/attenuation,
|
||||
nli=pwr.nli/attenuation,
|
||||
ase=pwr.ase/attenuation)
|
||||
yield carrier._replace(power=pwr)
|
||||
|
||||
def update_pref(self, pref):
|
||||
return pref._replace(p_span0=pref.p_span0, p_spani=pref.p_spani - self.loss)
|
||||
|
||||
def __call__(self, spectral_info):
|
||||
carriers = tuple(self.propagate(*spectral_info.carriers))
|
||||
pref = self.update_pref(spectral_info.pref)
|
||||
return spectral_info._replace(carriers=carriers, pref=pref)
|
||||
|
||||
FiberParams = namedtuple('FiberParams', 'type_variety length loss_coef length_units \
|
||||
att_in con_in con_out dispersion gamma')
|
||||
|
||||
class Fiber(Node):
|
||||
def __init__(self, *args, params=None, **kwargs):
|
||||
if params is None:
|
||||
params = {}
|
||||
if 'con_in' not in params:
|
||||
# if not defined in the network json connector loss in/out
|
||||
# the None value will be updated in network.py[build_network]
|
||||
# with default values from eqpt_config.json[Spans]
|
||||
params['con_in'] = None
|
||||
params['con_out'] = None
|
||||
if 'att_in' not in params:
|
||||
#fixed attenuator for padding
|
||||
params['att_in'] = 0
|
||||
|
||||
super().__init__(*args, params=FiberParams(**params), **kwargs)
|
||||
self.type_variety = self.params.type_variety
|
||||
self.length = self.params.length * UNITS[self.params.length_units] # in m
|
||||
self.loss_coef = self.params.loss_coef * 1e-3 # lineic loss dB/m
|
||||
self.lin_loss_coef = self.params.loss_coef / (20 * log10(exp(1)))
|
||||
self.att_in = self.params.att_in
|
||||
self.con_in = self.params.con_in
|
||||
self.con_out = self.params.con_out
|
||||
self.dispersion = self.params.dispersion # s/m/m
|
||||
self.gamma = self.params.gamma # 1/W/m
|
||||
self.pch_out_db = None
|
||||
self.carriers_in = None
|
||||
self.carriers_out = None
|
||||
# TODO|jla: discuss factor 2 in the linear lineic attenuation
|
||||
|
||||
@property
|
||||
def to_json(self):
|
||||
return {'uid' : self.uid,
|
||||
'type' : type(self).__name__,
|
||||
'type_variety' : self.type_variety,
|
||||
'params' : {
|
||||
#have to specify each because namedtuple cannot be updated :(
|
||||
'type_variety' : self.type_variety,
|
||||
'length' : self.length/UNITS[self.params.length_units],
|
||||
'loss_coef' : self.loss_coef*1e3,
|
||||
'length_units' : self.params.length_units,
|
||||
'att_in' : self.att_in,
|
||||
'con_in' : self.con_in,
|
||||
'con_out' : self.con_out
|
||||
},
|
||||
'metadata' : {
|
||||
'location': self.metadata['location']._asdict()
|
||||
}
|
||||
}
|
||||
|
||||
def __repr__(self):
|
||||
return f'{type(self).__name__}(uid={self.uid!r}, length={round(self.length*1e-3,1)!r}km, loss={round(self.loss,1)!r}dB)'
|
||||
|
||||
def __str__(self):
|
||||
return '\n'.join([f'{type(self).__name__} {self.uid}',
|
||||
f' type_variety: {self.type_variety}',
|
||||
f' length (km): {round(self.length*1e-3):.2f}',
|
||||
f' pad att_in (dB): {self.att_in:.2f}',
|
||||
f' total loss (dB): {self.loss:.2f}',
|
||||
f' (includes conn loss (dB) in: {self.con_in:.2f} out: {self.con_out:.2f})',
|
||||
f' (conn loss out includes EOL margin defined in eqpt_config.json)',
|
||||
f' pch out (dBm): {self.pch_out_db!r}'])
|
||||
|
||||
@property
|
||||
def fiber_loss(self):
|
||||
"""Fiber loss in dB, not including padding attenuator"""
|
||||
return self.loss_coef * self.length + self.con_in + self.con_out
|
||||
|
||||
@property
|
||||
def loss(self):
|
||||
"""total loss including padding att_in: useful for polymorphism with roadm loss"""
|
||||
return self.loss_coef * self.length + self.con_in + self.con_out + self.att_in
|
||||
|
||||
@property
|
||||
def passive(self):
|
||||
return True
|
||||
|
||||
@property
|
||||
def lin_attenuation(self):
|
||||
return db2lin(self.length * self.loss_coef)
|
||||
|
||||
@property
|
||||
def effective_length(self):
|
||||
_, alpha = self.dbkm_2_lin()
|
||||
leff = (1 - exp(-2 * alpha * self.length)) / (2 * alpha)
|
||||
return leff
|
||||
|
||||
@property
|
||||
def asymptotic_length(self):
|
||||
_, alpha = self.dbkm_2_lin()
|
||||
aleff = 1 / (2 * alpha)
|
||||
return aleff
|
||||
|
||||
def carriers(self, loc, attr):
|
||||
"""retrieve carriers information
|
||||
|
||||
:param loc: (in, out) of the class element
|
||||
:param attr: (ase, nli, signal, total) power information
|
||||
"""
|
||||
if not (loc in ('in', 'out') and attr in ('nli', 'signal', 'total', 'ase')):
|
||||
yield None
|
||||
return
|
||||
loc_attr = 'carriers_'+loc
|
||||
for c in getattr(self, loc_attr) :
|
||||
if attr == 'total':
|
||||
yield c.power.ase+c.power.nli+c.power.signal
|
||||
else:
|
||||
yield c.power._asdict().get(attr, None)
|
||||
|
||||
def beta2(self, ref_wavelength=1550e-9):
|
||||
"""Returns beta2 from dispersion parameter.
|
||||
Dispersion is entered in ps/nm/km.
|
||||
Dispersion can be a numpy array or a single value.
|
||||
|
||||
:param ref_wavelength: can be a numpy array; default: 1550nm
|
||||
"""
|
||||
# TODO|jla: discuss beta2 as method or attribute
|
||||
D = abs(self.dispersion)
|
||||
b2 = (ref_wavelength ** 2) * D / (2 * pi * c) # 10^21 scales [ps^2/km]
|
||||
return b2 # s/Hz/m
|
||||
|
||||
def dbkm_2_lin(self):
|
||||
"""calculates the linear loss coefficient"""
|
||||
# power loss coefficient in dB/m
|
||||
alpha_pcoef = self.loss_coef
|
||||
# linear loss field amplitude coefficient in m^-1
|
||||
alpha_acoef = alpha_pcoef / (2 * 10 * log10(exp(1)))
|
||||
return alpha_pcoef, alpha_acoef
|
||||
|
||||
def _gn_analytic(self, carrier, *carriers):
|
||||
"""Computes the nonlinear interference power on a single carrier.
|
||||
The method uses eq. 120 from `arXiv:1209.0394 <https://arxiv.org/abs/1209.0394>`__.
|
||||
|
||||
:param carrier: the signal under analysis
|
||||
:param carriers: the full WDM comb
|
||||
:return: carrier_nli: the amount of nonlinear interference in W on the carrier under analysis
|
||||
"""
|
||||
|
||||
g_nli = 0
|
||||
for interfering_carrier in carriers:
|
||||
psi = _psi(carrier, interfering_carrier, beta2=self.beta2(), asymptotic_length=self.asymptotic_length)
|
||||
g_nli += (interfering_carrier.power.signal/interfering_carrier.baud_rate)**2 \
|
||||
* (carrier.power.signal/carrier.baud_rate) * psi
|
||||
|
||||
g_nli *= (16 / 27) * (self.gamma * self.effective_length)**2 \
|
||||
/ (2 * pi * abs(self.beta2()) * self.asymptotic_length)
|
||||
|
||||
carrier_nli = carrier.baud_rate * g_nli
|
||||
return carrier_nli
|
||||
|
||||
def propagate(self, *carriers):
|
||||
|
||||
# apply connector_att_in on all carriers before computing the GN analytics (first part not good)
|
||||
attenuation = db2lin(self.con_in + self.att_in)
|
||||
|
||||
chan = []
|
||||
for carrier in carriers:
|
||||
pwr = carrier.power
|
||||
pwr = pwr._replace(signal=pwr.signal/attenuation,
|
||||
nli=pwr.nli/attenuation,
|
||||
ase=pwr.ase/attenuation)
|
||||
carrier = carrier._replace(power=pwr)
|
||||
chan.append(carrier)
|
||||
|
||||
carriers = tuple(f for f in chan)
|
||||
|
||||
# propagate in the fiber and apply attenuation out
|
||||
attenuation = db2lin(self.con_out)
|
||||
for carrier in carriers:
|
||||
pwr = carrier.power
|
||||
carrier_nli = self._gn_analytic(carrier, *carriers)
|
||||
pwr = pwr._replace(signal=pwr.signal/self.lin_attenuation/attenuation,
|
||||
nli=(pwr.nli+carrier_nli)/self.lin_attenuation/attenuation,
|
||||
ase=pwr.ase/self.lin_attenuation/attenuation)
|
||||
yield carrier._replace(power=pwr)
|
||||
|
||||
def update_pref(self, pref):
|
||||
self.pch_out_db = round(pref.p_spani - self.loss, 2)
|
||||
return pref._replace(p_span0=pref.p_span0, p_spani=self.pch_out_db)
|
||||
|
||||
def __call__(self, spectral_info):
|
||||
self.carriers_in = spectral_info.carriers
|
||||
carriers = tuple(self.propagate(*spectral_info.carriers))
|
||||
pref = self.update_pref(spectral_info.pref)
|
||||
self.carriers_out = carriers
|
||||
return spectral_info._replace(carriers=carriers, pref=pref)
|
||||
|
||||
RamanFiberParams = namedtuple('RamanFiberParams', 'type_variety length loss_coef length_units \
|
||||
att_in con_in con_out dispersion gamma raman_efficiency')
|
||||
|
||||
class RamanFiber(Fiber):
|
||||
def __init__(self, *args, params=None, **kwargs):
|
||||
if params is None:
|
||||
params = {}
|
||||
if 'con_in' not in params:
|
||||
# if not defined in the network json connector loss in/out
|
||||
# the None value will be updated in network.py[build_network]
|
||||
# with default values from eqpt_config.json[Spans]
|
||||
params['con_in'] = None
|
||||
params['con_out'] = None
|
||||
if 'att_in' not in params:
|
||||
#fixed attenuator for padding
|
||||
params['att_in'] = 0
|
||||
|
||||
# TODO: can we re-use the Fiber constructor in a better way?
|
||||
Node.__init__(self, *args, params=RamanFiberParams(**params), **kwargs)
|
||||
self.type_variety = self.params.type_variety
|
||||
self.length = self.params.length * UNITS[self.params.length_units] # in m
|
||||
self.loss_coef = self.params.loss_coef * 1e-3 # lineic loss dB/m
|
||||
self.lin_loss_coef = self.params.loss_coef / (20 * log10(exp(1)))
|
||||
self.att_in = self.params.att_in
|
||||
self.con_in = self.params.con_in
|
||||
self.con_out = self.params.con_out
|
||||
self.dispersion = self.params.dispersion # s/m/m
|
||||
self.gamma = self.params.gamma # 1/W/m
|
||||
self.pch_out_db = None
|
||||
self.carriers_in = None
|
||||
self.carriers_out = None
|
||||
# TODO|jla: discuss factor 2 in the linear lineic attenuation
|
||||
|
||||
@property
|
||||
def sim_params(self):
|
||||
return self._sim_params
|
||||
|
||||
@sim_params.setter
|
||||
def sim_params(self, sim_params=None):
|
||||
self._sim_params = sim_params
|
||||
|
||||
def update_pref(self, pref, *carriers):
|
||||
pch_out_db = lin2db(mean([carrier.power.signal for carrier in carriers])) + 30
|
||||
self.pch_out_db = round(pch_out_db, 2)
|
||||
return pref._replace(p_span0=pref.p_span0, p_spani=self.pch_out_db)
|
||||
|
||||
def __call__(self, spectral_info):
|
||||
self.carriers_in = spectral_info.carriers
|
||||
carriers = tuple(self.propagate(*spectral_info.carriers))
|
||||
pref = self.update_pref(spectral_info.pref, *carriers)
|
||||
self.carriers_out = carriers
|
||||
return spectral_info._replace(carriers=carriers, pref=pref)
|
||||
|
||||
def propagate(self, *carriers):
|
||||
for propagated_carrier in propagate_raman_fiber(self, *carriers):
|
||||
yield propagated_carrier
|
||||
|
||||
class EdfaParams:
|
||||
def __init__(self, **params):
|
||||
self.update_params(params)
|
||||
if params == {}:
|
||||
self.type_variety = ''
|
||||
self.type_def = ''
|
||||
# self.gain_flatmax = 0
|
||||
# self.gain_min = 0
|
||||
# self.p_max = 0
|
||||
# self.nf_model = None
|
||||
# self.nf_fit_coeff = None
|
||||
# self.nf_ripple = None
|
||||
# self.dgt = None
|
||||
# self.gain_ripple = None
|
||||
# self.out_voa_auto = False
|
||||
# self.allowed_for_design = None
|
||||
|
||||
def update_params(self, kwargs):
|
||||
for k,v in kwargs.items() :
|
||||
# nested dicts become nested parameter objects; plain values are kept as-is
setattr(self, k, EdfaParams(**v) if isinstance(v, dict) else v)
|
||||
|
||||
class EdfaOperational:
|
||||
default_values = \
|
||||
{
|
||||
'gain_target': None,
|
||||
'delta_p': None,
|
||||
'out_voa': None,
|
||||
'tilt_target': 0
|
||||
}
|
||||
|
||||
def __init__(self, **operational):
|
||||
self.update_attr(operational)
|
||||
|
||||
def update_attr(self, kwargs):
|
||||
clean_kwargs = {k:v for k,v in kwargs.items() if v !=''}
|
||||
for k,v in self.default_values.items():
|
||||
setattr(self, k, clean_kwargs.get(k,v))
|
||||
|
||||
def __repr__(self):
|
||||
return (f'{type(self).__name__}('
|
||||
f'gain_target={self.gain_target!r}, '
|
||||
f'tilt_target={self.tilt_target!r})')
|
||||
|
||||
class Edfa(Node):
|
||||
def __init__(self, *args, params=None, operational=None, **kwargs):
|
||||
if params is None:
|
||||
params = {}
|
||||
if operational is None:
|
||||
operational = {}
|
||||
super().__init__(
|
||||
*args,
|
||||
params=EdfaParams(**params),
|
||||
operational=EdfaOperational(**operational),
|
||||
**kwargs
|
||||
)
|
||||
self.interpol_dgt = None # interpolated dynamic gain tilt
|
||||
self.interpol_gain_ripple = None # gain ripple
|
||||
self.interpol_nf_ripple = None # nf_ripple
|
||||
self.channel_freq = None # SI channel frequencies
|
||||
# nf, gprofile, pin and pout attributes are set by interpol_params
|
||||
self.nf = None # dB edfa nf at operational.gain_target
|
||||
self.gprofile = None
|
||||
self.pin_db = None
|
||||
self.nch = None
|
||||
self.pout_db = None
|
||||
self.target_pch_out_db = None
|
||||
self.effective_pch_out_db = None
|
||||
self.passive = False
|
||||
self.att_in = None
|
||||
self.carriers_in = None
|
||||
self.carriers_out = None
|
||||
self.effective_gain = self.operational.gain_target
|
||||
self.delta_p = self.operational.delta_p #delta P with Pref (power sweep) in power mode
|
||||
self.tilt_target = self.operational.tilt_target
|
||||
self.out_voa = self.operational.out_voa
|
||||
|
||||
@property
|
||||
def to_json(self):
|
||||
return {'uid' : self.uid,
|
||||
'type' : type(self).__name__,
|
||||
'type_variety' : self.params.type_variety,
|
||||
'operational' : {
|
||||
'gain_target' : self.effective_gain,
|
||||
'delta_p' : self.delta_p,
|
||||
'tilt_target' : self.tilt_target,
|
||||
'out_voa' : self.out_voa
|
||||
},
|
||||
'metadata' : {
|
||||
'location': self.metadata['location']._asdict()
|
||||
}
|
||||
}
|
||||
|
||||
def __repr__(self):
|
||||
return (f'{type(self).__name__}(uid={self.uid!r}, '
|
||||
f'type_variety={self.params.type_variety!r}, '
|
||||
f'interpol_dgt={self.interpol_dgt!r}, '
|
||||
f'interpol_gain_ripple={self.interpol_gain_ripple!r}, '
|
||||
f'interpol_nf_ripple={self.interpol_nf_ripple!r}, '
|
||||
f'channel_freq={self.channel_freq!r}, '
|
||||
f'nf={self.nf!r}, '
|
||||
f'gprofile={self.gprofile!r}, '
|
||||
f'pin_db={self.pin_db!r}, '
|
||||
f'pout_db={self.pout_db!r})')
|
||||
|
||||
def __str__(self):
|
||||
if self.pin_db is None or self.pout_db is None:
|
||||
return f'{type(self).__name__} {self.uid}'
|
||||
nf = mean(self.nf)
|
||||
return '\n'.join([f'{type(self).__name__} {self.uid}',
|
||||
f' type_variety: {self.params.type_variety}',
|
||||
f' effective gain(dB): {self.effective_gain:.2f}',
|
||||
f' (before att_in and before output VOA)',
|
||||
f' noise figure (dB): {nf:.2f}',
|
||||
f' (including att_in)',
|
||||
f' pad att_in (dB): {self.att_in:.2f}',
|
||||
f' Power In (dBm): {self.pin_db:.2f}',
|
||||
f' Power Out (dBm): {self.pout_db:.2f}',
|
||||
f' Delta_P (dB): {self.delta_p!r}',
|
||||
f' target pch (dBm): {self.target_pch_out_db!r}',
|
||||
f' effective pch (dBm): {self.effective_pch_out_db!r}',
|
||||
f' output VOA (dB): {self.out_voa:.2f}'])
|
||||
|
||||
def carriers(self, loc, attr):
|
||||
"""retrieve carriers information
|
||||
|
||||
:param loc: (in, out) of the class element
|
||||
:param attr: (ase, nli, signal, total) power information
|
||||
"""
|
||||
if not (loc in ('in', 'out') and attr in ('nli', 'signal', 'total', 'ase')):
|
||||
yield None
|
||||
return
|
||||
loc_attr = 'carriers_'+loc
|
||||
for c in getattr(self, loc_attr) :
|
||||
if attr == 'total':
|
||||
yield c.power.ase+c.power.nli+c.power.signal
|
||||
else:
|
||||
yield c.power._asdict().get(attr, None)
|
||||
|
||||
def interpol_params(self, frequencies, pin, baud_rates, pref):
|
||||
"""interpolate SI channel frequencies with the edfa dgt and gain_ripple frquencies from JSON
|
||||
set the edfa class __init__ None parameters :
|
||||
self.channel_freq, self.nf, self.interpol_dgt and self.interpol_gain_ripple
|
||||
"""
|
||||
# TODO|jla: read amplifier actual frequencies from additional params in json
|
||||
amplifier_freq = arrange_frequencies(len(self.params.dgt), self.params.f_min, self.params.f_max) # Hz
|
||||
self.channel_freq = frequencies
|
||||
self.interpol_dgt = interp(self.channel_freq, amplifier_freq, self.params.dgt)
|
||||
|
||||
self.interpol_gain_ripple = interp(self.channel_freq, amplifier_freq, self.params.gain_ripple)
|
||||
self.interpol_nf_ripple =interp(self.channel_freq, amplifier_freq, self.params.nf_ripple)
|
||||
|
||||
self.nch = frequencies.size
|
||||
self.pin_db = lin2db(sum(pin*1e3))
|
||||
|
||||
"""in power mode: delta_p is defined and can be used to calculate the power target
|
||||
This power target is used calculate the amplifier gain"""
|
||||
if self.delta_p is not None:
|
||||
self.target_pch_out_db = round(self.delta_p + pref.p_span0, 2)
|
||||
self.effective_gain = self.target_pch_out_db - pref.p_spani
|
||||
|
||||
"""check power saturation and correct effective gain & power accordingly:"""
|
||||
self.effective_gain = min(
|
||||
self.effective_gain,
|
||||
self.params.p_max - (pref.p_spani + pref.neq_ch)
|
||||
)
|
||||
#print(self.uid, self.effective_gain, self.operational.gain_target)
|
||||
self.effective_pch_out_db = round(pref.p_spani + self.effective_gain, 2)
|
||||
|
||||
"""check power saturation and correct target_gain accordingly:"""
|
||||
#print(self.uid, self.effective_gain, self.pin_db, pref.p_spani)
|
||||
self.nf = self._calc_nf()
|
||||
self.gprofile = self._gain_profile(pin)
|
||||
|
||||
pout = (pin + self.noise_profile(baud_rates))*db2lin(self.gprofile)
|
||||
self.pout_db = lin2db(sum(pout*1e3))
|
||||
# ase & nli are only calculated in signal bandwidth
|
||||
# pout_db is not the absolute full output power (negligible if sufficient channels)
|
||||
|
||||
def _nf(self, type_def, nf_model, nf_fit_coeff, gain_min, gain_flatmax, gain_target):
|
||||
#if hybrid raman, use edfa_gain_flatmax attribute, else use gain_flatmax
|
||||
#gain_flatmax = getattr(params, 'edfa_gain_flatmax', params.gain_flatmax)
|
||||
pad = max(gain_min - gain_target, 0)
|
||||
gain_target += pad
|
||||
dg = max(gain_flatmax - gain_target, 0)
|
||||
if type_def == 'variable_gain':
|
||||
g1a = gain_target - nf_model.delta_p - dg
|
||||
nf_avg = lin2db(db2lin(nf_model.nf1) + db2lin(nf_model.nf2)/db2lin(g1a))
|
||||
elif type_def == 'fixed_gain':
|
||||
nf_avg = nf_model.nf0
|
||||
elif type_def == 'openroadm':
|
||||
pin_ch = self.pin_db - lin2db(self.nch)
|
||||
# model OSNR = f(Pin)
|
||||
nf_avg = pin_ch - polyval(nf_model.nf_coef, pin_ch) + 58
|
||||
elif type_def == 'advanced_model':
|
||||
nf_avg = polyval(nf_fit_coeff, -dg)
|
||||
else:
|
||||
assert False, "Unrecognized amplifier type, this should have been checked by the JSON loader"
|
||||
return nf_avg+pad, pad
|
||||
|
||||
def _calc_nf(self, avg = False):
|
||||
"""nf calculation based on 2 models: self.params.nf_model.enabled from json import:
|
||||
True => 2 stages amp modelling based on precalculated nf1, nf2 and delta_p in build_OA_json
|
||||
False => polynomial fit based on self.params.nf_fit_coeff"""
|
||||
# gain_min > gain_target TBD:
|
||||
if self.params.type_def == 'dual_stage':
|
||||
g1 = self.params.preamp_gain_flatmax
|
||||
g2 = self.effective_gain - g1
|
||||
nf1_avg, pad = self._nf( self.params.preamp_type_def,
|
||||
self.params.preamp_nf_model,
|
||||
self.params.preamp_nf_fit_coeff,
|
||||
self.params.preamp_gain_min,
|
||||
self.params.preamp_gain_flatmax,
|
||||
g1)
|
||||
#no padding expected for the 1stage because g1 = gain_max
|
||||
nf2_avg, pad = self._nf( self.params.booster_type_def,
|
||||
self.params.booster_nf_model,
|
||||
self.params.booster_nf_fit_coeff,
|
||||
self.params.booster_gain_min,
|
||||
self.params.booster_gain_flatmax,
|
||||
g2)
|
||||
nf_avg = lin2db(db2lin(nf1_avg) + db2lin(nf2_avg-g1))
|
||||
#no padding expected for the 1stage because g1 = gain_max
|
||||
pad = 0
|
||||
else:
|
||||
nf_avg, pad = self._nf( self.params.type_def,
|
||||
self.params.nf_model,
|
||||
self.params.nf_fit_coeff,
|
||||
self.params.gain_min,
|
||||
self.params.gain_flatmax,
|
||||
self.effective_gain)
|
||||
|
||||
self.att_in = pad # not used to attenuate carriers, only used in _repr_ and _str_
|
||||
if avg:
|
||||
return nf_avg
|
||||
else:
|
||||
return self.interpol_nf_ripple + nf_avg # input VOA = 1 for 1 NF degradation
|
||||
|
||||
def noise_profile(self, df):
|
||||
"""noise_profile(bw) computes amplifier ase (W) in signal bw (Hz)
|
||||
noise is calculated at amplifier input
|
||||
|
||||
:bw: signal bandwidth = baud rate in Hz
|
||||
:type bw: float
|
||||
|
||||
:return: the asepower in W in the signal bandwidth bw for 96 channels
|
||||
:return type: numpy array of float
|
||||
|
||||
ASE POWER USING PER CHANNEL GAIN PROFILE
|
||||
INPUTS:
|
||||
NF_dB - Noise figure in dB, vector of length number of channels or
|
||||
spectral slices
|
||||
G_dB - Actual gain calculated for the EDFA, vector of length number of
|
||||
channels or spectral slices
|
||||
ffs - Center frequency grid of the channels or spectral slices in
|
||||
THz, vector of length number of channels or spectral slices
|
||||
dF - width of each channel or spectral slice in THz,
|
||||
vector of length number of channels or spectral slices
|
||||
OUTPUT:
|
||||
ase_dBm - ase in dBm per channel or spectral slice
|
||||
NOTE: the output is the total ASE in the channel or spectral slice. For
|
||||
50GHz channels the ASE BW is effectively 0.4nm. To get to noise power
|
||||
in 0.1nm, subtract 6dB.
|
||||
|
||||
ONSR is usually quoted as channel power divided by
|
||||
the ASE power in 0.1nm RBW, regardless of the width of the actual
|
||||
channel. This is a historical convention from the days when optical
|
||||
signals were much smaller (155Mbps, 2.5Gbps, ... 10Gbps) than the
|
||||
resolution of the OSAs that were used to measure spectral power which
|
||||
were set to 0.1nm resolution for convenience. Moving forward into
|
||||
flexible grid and high baud rate signals, it may be convenient to begin
|
||||
quoting power spectral density in the same BW for both signal and ASE,
|
||||
e.g. 12.5GHz."""
|
||||
|
||||
ase = h * df * self.channel_freq * db2lin(self.nf) # W
|
||||
return ase # in W at amplifier input
|
||||
|
||||
def _gain_profile(self, pin, err_tolerance=1.0e-11, simple_opt=True):
|
||||
"""
|
||||
Pin : input power / channel in W
|
||||
|
||||
:param gain_ripple: design flat gain
|
||||
:param dgt: design gain tilt
|
||||
:param Pin: total input power in W
|
||||
:param gp: Average gain setpoint in dB units
|
||||
:param gtp: gain tilt setting
|
||||
:type gain_ripple: numpy.ndarray
|
||||
:type dgt: numpy.ndarray
|
||||
:type Pin: numpy.ndarray
|
||||
:type gp: float
|
||||
:type gtp: float
|
||||
:return: gain profile in dBm
|
||||
:rtype: numpy.ndarray
|
||||
|
||||
AMPLIFICATION USING INPUT PROFILE
|
||||
INPUTS:
|
||||
gain_ripple - vector of length number of channels or spectral slices
|
||||
DGT - vector of length number of channels or spectral slices
|
||||
Pin - input powers vector of length number of channels or
|
||||
spectral slices
|
||||
Gp - provisioned gain length 1
|
||||
GTp - provisioned tilt length 1
|
||||
|
||||
OUTPUT:
|
||||
amp gain per channel or spectral slice
|
||||
NOTE: there is no checking done for violations of the total output
|
||||
power capability of the amp.
|
||||
EDIT OF PREVIOUS NOTE: power violation now added in interpol_params
|
||||
Ported from Matlab version written by David Boerges at Ciena.
|
||||
Based on:
|
||||
R. di Muro, "The Er3+ fiber gain coefficient derived from a dynamic
|
||||
gain
|
||||
tilt technique", Journal of Lightwave Technology, Vol. 18, Iss. 3,
|
||||
Pp. 343-347, 2000.
|
||||
"""
|
||||
|
||||
# TODO|jla: check what param should be used (currently length(dgt))
|
||||
nb_channel = arange(len(self.interpol_dgt))
|
||||
|
||||
# TODO|jla: find a way to use these or lose them. Primarily we should have
|
||||
# a way to determine if exceeding the gain or output power of the amp
|
||||
tot_in_power_db = self.pin_db # Pin in W
|
||||
|
||||
# linear fit to get the
|
||||
p = polyfit(nb_channel, self.interpol_dgt, 1)
|
||||
dgt_slope = p[0]
|
||||
|
||||
# Calculate the target slope - currently assumes equal spaced channels
|
||||
# TODO|jla: support arbitrary channel spacing
|
||||
targ_slope = self.tilt_target / (len(nb_channel) - 1)
|
||||
|
||||
# first estimate of DGT scaling
|
||||
if abs(dgt_slope) > 0.001: # check for zero value due to flat dgt
|
||||
dgts1 = targ_slope / dgt_slope
|
||||
else:
|
||||
dgts1 = 0
|
||||
|
||||
# when simple_opt is true, make 2 attempts to compute gain and
|
||||
# the internal voa value. This is currently here to provide direct
|
||||
# comparison with original Matlab code. Will be removed.
|
||||
# TODO|jla: replace with loop
|
||||
|
||||
if not simple_opt:
|
||||
return
|
||||
|
||||
# first estimate of Er gain & VOA loss
|
||||
g1st = array(self.interpol_gain_ripple) + self.params.gain_flatmax \
|
||||
+ array(self.interpol_dgt) * dgts1
|
||||
voa = lin2db(mean(db2lin(g1st))) - self.effective_gain
|
||||
|
||||
# second estimate of amp ch gain using the channel input profile
|
||||
g2nd = g1st - voa
|
||||
|
||||
pout_db = lin2db(sum(pin*1e3*db2lin(g2nd)))
|
||||
dgts2 = self.effective_gain - (pout_db - tot_in_power_db)
|
||||
|
||||
# center estimate of amp ch gain
|
||||
xcent = dgts2
|
||||
gcent = g1st - voa + array(self.interpol_dgt) * xcent
|
||||
pout_db = lin2db(sum(pin*1e3*db2lin(gcent)))
|
||||
gavg_cent = pout_db - tot_in_power_db
|
||||
|
||||
# Lower estimate of amp ch gain
|
||||
deltax = max(g1st) - min(g1st)
|
||||
# if no ripple deltax = 0 and xlow = xcent: div 0
|
||||
# TODO|jla: add check for flat gain response
|
||||
if abs(deltax) <= 0.05: # not enough ripple to consider calculation
|
||||
return g1st - voa
|
||||
|
||||
xlow = dgts2 - deltax
|
||||
glow = g1st - voa + array(self.interpol_dgt) * xlow
|
||||
pout_db = lin2db(sum(pin * 1e3 * db2lin(glow)))
|
||||
gavg_low = pout_db - tot_in_power_db
|
||||
|
||||
# upper gain estimate
|
||||
xhigh = dgts2 + deltax
|
||||
ghigh = g1st - voa + array(self.interpol_dgt) * xhigh
|
||||
pout_db = lin2db(sum(pin * 1e3 * db2lin(ghigh)))
|
||||
gavg_high = pout_db - tot_in_power_db
|
||||
|
||||
# compute slope
|
||||
slope1 = (gavg_low - gavg_cent) / (xlow - xcent)
|
||||
slope2 = (gavg_cent - gavg_high) / (xcent - xhigh)
|
||||
|
||||
if abs(self.effective_gain - gavg_cent) <= err_tolerance:
|
||||
dgts3 = xcent
|
||||
elif self.effective_gain < gavg_cent:
|
||||
dgts3 = xcent - (gavg_cent - self.effective_gain) / slope1
|
||||
else:
|
||||
dgts3 = xcent + (-gavg_cent + self.effective_gain) / slope2
|
||||
|
||||
return g1st - voa + array(self.interpol_dgt) * dgts3
|
||||
|
||||
def propagate(self, pref, *carriers):
|
||||
"""add ASE noise to the propagating carriers of :class:`.info.SpectralInformation`"""
|
||||
pin = array([c.power.signal+c.power.nli+c.power.ase for c in carriers]) # pin in W
|
||||
freq = array([c.frequency for c in carriers])
|
||||
brate = array([c.baud_rate for c in carriers])
|
||||
# interpolate the amplifier vectors with the carriers freq, calculate nf & gain profile
|
||||
self.interpol_params(freq, pin, brate, pref)
|
||||
|
||||
gains = db2lin(self.gprofile)
|
||||
carrier_ases = self.noise_profile(brate)
|
||||
att = db2lin(self.out_voa)
|
||||
|
||||
for gain, carrier_ase, carrier in zip(gains, carrier_ases, carriers):
|
||||
pwr = carrier.power
|
||||
pwr = pwr._replace(signal=pwr.signal*gain/att,
|
||||
nli=pwr.nli*gain/att,
|
||||
ase=(pwr.ase+carrier_ase)*gain/att)
|
||||
yield carrier._replace(power=pwr)
|
||||
|
||||
def update_pref(self, pref):
|
||||
return pref._replace(p_span0=pref.p_span0,
|
||||
p_spani=pref.p_spani + self.effective_gain - self.out_voa)
|
||||
|
||||
def __call__(self, spectral_info):
|
||||
self.carriers_in = spectral_info.carriers
|
||||
carriers = tuple(self.propagate(spectral_info.pref, *spectral_info.carriers))
|
||||
pref = self.update_pref(spectral_info.pref)
|
||||
self.carriers_out = carriers
|
||||
return spectral_info._replace(carriers=carriers, pref=pref)
|
||||
401
gnpy/core/equipment.py
Normal file
401
gnpy/core/equipment.py
Normal file
@@ -0,0 +1,401 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.equipment
|
||||
===================
|
||||
|
||||
This module contains functionality for specifying equipment.
|
||||
'''
|
||||
|
||||
from numpy import clip, polyval
|
||||
from operator import itemgetter
|
||||
from math import isclose
|
||||
from pathlib import Path
|
||||
from json import load
|
||||
from gnpy.core.utils import lin2db, db2lin, load_json
|
||||
from collections import namedtuple
|
||||
from gnpy.core.elements import Edfa
|
||||
from gnpy.core.exceptions import EquipmentConfigError
|
||||
import time
|
||||
|
||||
Model_vg = namedtuple('Model_vg', 'nf1 nf2 delta_p')
|
||||
Model_fg = namedtuple('Model_fg', 'nf0')
|
||||
Model_openroadm = namedtuple('Model_openroadm', 'nf_coef')
|
||||
Model_hybrid = namedtuple('Model_hybrid', 'nf_ram gain_ram edfa_variety')
|
||||
Model_dual_stage = namedtuple('Model_dual_stage', 'preamp_variety booster_variety')
|
||||
|
||||
class common:
|
||||
def update_attr(self, default_values, kwargs, name):
|
||||
clean_kwargs = {k:v for k, v in kwargs.items() if v != ''}
|
||||
for k, v in default_values.items():
|
||||
setattr(self, k, clean_kwargs.get(k, v))
|
||||
if k not in clean_kwargs and name != 'Amp':
|
||||
print(f'\x1b[1;31;40m'+
|
||||
f'\n WARNING missing {k} attribute in eqpt_config.json[{name}]'+
|
||||
f'\n default value is {k} = {v}'+
|
||||
f'\x1b[0m')
|
||||
time.sleep(1)
|
||||
|
||||
class SI(common):
|
||||
default_values =\
|
||||
{
|
||||
"f_min": 191.35e12,
|
||||
"f_max": 196.1e12,
|
||||
"baud_rate": 32e9,
|
||||
"spacing": 50e9,
|
||||
"power_dbm": 0,
|
||||
"power_range_db": [0, 0, 0.5],
|
||||
"roll_off": 0.15,
|
||||
"tx_osnr": 45,
|
||||
"sys_margins": 0
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.update_attr(self.default_values, kwargs, 'SI')
|
||||
|
||||
class Span(common):
|
||||
default_values = \
|
||||
{
|
||||
'power_mode': True,
|
||||
'delta_power_range_db': None,
|
||||
'max_fiber_lineic_loss_for_raman': 0.25,
|
||||
'target_extended_gain': 2.5,
|
||||
'max_length': 150,
|
||||
'length_units': 'km',
|
||||
'max_loss': None,
|
||||
'padding': 10,
|
||||
'EOL': 0,
|
||||
'con_in': 0,
|
||||
'con_out': 0
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.update_attr(self.default_values, kwargs, 'Span')
|
||||
|
||||
class Roadm(common):
|
||||
default_values = \
|
||||
{
|
||||
'target_pch_out_db': -17,
|
||||
'add_drop_osnr': 100,
|
||||
'restrictions': {
|
||||
'preamp_variety_list':[],
|
||||
'booster_variety_list':[]
|
||||
}
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.update_attr(self.default_values, kwargs, 'Roadm')
|
||||
|
||||
class Transceiver(common):
|
||||
default_values = \
|
||||
{
|
||||
'type_variety': None,
|
||||
'frequency': None,
|
||||
'mode': {}
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.update_attr(self.default_values, kwargs, 'Transceiver')
|
||||
|
||||
class Fiber(common):
|
||||
default_values = \
|
||||
{
|
||||
'type_variety': '',
|
||||
'dispersion': None,
|
||||
'gamma': 0
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.update_attr(self.default_values, kwargs, 'Fiber')
|
||||
|
||||
class RamanFiber(common):
|
||||
default_values = \
|
||||
{
|
||||
'type_variety': '',
|
||||
'dispersion': None,
|
||||
'gamma': 0,
|
||||
'raman_efficiency': None
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.update_attr(self.default_values, kwargs, 'RamanFiber')
|
||||
for param in ('cr', 'frequency_offset'):
|
||||
if param not in self.raman_efficiency:
|
||||
raise EquipmentConfigError(f'RamanFiber.raman_efficiency: missing "{param}" parameter')
|
||||
if self.raman_efficiency['frequency_offset'] != sorted(self.raman_efficiency['frequency_offset']):
|
||||
raise EquipmentConfigError(f'RamanFiber.raman_efficiency.frequency_offset is not sorted')
|
||||
|
||||
class Amp(common):
|
||||
default_values = \
|
||||
{
|
||||
'f_min': 191.35e12,
|
||||
'f_max': 196.1e12,
|
||||
'type_variety': '',
|
||||
'type_def': '',
|
||||
'gain_flatmax': None,
|
||||
'gain_min': None,
|
||||
'p_max': None,
|
||||
'nf_model': None,
|
||||
'dual_stage_model': None,
|
||||
'nf_fit_coeff': None,
|
||||
'nf_ripple': None,
|
||||
'dgt': None,
|
||||
'gain_ripple': None,
|
||||
'out_voa_auto': False,
|
||||
'allowed_for_design': False,
|
||||
'raman': False
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.update_attr(self.default_values, kwargs, 'Amp')
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, filename, **kwargs):
|
||||
config = Path(filename).parent / 'default_edfa_config.json'
|
||||
|
||||
type_variety = kwargs['type_variety']
|
||||
type_def = kwargs.get('type_def', 'variable_gain') # default compatibility with older json eqpt files
|
||||
nf_def = None
|
||||
dual_stage_def = None
|
||||
|
||||
if type_def == 'fixed_gain':
|
||||
try:
|
||||
nf0 = kwargs.pop('nf0')
|
||||
except KeyError: #nf0 is expected for a fixed gain amp
|
||||
raise EquipmentConfigError(f'missing nf0 value input for amplifier: {type_variety} in equipment config')
|
||||
for k in ('nf_min', 'nf_max'):
|
||||
try:
|
||||
del kwargs[k]
|
||||
except KeyError:
|
||||
pass
|
||||
nf_def = Model_fg(nf0)
|
||||
elif type_def == 'advanced_model':
|
||||
config = Path(filename).parent / kwargs.pop('advanced_config_from_json')
|
||||
elif type_def == 'variable_gain':
|
||||
gain_min, gain_max = kwargs['gain_min'], kwargs['gain_flatmax']
|
||||
try: #nf_min and nf_max are expected for a variable gain amp
|
||||
nf_min = kwargs.pop('nf_min')
|
||||
nf_max = kwargs.pop('nf_max')
|
||||
except KeyError:
|
||||
raise EquipmentConfigError(f'missing nf_min or nf_max value input for amplifier: {type_variety} in equipment config')
|
||||
try: #remove all remaining nf inputs
|
||||
del kwargs['nf0']
|
||||
except KeyError: pass #nf0 is not needed for variable gain amp
|
||||
nf1, nf2, delta_p = nf_model(type_variety, gain_min, gain_max, nf_min, nf_max)
|
||||
nf_def = Model_vg(nf1, nf2, delta_p)
|
||||
elif type_def == 'openroadm':
|
||||
try:
|
||||
nf_coef = kwargs.pop('nf_coef')
|
||||
except KeyError: #nf_coef is expected for openroadm amp
|
||||
raise EquipmentConfigError(f'missing nf_coef input for amplifier: {type_variety} in equipment config')
|
||||
nf_def = Model_openroadm(nf_coef)
|
||||
elif type_def == 'dual_stage':
|
||||
try: #nf_ram and gain_ram are expected for a hybrid amp
|
||||
preamp_variety = kwargs.pop('preamp_variety')
|
||||
booster_variety = kwargs.pop('booster_variety')
|
||||
except KeyError:
|
||||
raise EquipmentConfigError(f'missing preamp/booster variety input for amplifier: {type_variety} in equipment config')
|
||||
dual_stage_def = Model_dual_stage(preamp_variety, booster_variety)
|
||||
|
||||
with open(config, encoding='utf-8') as f:
|
||||
json_data = load(f)
|
||||
|
||||
return cls(**{**kwargs, **json_data,
|
||||
'nf_model': nf_def, 'dual_stage_model': dual_stage_def})
|
||||
|
||||
|
||||
def nf_model(type_variety, gain_min, gain_max, nf_min, nf_max):
|
||||
if nf_min < -10:
|
||||
raise EquipmentConfigError(f'Invalid nf_min value {nf_min!r} for amplifier {type_variety}')
|
||||
if nf_max < -10:
|
||||
raise EquipmentConfigError(f'Invalid nf_max value {nf_max!r} for amplifier {type_variety}')
|
||||
|
||||
# NF estimation model based on nf_min and nf_max
|
||||
# delta_p: max power dB difference between first and second stage coils
|
||||
# dB g1a: first stage gain - internal VOA attenuation
|
||||
# nf1, nf2: first and second stage coils
|
||||
# calculated by solving nf_{min,max} = nf1 + nf2 / g1a{min,max}
|
||||
delta_p = 5
|
||||
g1a_min = gain_min - (gain_max - gain_min) - delta_p
|
||||
g1a_max = gain_max - delta_p
|
||||
nf2 = lin2db((db2lin(nf_min) - db2lin(nf_max)) /
|
||||
(1/db2lin(g1a_max) - 1/db2lin(g1a_min)))
|
||||
nf1 = lin2db(db2lin(nf_min) - db2lin(nf2)/db2lin(g1a_max))
|
||||
|
||||
if nf1 < 4:
|
||||
raise EquipmentConfigError(f'First coil value too low {nf1} for amplifier {type_variety}')
|
||||
|
||||
# Check 1 dB < delta_p < 6 dB to ensure nf_min and nf_max values make sense.
|
||||
# There shouldn't be high nf differences between the two coils:
|
||||
# nf2 should be nf1 + 0.3 < nf2 < nf1 + 2
|
||||
# If not, recompute and check delta_p
|
||||
if not nf1 + 0.3 < nf2 < nf1 + 2:
|
||||
nf2 = clip(nf2, nf1 + 0.3, nf1 + 2)
|
||||
g1a_max = lin2db(db2lin(nf2) / (db2lin(nf_min) - db2lin(nf1)))
|
||||
delta_p = gain_max - g1a_max
|
||||
g1a_min = gain_min - (gain_max-gain_min) - delta_p
|
||||
if not 1 < delta_p < 11:
|
||||
raise EquipmentConfigError(f'Computed \N{greek capital letter delta}P invalid \
|
||||
\n 1st coil vs 2nd coil calculated DeltaP {delta_p:.2f} for \
|
||||
\n amplifier {type_variety} is not valid: revise inputs \
|
||||
\n calculated 1st coil NF = {nf1:.2f}, 2nd coil NF = {nf2:.2f}')
|
||||
# Check calculated values for nf1 and nf2
|
||||
calc_nf_min = lin2db(db2lin(nf1) + db2lin(nf2)/db2lin(g1a_max))
|
||||
if not isclose(nf_min, calc_nf_min, abs_tol=0.01):
|
||||
raise EquipmentConfigError(f'nf_min does not match calc_nf_min, {nf_min} vs {calc_nf_min} for amp {type_variety}')
|
||||
calc_nf_max = lin2db(db2lin(nf1) + db2lin(nf2)/db2lin(g1a_min))
|
||||
if not isclose(nf_max, calc_nf_max, abs_tol=0.01):
|
||||
raise EquipmentConfigError(f'nf_max does not match calc_nf_max, {nf_max} vs {calc_nf_max} for amp {type_variety}')
|
||||
|
||||
return nf1, nf2, delta_p
|
||||
|
||||
def edfa_nf(gain_target, variety_type, equipment):
|
||||
amp_params = equipment['Edfa'][variety_type]
|
||||
amp = Edfa(
|
||||
uid = f'calc_NF',
|
||||
params = amp_params.__dict__,
|
||||
operational = {
|
||||
'gain_target': gain_target,
|
||||
'tilt_target': 0
|
||||
}
|
||||
)
|
||||
amp.pin_db = 0
|
||||
amp.nch = 88
|
||||
return amp._calc_nf(True)
|
||||
|
||||
def trx_mode_params(equipment, trx_type_variety='', trx_mode='', error_message=False):
|
||||
"""return the trx and SI parameters from eqpt_config for a given type_variety and mode (ie format)"""
|
||||
trx_params = {}
|
||||
default_si_data = equipment['SI']['default']
|
||||
|
||||
try:
|
||||
trxs = equipment['Transceiver']
|
||||
#if called from path_requests_run.py, trx_mode is filled with None when not specified by user
|
||||
#if called from transmission_main.py, trx_mode is ''
|
||||
if trx_mode is not None:
|
||||
mode_params = next(mode for trx in trxs \
|
||||
if trx == trx_type_variety \
|
||||
for mode in trxs[trx].mode \
|
||||
if mode['format'] == trx_mode)
|
||||
trx_params = {**mode_params}
|
||||
# sanity check: spacing baudrate must be smaller than min spacing
|
||||
if trx_params['baud_rate'] > trx_params['min_spacing'] :
|
||||
raise EquipmentConfigError(f'Inconsistency in equipment library:\n Transpoder "{trx_type_variety}" mode "{trx_params["format"]}" '+\
|
||||
f'has baud rate: {trx_params["baud_rate"]*1e-9} GHz greater than min_spacing {trx_params["min_spacing"]*1e-9}.')
|
||||
else:
|
||||
mode_params = {"format": "undetermined",
|
||||
"baud_rate": None,
|
||||
"OSNR": None,
|
||||
"bit_rate": None,
|
||||
"roll_off": None,
|
||||
"tx_osnr":None,
|
||||
"min_spacing":None,
|
||||
"cost":None}
|
||||
trx_params = {**mode_params}
|
||||
trx_params['f_min'] = equipment['Transceiver'][trx_type_variety].frequency['min']
|
||||
trx_params['f_max'] = equipment['Transceiver'][trx_type_variety].frequency['max']
|
||||
|
||||
# TODO: novel automatic feature maybe unwanted if spacing is specified
|
||||
# trx_params['spacing'] = automatic_spacing(trx_params['baud_rate'])
|
||||
# temp = trx_params['spacing']
|
||||
# print(f'spacing {temp}')
|
||||
except StopIteration :
|
||||
if error_message:
|
||||
raise EquipmentConfigError(f'Computation stoped: could not find tsp : {trx_type_variety} with mode: {trx_mode} in eqpt library')
|
||||
else:
|
||||
# default transponder charcteristics
|
||||
# mainly used with transmission_main_example.py
|
||||
trx_params['f_min'] = default_si_data.f_min
|
||||
trx_params['f_max'] = default_si_data.f_max
|
||||
trx_params['baud_rate'] = default_si_data.baud_rate
|
||||
trx_params['spacing'] = default_si_data.spacing
|
||||
trx_params['OSNR'] = None
|
||||
trx_params['bit_rate'] = None
|
||||
trx_params['cost'] = None
|
||||
trx_params['roll_off'] = default_si_data.roll_off
|
||||
trx_params['tx_osnr'] = default_si_data.tx_osnr
|
||||
trx_params['min_spacing'] = None
|
||||
nch = automatic_nch(trx_params['f_min'], trx_params['f_max'], trx_params['spacing'])
|
||||
trx_params['nb_channel'] = nch
|
||||
print(f'There are {nch} channels propagating')
|
||||
|
||||
trx_params['power'] = db2lin(default_si_data.power_dbm)*1e-3
|
||||
|
||||
return trx_params
|
||||
|
||||
def automatic_spacing(baud_rate):
|
||||
"""return the min possible channel spacing for a given baud rate"""
|
||||
# TODO : this should parametrized in a cfg file
|
||||
# list of possible tuples [(max_baud_rate, spacing_for_this_baud_rate)]
|
||||
spacing_list = [(33e9, 37.5e9), (38e9, 50e9), (50e9, 62.5e9), (67e9, 75e9), (92e9, 100e9)]
|
||||
return min((s[1] for s in spacing_list if s[0] > baud_rate), default=baud_rate*1.2)
|
||||
|
||||
def automatic_nch(f_min, f_max, spacing):
|
||||
return int((f_max - f_min)//spacing)
|
||||
|
||||
def automatic_fmax(f_min, spacing, nch):
|
||||
return f_min + spacing * nch
|
||||
|
||||
def load_equipment(filename):
|
||||
json_data = load_json(filename)
|
||||
return equipment_from_json(json_data, filename)
|
||||
|
||||
def update_trx_osnr(equipment):
|
||||
"""add sys_margins to all Transceivers OSNR values"""
|
||||
for trx in equipment['Transceiver'].values():
|
||||
for m in trx.mode:
|
||||
m['OSNR'] = m['OSNR'] + equipment['SI']['default'].sys_margins
|
||||
return equipment
|
||||
|
||||
def update_dual_stage(equipment):
|
||||
edfa_dict = equipment['Edfa']
|
||||
for edfa in edfa_dict.values():
|
||||
if edfa.type_def == 'dual_stage':
|
||||
edfa_preamp = edfa_dict[edfa.dual_stage_model.preamp_variety]
|
||||
edfa_booster = edfa_dict[edfa.dual_stage_model.booster_variety]
|
||||
for key, value in edfa_preamp.__dict__.items():
|
||||
attr_k = 'preamp_' + key
|
||||
setattr(edfa, attr_k, value)
|
||||
for key, value in edfa_booster.__dict__.items():
|
||||
attr_k = 'booster_' + key
|
||||
setattr(edfa, attr_k, value)
|
||||
edfa.p_max = edfa_booster.p_max
|
||||
edfa.gain_flatmax = edfa_booster.gain_flatmax + edfa_preamp.gain_flatmax
|
||||
if edfa.gain_min < edfa_preamp.gain_min:
|
||||
raise EquipmentConfigError(f'Dual stage {edfa.type_variety} min gain is lower than its preamp min gain')
|
||||
return equipment
|
||||
|
||||
def roadm_restrictions_sanity_check(equipment):
|
||||
""" verifies that booster and preamp restrictions specified in roadm equipment are listed
|
||||
in the edfa.
|
||||
"""
|
||||
restrictions = equipment['Roadm']['default'].restrictions['booster_variety_list'] + \
|
||||
equipment['Roadm']['default'].restrictions['preamp_variety_list']
|
||||
for amp_name in restrictions:
|
||||
if amp_name not in equipment['Edfa']:
|
||||
raise EquipmentConfigError(f'ROADM restriction {amp_name} does not refer to a defined EDFA name')
|
||||
|
||||
def equipment_from_json(json_data, filename):
|
||||
"""build global dictionnary eqpt_library that stores all eqpt characteristics:
|
||||
edfa type type_variety, fiber type_variety
|
||||
from the eqpt_config.json (filename parameter)
|
||||
also read advanced_config_from_json file parameters for edfa if they are available:
|
||||
typically nf_ripple, dfg gain ripple, dgt and nf polynomial nf_fit_coeff
|
||||
if advanced_config_from_json file parameter is not present: use nf_model:
|
||||
requires nf_min and nf_max values boundaries of the edfa gain range
|
||||
"""
|
||||
equipment = {}
|
||||
for key, entries in json_data.items():
|
||||
equipment[key] = {}
|
||||
typ = globals()[key]
|
||||
for entry in entries:
|
||||
subkey = entry.get('type_variety', 'default')
|
||||
if key == 'Edfa':
|
||||
equipment[key][subkey] = Amp.from_json(filename, **entry)
|
||||
else:
|
||||
equipment[key][subkey] = typ(**entry)
|
||||
equipment = update_trx_osnr(equipment)
|
||||
equipment = update_dual_stage(equipment)
|
||||
roadm_restrictions_sanity_check(equipment)
|
||||
return equipment
|
||||
29
gnpy/core/exceptions.py
Normal file
29
gnpy/core/exceptions.py
Normal file
@@ -0,0 +1,29 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.exceptions
|
||||
====================
|
||||
|
||||
Exceptions thrown by other gnpy modules
|
||||
'''
|
||||
|
||||
|
||||
class ConfigurationError(Exception):
|
||||
'''User-provided configuration contains an error'''
|
||||
|
||||
class EquipmentConfigError(ConfigurationError):
|
||||
'''Incomplete or wrong configuration within the equipment library'''
|
||||
|
||||
class NetworkTopologyError(ConfigurationError):
|
||||
'''Topology of user-provided network is wrong'''
|
||||
|
||||
class ServiceError(Exception):
|
||||
'''Service of user-provided request is wrong'''
|
||||
|
||||
class DisjunctionError(ServiceError):
|
||||
'''Disjunction of user-provided request can not be satisfied'''
|
||||
|
||||
class SpectrumError(Exception):
|
||||
'''Spectrum errors of the program'''
|
||||
|
||||
10
gnpy/core/execute.py
Normal file
10
gnpy/core/execute.py
Normal file
@@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.execute
|
||||
=================
|
||||
|
||||
This module contains functions for executing the propogation of
|
||||
spectral information on a `gnpy` network.
|
||||
'''
|
||||
72
gnpy/core/info.py
Normal file
72
gnpy/core/info.py
Normal file
@@ -0,0 +1,72 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.info
|
||||
==============
|
||||
|
||||
This module contains classes for modelling :class:`SpectralInformation`.
|
||||
'''
|
||||
|
||||
|
||||
from collections import namedtuple
|
||||
from numpy import array
|
||||
from gnpy.core.utils import lin2db, db2lin
|
||||
from json import loads
|
||||
from gnpy.core.utils import load_json
|
||||
from gnpy.core.equipment import automatic_nch, automatic_spacing
|
||||
|
||||
class Power(namedtuple('Power', 'signal nli ase')):
|
||||
"""carriers power in W"""
|
||||
|
||||
|
||||
class Channel(namedtuple('Channel', 'channel_number frequency baud_rate roll_off power')):
|
||||
pass
|
||||
|
||||
|
||||
class Pref(namedtuple('Pref', 'p_span0, p_spani, neq_ch ')):
|
||||
"""noiseless reference power in dBm:
|
||||
p_span0: inital target carrier power
|
||||
p_spani: carrier power after element i
|
||||
neq_ch: equivalent channel count in dB"""
|
||||
|
||||
|
||||
class SpectralInformation(namedtuple('SpectralInformation', 'pref carriers')):
|
||||
|
||||
def __new__(cls, pref, carriers):
|
||||
return super().__new__(cls, pref, carriers)
|
||||
|
||||
|
||||
def create_input_spectral_information(f_min, f_max, roll_off, baud_rate, power, spacing):
|
||||
# pref in dB : convert power lin into power in dB
|
||||
pref = lin2db(power * 1e3)
|
||||
nb_channel = automatic_nch(f_min, f_max, spacing)
|
||||
si = SpectralInformation(
|
||||
pref=Pref(pref, pref, lin2db(nb_channel)),
|
||||
carriers=[
|
||||
Channel(f, (f_min+spacing*f),
|
||||
baud_rate, roll_off, Power(power, 0, 0)) for f in range(1,nb_channel+1)
|
||||
])
|
||||
return si
|
||||
|
||||
if __name__ == '__main__':
|
||||
pref = lin2db(power * 1e3)
|
||||
si = SpectralInformation(
|
||||
Pref(pref, pref),
|
||||
Channel(1, 193.95e12, 32e9, 0.15, # 193.95 THz, 32 Gbaud
|
||||
Power(1e-3, 1e-6, 1e-6)), # 1 mW, 1uW, 1uW
|
||||
Channel(1, 195.95e12, 32e9, 0.15, # 195.95 THz, 32 Gbaud
|
||||
Power(1.2e-3, 1e-6, 1e-6)), # 1.2 mW, 1uW, 1uW
|
||||
)
|
||||
|
||||
si = SpectralInformation()
|
||||
spacing = 0.05 # THz
|
||||
|
||||
si = si._replace(carriers=tuple(Channel(f+1, 191.3+spacing*(f+1), 32e9, 0.15, Power(1e-3, f, 1)) for f in range(96)))
|
||||
|
||||
print(f'si = {si}')
|
||||
print(f'si = {si.carriers[0].power.nli}')
|
||||
print(f'si = {si.carriers[20].power.nli}')
|
||||
si2 = si._replace(carriers=tuple(c._replace(power = c.power._replace(nli = c.power.nli * 1e5))
|
||||
for c in si.carriers))
|
||||
print(f'si2 = {si2}')
|
||||
571
gnpy/core/network.py
Normal file
571
gnpy/core/network.py
Normal file
@@ -0,0 +1,571 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.network
|
||||
=================
|
||||
|
||||
This module contains functions for constructing networks of network elements.
|
||||
'''
|
||||
|
||||
from gnpy.core.convert import convert_file
|
||||
from networkx import DiGraph
|
||||
from numpy import arange
|
||||
from scipy.interpolate import interp1d
|
||||
from logging import getLogger
|
||||
from os import path
|
||||
from operator import itemgetter, attrgetter
|
||||
from gnpy.core import elements
|
||||
from gnpy.core.elements import Fiber, Edfa, Transceiver, Roadm, Fused, RamanFiber
|
||||
from gnpy.core.equipment import edfa_nf
|
||||
from gnpy.core.exceptions import ConfigurationError, NetworkTopologyError
|
||||
from gnpy.core.units import UNITS
|
||||
from gnpy.core.utils import (load_json, save_json, round2float, db2lin,
|
||||
merge_amplifier_restrictions)
|
||||
from gnpy.core.science_utils import SimParams
|
||||
from collections import namedtuple
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
def load_network(filename, equipment, name_matching = False):
|
||||
json_filename = ''
|
||||
if filename.suffix.lower() == '.xls':
|
||||
logger.info('Automatically generating topology JSON file')
|
||||
json_filename = convert_file(filename, name_matching)
|
||||
elif filename.suffix.lower() == '.json':
|
||||
json_filename = filename
|
||||
else:
|
||||
raise ValueError(f'unsuported topology filename extension {filename.suffix.lower()}')
|
||||
json_data = load_json(json_filename)
|
||||
return network_from_json(json_data, equipment)
|
||||
|
||||
def save_network(filename, network):
|
||||
filename_output = path.splitext(filename)[0] + '_auto_design.json'
|
||||
json_data = network_to_json(network)
|
||||
save_json(json_data, filename_output)
|
||||
|
||||
def network_from_json(json_data, equipment):
|
||||
# NOTE|dutc: we could use the following, but it would tie our data format
|
||||
# too closely to the graph library
|
||||
# from networkx import node_link_graph
|
||||
g = DiGraph()
|
||||
for el_config in json_data['elements']:
|
||||
typ = el_config.pop('type')
|
||||
variety = el_config.pop('type_variety', 'default')
|
||||
if typ in equipment and variety in equipment[typ]:
|
||||
extra_params = equipment[typ][variety]
|
||||
temp = el_config.setdefault('params', {})
|
||||
temp = merge_amplifier_restrictions(temp, extra_params.__dict__)
|
||||
el_config['params'] = temp
|
||||
elif typ in ['Edfa', 'Fiber']: # catch it now because the code will crash later!
|
||||
raise ConfigurationError(f'The {typ} of variety type {variety} was not recognized:'
|
||||
'\nplease check it is properly defined in the eqpt_config json file')
|
||||
cls = getattr(elements, typ)
|
||||
el = cls(**el_config)
|
||||
g.add_node(el)
|
||||
|
||||
nodes = {k.uid: k for k in g.nodes()}
|
||||
|
||||
for cx in json_data['connections']:
|
||||
from_node, to_node = cx['from_node'], cx['to_node']
|
||||
try:
|
||||
if isinstance(nodes[from_node], Fiber):
|
||||
edge_length = nodes[from_node].params.length
|
||||
else:
|
||||
edge_length = 0.01
|
||||
g.add_edge(nodes[from_node], nodes[to_node], weight = edge_length)
|
||||
except KeyError:
|
||||
raise NetworkTopologyError(f'can not find {from_node} or {to_node} defined in {cx}')
|
||||
|
||||
return g
|
||||
|
||||
def network_to_json(network):
|
||||
data = {
|
||||
'elements': [n.to_json for n in network]
|
||||
}
|
||||
connections = {
|
||||
'connections': [{"from_node": n.uid,
|
||||
"to_node": next_n.uid}
|
||||
for n in network
|
||||
for next_n in network.successors(n) if next_n is not None]
|
||||
}
|
||||
data.update(connections)
|
||||
return data
|
||||
|
||||
def select_edfa(raman_allowed, gain_target, power_target, equipment, uid, restrictions=None):
|
||||
"""amplifer selection algorithm
|
||||
@Orange Jean-Luc Augé
|
||||
"""
|
||||
Edfa_list = namedtuple('Edfa_list', 'variety power gain_min nf')
|
||||
TARGET_EXTENDED_GAIN = equipment['Span']['default'].target_extended_gain
|
||||
|
||||
# for roadm restriction only: create a dict including not allowed for design amps
|
||||
# because main use case is to have specific radm amp which are not allowed for ILA
|
||||
# with the auto design
|
||||
edfa_dict = {name: amp for (name, amp) in equipment['Edfa'].items()
|
||||
if restrictions is None or name in restrictions}
|
||||
|
||||
pin = power_target - gain_target
|
||||
|
||||
# create 2 list of available amplifiers with relevant attributes for their selection
|
||||
|
||||
# edfa list with:
|
||||
# extended gain min allowance of 3dB: could be parametrized, but a bit complex
|
||||
# extended gain max allowance TARGET_EXTENDED_GAIN is coming from eqpt_config.json
|
||||
# power attribut include power AND gain limitations
|
||||
edfa_list = [Edfa_list(
|
||||
variety=edfa_variety,
|
||||
power=min(
|
||||
pin
|
||||
+edfa.gain_flatmax
|
||||
+TARGET_EXTENDED_GAIN,
|
||||
edfa.p_max
|
||||
)
|
||||
-power_target,
|
||||
gain_min=
|
||||
gain_target+3
|
||||
-edfa.gain_min,
|
||||
nf=edfa_nf(gain_target, edfa_variety, equipment)) \
|
||||
for edfa_variety, edfa in edfa_dict.items()
|
||||
if ((edfa.allowed_for_design or restrictions is not None) and not edfa.raman)]
|
||||
|
||||
#consider a Raman list because of different gain_min requirement:
|
||||
#do not allow extended gain min for Raman
|
||||
raman_list = [Edfa_list(
|
||||
variety=edfa_variety,
|
||||
power=min(
|
||||
pin
|
||||
+edfa.gain_flatmax
|
||||
+TARGET_EXTENDED_GAIN,
|
||||
edfa.p_max
|
||||
)
|
||||
-power_target,
|
||||
gain_min=
|
||||
gain_target
|
||||
-edfa.gain_min,
|
||||
nf=edfa_nf(gain_target, edfa_variety, equipment))
|
||||
for edfa_variety, edfa in edfa_dict.items()
|
||||
if (edfa.allowed_for_design and edfa.raman)] \
|
||||
if raman_allowed else []
|
||||
|
||||
#merge raman and edfa lists
|
||||
amp_list = edfa_list + raman_list
|
||||
|
||||
#filter on min gain limitation:
|
||||
acceptable_gain_min_list = [x for x in amp_list if x.gain_min>0]
|
||||
|
||||
if len(acceptable_gain_min_list) < 1:
|
||||
#do not take this empty list into account for the rest of the code
|
||||
#but issue a warning to the user and do not consider Raman
|
||||
#Raman below min gain should not be allowed because i is meant to be a design requirement
|
||||
#and raman padding at the amplifier input is impossible!
|
||||
|
||||
if len(edfa_list) < 1:
|
||||
raise ConfigurationError(f'auto_design could not find any amplifier \
|
||||
to satisfy min gain requirement in node {uid} \
|
||||
please increase span fiber padding')
|
||||
else:
|
||||
# TODO: convert to logging
|
||||
print(
|
||||
f'\x1b[1;31;40m'\
|
||||
+ f'WARNING: target gain in node {uid} is below all available amplifiers min gain: \
|
||||
amplifier input padding will be assumed, consider increase span fiber padding instead'\
|
||||
+ '\x1b[0m'
|
||||
)
|
||||
acceptable_gain_min_list = edfa_list
|
||||
|
||||
#filter on gain+power limitation:
|
||||
#this list checks both the gain and the power requirement
|
||||
#because of the way .power is calculated in the list
|
||||
acceptable_power_list = [x for x in acceptable_gain_min_list if x.power>0]
|
||||
if len(acceptable_power_list) < 1:
|
||||
#no amplifier satisfies the required power, so pick the highest power(s):
|
||||
power_max = max(acceptable_gain_min_list, key=attrgetter('power')).power
|
||||
#check and pick if other amplifiers may have a similar gain/power
|
||||
#allow a 0.3dB power range
|
||||
#this allows to chose an amplifier with a better NF subsequentely
|
||||
acceptable_power_list = [x for x in acceptable_gain_min_list
|
||||
if x.power-power_max>-0.3]
|
||||
|
||||
|
||||
# gain and power requirements are resolved,
|
||||
# =>chose the amp with the best NF among the acceptable ones:
|
||||
selected_edfa = min(acceptable_power_list, key=attrgetter('nf')) #filter on NF
|
||||
#check what are the gain and power limitations of this amp
|
||||
power_reduction = round(min(selected_edfa.power, 0),2)
|
||||
if power_reduction < -0.5:
|
||||
print(
|
||||
f'\x1b[1;31;40m'\
|
||||
+ f'WARNING: target gain and power in node {uid}\n \
|
||||
is beyond all available amplifiers capabilities and/or extended_gain_range:\n\
|
||||
a power reduction of {power_reduction} is applied\n'\
|
||||
+ '\x1b[0m'
|
||||
)
|
||||
|
||||
|
||||
return selected_edfa.variety, power_reduction
|
||||
|
||||
def target_power(network, node, equipment): #get_fiber_dp
|
||||
SPAN_LOSS_REF = 20
|
||||
POWER_SLOPE = 0.3
|
||||
power_mode = equipment['Span']['default'].power_mode
|
||||
dp_range = list(equipment['Span']['default'].delta_power_range_db)
|
||||
node_loss = span_loss(network, node)
|
||||
|
||||
try:
|
||||
dp = round2float((node_loss - SPAN_LOSS_REF) * POWER_SLOPE, dp_range[2])
|
||||
dp = max(dp_range[0], dp)
|
||||
dp = min(dp_range[1], dp)
|
||||
except KeyError:
|
||||
raise ConfigurationError(f'invalid delta_power_range_db definition in eqpt_config[Span]'
|
||||
f'delta_power_range_db: [lower_bound, upper_bound, step]')
|
||||
|
||||
if isinstance(node, Roadm):
|
||||
dp = 0
|
||||
|
||||
return dp
|
||||
|
||||
def prev_node_generator(network, node):
|
||||
"""fused spans interest:
|
||||
iterate over all predecessors while they are Fused or Fiber type"""
|
||||
try:
|
||||
prev_node = next(n for n in network.predecessors(node))
|
||||
except StopIteration:
|
||||
raise NetworkTopologyError(f'Node {node.uid} is not properly connected, please check network topology')
|
||||
# yield and re-iterate
|
||||
if isinstance(prev_node, Fused) or isinstance(node, Fused) and not isinstance(prev_node, Roadm):
|
||||
yield prev_node
|
||||
yield from prev_node_generator(network, prev_node)
|
||||
else:
|
||||
StopIteration
|
||||
|
||||
def next_node_generator(network, node):
|
||||
"""fused spans interest:
|
||||
iterate over all successors while they are Fused or Fiber type"""
|
||||
try:
|
||||
next_node = next(n for n in network.successors(node))
|
||||
except StopIteration:
|
||||
raise NetworkTopologyError('Node {node.uid} is not properly connected, please check network topology')
|
||||
# yield and re-iterate
|
||||
if isinstance(next_node, Fused) or isinstance(node, Fused) and not isinstance(next_node, Roadm):
|
||||
yield next_node
|
||||
yield from next_node_generator(network, next_node)
|
||||
else:
|
||||
StopIteration
|
||||
|
||||
def span_loss(network, node):
|
||||
"""Fused span interest:
|
||||
return the total span loss of all the fibers spliced by a Fused node"""
|
||||
loss = node.loss if node.passive else 0
|
||||
try:
|
||||
prev_node = next(n for n in network.predecessors(node))
|
||||
if isinstance(prev_node, Fused):
|
||||
loss += sum(n.loss for n in prev_node_generator(network, node))
|
||||
except StopIteration:
|
||||
pass
|
||||
try:
|
||||
next_node = next(n for n in network.successors(node))
|
||||
if isinstance(next_node, Fused):
|
||||
loss += sum(n.loss for n in next_node_generator(network, node))
|
||||
except StopIteration:
|
||||
pass
|
||||
return loss
|
||||
|
||||
def find_first_node(network, node):
|
||||
"""Fused node interest:
|
||||
returns the 1st node at the origin of a succession of fused nodes
|
||||
(aka no amp in between)"""
|
||||
this_node = node
|
||||
for this_node in prev_node_generator(network, node):
|
||||
pass
|
||||
return this_node
|
||||
|
||||
def find_last_node(network, node):
|
||||
"""Fused node interest:
|
||||
returns the last node in a succession of fused nodes
|
||||
(aka no amp in between)"""
|
||||
this_node = node
|
||||
for this_node in next_node_generator(network, node):
|
||||
pass
|
||||
return this_node
|
||||
|
||||
def set_amplifier_voa(amp, power_target, power_mode):
|
||||
VOA_MARGIN = 1 #do not maximize the VOA optimization
|
||||
if amp.out_voa is None:
|
||||
if power_mode:
|
||||
gain_target = amp.effective_gain
|
||||
voa = min(amp.params.p_max-power_target,
|
||||
amp.params.gain_flatmax-amp.effective_gain)
|
||||
voa = max(round2float(max(voa, 0), 0.5) - VOA_MARGIN, 0) if amp.params.out_voa_auto else 0
|
||||
amp.delta_p = amp.delta_p + voa
|
||||
amp.effective_gain = amp.effective_gain + voa
|
||||
else:
|
||||
voa = 0 # no output voa optimization in gain mode
|
||||
amp.out_voa = voa
|
||||
|
||||
def set_egress_amplifier(network, roadm, equipment, pref_total_db):
|
||||
power_mode = equipment['Span']['default'].power_mode
|
||||
next_oms = (n for n in network.successors(roadm) if not isinstance(n, Transceiver))
|
||||
for oms in next_oms:
|
||||
#go through all the OMS departing from the Roadm
|
||||
node = roadm
|
||||
prev_node = roadm
|
||||
next_node = oms
|
||||
# if isinstance(next_node, Fused): #support ROADM wo egress amp for metro applications
|
||||
# node = find_last_node(next_node)
|
||||
# next_node = next(n for n in network.successors(node))
|
||||
# next_node = find_last_node(next_node)
|
||||
|
||||
if node.per_degree_target_pch_out_db:
|
||||
# find the target power on this degree
|
||||
try:
|
||||
prev_dp = next(el["target_pch_out_db"] for el in \
|
||||
node.per_degree_target_pch_out_db if el["to_node"]==next_node.uid)
|
||||
except StopIteration:
|
||||
# if no target power is defined on this degree use the global one
|
||||
prev_dp = getattr(node.params, 'target_pch_out_db', 0)
|
||||
else:
|
||||
# if no per degree target power is given use the global one
|
||||
prev_dp = getattr(node.params, 'target_pch_out_db', 0)
|
||||
dp = prev_dp
|
||||
prev_voa = 0
|
||||
voa = 0
|
||||
while True:
|
||||
#go through all nodes in the OMS (loop until next Roadm instance)
|
||||
if isinstance(node, Edfa):
|
||||
node_loss = span_loss(network, prev_node)
|
||||
voa = node.out_voa if node.out_voa else 0
|
||||
if node.delta_p is None:
|
||||
dp = target_power(network, next_node, equipment)
|
||||
else:
|
||||
dp = node.delta_p
|
||||
gain_from_dp = node_loss + dp - prev_dp + prev_voa
|
||||
if node.effective_gain is None or power_mode:
|
||||
gain_target = gain_from_dp
|
||||
else: #gain mode with effective_gain
|
||||
gain_target = node.effective_gain
|
||||
dp = prev_dp - node_loss + gain_target
|
||||
|
||||
power_target = pref_total_db + dp
|
||||
|
||||
raman_allowed = False
|
||||
if isinstance(prev_node, Fiber):
|
||||
max_fiber_lineic_loss_for_raman = \
|
||||
equipment['Span']['default'].max_fiber_lineic_loss_for_raman
|
||||
raman_allowed = prev_node.params.loss_coef < max_fiber_lineic_loss_for_raman
|
||||
|
||||
# implementation of restrictions on roadm boosters
|
||||
if isinstance(prev_node,Roadm):
|
||||
if prev_node.restrictions['booster_variety_list']:
|
||||
restrictions = prev_node.restrictions['booster_variety_list']
|
||||
else:
|
||||
restrictions = None
|
||||
elif isinstance(next_node,Roadm):
|
||||
# implementation of restrictions on roadm preamp
|
||||
if next_node.restrictions['preamp_variety_list']:
|
||||
restrictions = next_node.restrictions['preamp_variety_list']
|
||||
else:
|
||||
restrictions = None
|
||||
else:
|
||||
restrictions = None
|
||||
|
||||
if node.params.type_variety == '':
|
||||
edfa_variety, power_reduction = select_edfa(raman_allowed,
|
||||
gain_target, power_target, equipment, node.uid, restrictions)
|
||||
extra_params = equipment['Edfa'][edfa_variety]
|
||||
node.params.update_params(extra_params.__dict__)
|
||||
dp += power_reduction
|
||||
gain_target += power_reduction
|
||||
elif node.params.raman and not raman_allowed:
|
||||
print(
|
||||
f'\x1b[1;31;40m'\
|
||||
+ f'WARNING: raman is used in node {node.uid}\n \
|
||||
but fiber lineic loss is above threshold\n'\
|
||||
+ '\x1b[0m'
|
||||
)
|
||||
|
||||
node.delta_p = dp if power_mode else None
|
||||
node.effective_gain = gain_target
|
||||
set_amplifier_voa(node, power_target, power_mode)
|
||||
if isinstance(next_node, Roadm) or isinstance(next_node, Transceiver):
|
||||
break
|
||||
prev_dp = dp
|
||||
prev_voa = voa
|
||||
prev_node = node
|
||||
node = next_node
|
||||
# print(f'{node.uid}')
|
||||
next_node = next(n for n in network.successors(node))
|
||||
|
||||
|
||||
def add_egress_amplifier(network, node):
|
||||
next_nodes = [n for n in network.successors(node)
|
||||
if not (isinstance(n, Transceiver) or isinstance(n, Fused) or isinstance(n, Edfa))]
|
||||
#no amplification for fused spans or TRX
|
||||
for i, next_node in enumerate(next_nodes):
|
||||
network.remove_edge(node, next_node)
|
||||
amp = Edfa(
|
||||
uid = f'Edfa{i}_{node.uid}',
|
||||
params = {},
|
||||
metadata = {
|
||||
'location': {
|
||||
'latitude': (node.lat * 2 + next_node.lat * 2) / 4,
|
||||
'longitude': (node.lng * 2 + next_node.lng * 2) / 4,
|
||||
'city': node.loc.city,
|
||||
'region': node.loc.region,
|
||||
}
|
||||
},
|
||||
operational = {
|
||||
'gain_target': None,
|
||||
'tilt_target': 0,
|
||||
})
|
||||
network.add_node(amp)
|
||||
if isinstance(node,Fiber):
|
||||
edgeweight = node.params.length
|
||||
else:
|
||||
edgeweight = 0.01
|
||||
network.add_edge(node, amp, weight = edgeweight)
|
||||
network.add_edge(amp, next_node, weight = 0.01)
|
||||
|
||||
|
||||
def calculate_new_length(fiber_length, bounds, target_length):
|
||||
if fiber_length < bounds.stop:
|
||||
return fiber_length, 1
|
||||
|
||||
n_spans = int(fiber_length // target_length)
|
||||
|
||||
length1 = fiber_length / (n_spans+1)
|
||||
delta1 = target_length-length1
|
||||
result1 = (length1, n_spans+1)
|
||||
|
||||
length2 = fiber_length / n_spans
|
||||
delta2 = length2-target_length
|
||||
result2 = (length2, n_spans)
|
||||
|
||||
if (bounds.start<=length1<=bounds.stop) and not(bounds.start<=length2<=bounds.stop):
|
||||
result = result1
|
||||
elif (bounds.start<=length2<=bounds.stop) and not(bounds.start<=length1<=bounds.stop):
|
||||
result = result2
|
||||
else:
|
||||
result = result1 if delta1 < delta2 else result2
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def split_fiber(network, fiber, bounds, target_length, equipment):
|
||||
new_length, n_spans = calculate_new_length(fiber.length, bounds, target_length)
|
||||
if n_spans == 1:
|
||||
return
|
||||
|
||||
try:
|
||||
next_node = next(network.successors(fiber))
|
||||
prev_node = next(network.predecessors(fiber))
|
||||
except StopIteration:
|
||||
raise NetworkTopologyError(f'Fiber {fiber.uid} is not properly connected, please check network topology')
|
||||
|
||||
network.remove_node(fiber)
|
||||
|
||||
fiber_params = fiber.params._asdict()
|
||||
fiber_params['length'] = new_length / UNITS[fiber.params.length_units]
|
||||
fiber_params['con_in'] = fiber.con_in
|
||||
fiber_params['con_out'] = fiber.con_out
|
||||
|
||||
f = interp1d([prev_node.lng, next_node.lng], [prev_node.lat, next_node.lat])
|
||||
xpos = [prev_node.lng + (next_node.lng - prev_node.lng) * (n+1)/(n_spans+1) for n in range(n_spans)]
|
||||
ypos = f(xpos)
|
||||
for span, lng, lat in zip(range(n_spans), xpos, ypos):
|
||||
new_span = Fiber(uid = f'{fiber.uid}_({span+1}/{n_spans})',
|
||||
metadata = {
|
||||
'location': {
|
||||
'latitude': lat,
|
||||
'longitude': lng,
|
||||
'city': fiber.loc.city,
|
||||
'region': fiber.loc.region,
|
||||
}
|
||||
},
|
||||
params = fiber_params)
|
||||
if isinstance(prev_node,Fiber):
|
||||
edgeweight = prev_node.params.length
|
||||
else:
|
||||
edgeweight = 0.01
|
||||
network.add_edge(prev_node, new_span, weight = edgeweight)
|
||||
prev_node = new_span
|
||||
if isinstance(prev_node,Fiber):
|
||||
edgeweight = prev_node.params.length
|
||||
else:
|
||||
edgeweight = 0.01
|
||||
network.add_edge(prev_node, next_node, weight = edgeweight)
|
||||
|
||||
def add_connector_loss(network, fibers, default_con_in, default_con_out, EOL):
|
||||
for fiber in fibers:
|
||||
if fiber.con_in is None: fiber.con_in = default_con_in
|
||||
if fiber.con_out is None: fiber.con_out = default_con_out
|
||||
next_node = next(n for n in network.successors(fiber))
|
||||
if not isinstance(next_node, Fused):
|
||||
fiber.con_out += EOL
|
||||
|
||||
def add_fiber_padding(network, fibers, padding):
|
||||
"""last_fibers = (fiber for n in network.nodes()
|
||||
if not (isinstance(n, Fiber) or isinstance(n, Fused))
|
||||
for fiber in network.predecessors(n)
|
||||
if isinstance(fiber, Fiber))"""
|
||||
for fiber in fibers:
|
||||
this_span_loss = span_loss(network, fiber)
|
||||
try:
|
||||
next_node = next(network.successors(fiber))
|
||||
except StopIteration:
|
||||
raise NetworkTopologyError(f'Fiber {fiber.uid} is not properly connected, please check network topology')
|
||||
if this_span_loss < padding and not (isinstance(next_node, Fused)):
|
||||
#add a padding att_in at the input of the 1st fiber:
|
||||
#address the case when several fibers are spliced together
|
||||
first_fiber = find_first_node(network, fiber)
|
||||
# in order to support no booster , fused might be placed
|
||||
# just after a roadm: need to check that first_fiber is really a fiber
|
||||
if isinstance(first_fiber,Fiber):
|
||||
if first_fiber.att_in is None:
|
||||
first_fiber.att_in = padding - this_span_loss
|
||||
else:
|
||||
first_fiber.att_in = first_fiber.att_in + padding - this_span_loss
|
||||
|
||||
def build_network(network, equipment, pref_ch_db, pref_total_db):
|
||||
default_span_data = equipment['Span']['default']
|
||||
max_length = int(default_span_data.max_length * UNITS[default_span_data.length_units])
|
||||
min_length = max(int(default_span_data.padding/0.2*1e3),50_000)
|
||||
bounds = range(min_length, max_length)
|
||||
target_length = max(min_length, 90_000)
|
||||
default_con_in = default_span_data.con_in
|
||||
default_con_out = default_span_data.con_out
|
||||
padding = default_span_data.padding
|
||||
|
||||
# set the roadm loss for gain_mode before building the network
|
||||
fibers = [f for f in network.nodes() if isinstance(f, Fiber)]
|
||||
add_connector_loss(network, fibers, default_con_in, default_con_out, default_span_data.EOL)
|
||||
add_fiber_padding(network, fibers, padding)
|
||||
# don't split fibers and add amplifiers in the same loop
# => for code clarity (at the expense of speed):
|
||||
for fiber in fibers:
|
||||
split_fiber(network, fiber, bounds, target_length, equipment)
|
||||
|
||||
amplified_nodes = [n for n in network.nodes()
|
||||
if isinstance(n, Fiber) or isinstance(n, Roadm)]
|
||||
|
||||
for node in amplified_nodes:
|
||||
add_egress_amplifier(network, node)
|
||||
|
||||
roadms = [r for r in network.nodes() if isinstance(r, Roadm)]
|
||||
for roadm in roadms:
|
||||
set_egress_amplifier(network, roadm, equipment, pref_total_db)
|
||||
|
||||
# support older json input topologies without Roadms:
|
||||
if len(roadms) == 0:
|
||||
trx = [t for t in network.nodes() if isinstance(t, Transceiver)]
|
||||
for t in trx:
|
||||
set_egress_amplifier(network, t, equipment, pref_total_db)
|
||||
|
||||
def load_sim_params(filename):
|
||||
sim_params = load_json(filename)
|
||||
return SimParams(params=sim_params)
|
||||
|
||||
def configure_network(network, sim_params):
|
||||
for node in network.nodes:
|
||||
if isinstance(node, RamanFiber):
|
||||
node.sim_params = sim_params
|
||||
gnpy/core/node.py | 56 lines | new file
@@ -0,0 +1,56 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.node
|
||||
==============
|
||||
|
||||
This module contains the base class for a network element.
|
||||
|
||||
Strictly, a network element is any callable which accepts an immutable
|
||||
:class:`.info.SpectralInformation` object and returns an :class:`.info.SpectralInformation` object
|
||||
(a copy).
|
||||
|
||||
Network elements MUST implement two attributes .uid and .name representing a
|
||||
unique identifier and a printable name.
|
||||
|
||||
This base class provides a more convenient way to define a network element
|
||||
via subclassing.
|
||||
'''
|
||||
|
||||
from uuid import uuid4
|
||||
from collections import namedtuple
|
||||
|
||||
class Location(namedtuple('Location', 'latitude longitude city region')):
|
||||
def __new__(cls, latitude=0, longitude=0, city=None, region=None):
|
||||
return super().__new__(cls, latitude, longitude, city, region)
|
||||
|
||||
class Node:
|
||||
def __init__(self, uid, name=None, params=None, metadata=None, operational=None):
|
||||
if name is None:
|
||||
name = uid
|
||||
self.uid, self.name = uid, name
|
||||
if metadata is None:
|
||||
metadata = {'location': {}}
|
||||
if metadata and not isinstance(metadata.get('location'), Location):
|
||||
metadata['location'] = Location(**metadata.pop('location', {}))
|
||||
self.params, self.metadata, self.operational = params, metadata, operational
|
||||
|
||||
@property
|
||||
def coords(self):
|
||||
return self.lng, self.lat
|
||||
|
||||
@property
|
||||
def location(self):
|
||||
return self.metadata['location']
|
||||
loc = location
|
||||
|
||||
@property
|
||||
def longitude(self):
|
||||
return self.location.longitude
|
||||
lng = longitude
|
||||
|
||||
@property
|
||||
def latitude(self):
|
||||
return self.location.latitude
|
||||
lat = latitude
|
||||
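To make the subclassing contract described in the module docstring concrete, here is a minimal sketch (an editor's illustration, not part of the diff): the Dummy element and its pass-through __call__ are hypothetical, but the Node constructor, the metadata handling and the coords/lng/lat properties are exactly the ones defined above.

class Dummy(Node):
    # a real network element would return a modified copy of the spectral information
    def __call__(self, spectral_info):
        return spectral_info

d = Dummy(uid='dummy-1', metadata={'location': {'city': 'Paris'}})
print(d.uid, d.name, d.coords)   # dummy-1 dummy-1 (0, 0)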
gnpy/core/request.py | 1097 lines | new file (diff suppressed because it is too large)
gnpy/core/science_utils.py | 820 lines | new file
@@ -0,0 +1,820 @@
|
||||
import numpy as np
|
||||
from operator import attrgetter
|
||||
from collections import namedtuple
|
||||
from logging import getLogger
|
||||
import scipy.constants as ph
|
||||
from scipy.integrate import solve_bvp
|
||||
from scipy.integrate import cumtrapz
|
||||
from scipy.interpolate import interp1d
|
||||
from scipy.optimize import OptimizeResult
|
||||
|
||||
from gnpy.core.utils import db2lin
|
||||
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
|
||||
class RamanParams():
|
||||
def __init__(self, params):
|
||||
self._flag_raman = params['flag_raman']
|
||||
self._space_resolution = params['space_resolution']
|
||||
self._tolerance = params['tolerance']
|
||||
|
||||
@property
|
||||
def flag_raman(self):
|
||||
return self._flag_raman
|
||||
|
||||
@property
|
||||
def space_resolution(self):
|
||||
return self._space_resolution
|
||||
|
||||
@property
|
||||
def tolerance(self):
|
||||
return self._tolerance
|
||||
|
||||
class NLIParams():
|
||||
def __init__(self, params):
|
||||
self._nli_method_name = params['nli_method_name']
|
||||
self._wdm_grid_size = params['wdm_grid_size']
|
||||
self._dispersion_tolerance = params['dispersion_tolerance']
|
||||
self._phase_shift_tollerance = params['phase_shift_tollerance']
|
||||
self._f_cut_resolution = None
|
||||
self._f_pump_resolution = None
|
||||
|
||||
@property
|
||||
def nli_method_name(self):
|
||||
return self._nli_method_name
|
||||
|
||||
@property
|
||||
def wdm_grid_size(self):
|
||||
return self._wdm_grid_size
|
||||
|
||||
@property
|
||||
def dispersion_tolerance(self):
|
||||
return self._dispersion_tolerance
|
||||
|
||||
@property
|
||||
def phase_shift_tollerance(self):
|
||||
return self._phase_shift_tollerance
|
||||
|
||||
@property
|
||||
def f_cut_resolution(self):
|
||||
return self._f_cut_resolution
|
||||
|
||||
@f_cut_resolution.setter
|
||||
def f_cut_resolution(self, f_cut_resolution):
|
||||
self._f_cut_resolution = f_cut_resolution
|
||||
|
||||
@property
|
||||
def f_pump_resolution(self):
|
||||
return self._f_pump_resolution
|
||||
|
||||
@f_pump_resolution.setter
|
||||
def f_pump_resolution(self, f_pump_resolution):
|
||||
self._f_pump_resolution = f_pump_resolution
|
||||
|
||||
class SimParams():
|
||||
def __init__(self, params):
|
||||
self._raman_computed_channels = params['raman_computed_channels']
|
||||
self._raman_params = RamanParams(params=params['raman_parameters'])
|
||||
self._nli_params = NLIParams(params=params['nli_parameters'])
|
||||
|
||||
@property
|
||||
def raman_computed_channels(self):
|
||||
return self._raman_computed_channels
|
||||
|
||||
@property
|
||||
def raman_params(self):
|
||||
return self._raman_params
|
||||
|
||||
@property
|
||||
def nli_params(self):
|
||||
return self._nli_params
|
||||
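For reference, a dictionary with the shape expected by SimParams (an editor's sketch: only the key names are taken from the constructors above, the numeric values are illustrative placeholders; load_sim_params() in network.py builds the same object from a JSON file of this shape).

example_params = {
    'raman_computed_channels': [1, 25, 48],                 # placeholder channel numbers
    'raman_parameters': {
        'flag_raman': True,
        'space_resolution': 10e3,                           # [m], placeholder
        'tolerance': 1e-8                                   # solver tolerance, placeholder
    },
    'nli_parameters': {
        'nli_method_name': 'ggn_spectrally_separated_xpm_spm',
        'wdm_grid_size': 50e9,                              # [Hz], placeholder
        'dispersion_tolerance': 1,                          # placeholder
        'phase_shift_tollerance': 0.1                       # key spelling follows NLIParams above
    }
}
example_sim_params = SimParams(params=example_params)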
|
||||
class FiberParams():
|
||||
def __init__(self, fiber):
|
||||
self._loss_coef = 2 * fiber.dbkm_2_lin()[1]
|
||||
self._length = fiber.length
|
||||
self._gamma = fiber.gamma
|
||||
self._beta2 = fiber.beta2()
|
||||
self._beta3 = fiber.beta3 if hasattr(fiber, 'beta3') else 0
|
||||
self._f_ref_beta = fiber.f_ref_beta if hasattr(fiber, 'f_ref_beta') else 0
|
||||
self._raman_efficiency = fiber.params.raman_efficiency
|
||||
self._temperature = fiber.operational['temperature']
|
||||
|
||||
@property
|
||||
def loss_coef(self):
|
||||
return self._loss_coef
|
||||
|
||||
@property
|
||||
def length(self):
|
||||
return self._length
|
||||
|
||||
@property
|
||||
def gamma(self):
|
||||
return self._gamma
|
||||
|
||||
@property
|
||||
def beta2(self):
|
||||
return self._beta2
|
||||
|
||||
@property
|
||||
def beta3(self):
|
||||
return self._beta3
|
||||
|
||||
@property
|
||||
def f_ref_beta(self):
|
||||
return self._f_ref_beta
|
||||
|
||||
@property
|
||||
def raman_efficiency(self):
|
||||
return self._raman_efficiency
|
||||
|
||||
@property
|
||||
def temperature(self):
|
||||
return self._temperature
|
||||
|
||||
def alpha0(self, f_ref=193.5e12):
|
||||
""" It returns the zero element of the series expansion of attenuation coefficient alpha(f) in the
|
||||
reference frequency f_ref
|
||||
|
||||
:param f_ref: reference frequency of series expansion [Hz]
|
||||
:return: alpha0: power attenuation coefficient in f_ref [Neper/m]
|
||||
"""
|
||||
if not hasattr(self.loss_coef, 'alpha_power'):
|
||||
alpha0 = self.loss_coef
|
||||
else:
|
||||
alpha_interp = interp1d(self.loss_coef['frequency'],
|
||||
self.loss_coef['alpha_power'])
|
||||
alpha0 = alpha_interp(f_ref)
|
||||
return alpha0
|
||||
|
||||
pump = namedtuple('RamanPump', 'power frequency propagation_direction')
|
||||
|
||||
def propagate_raman_fiber(fiber, *carriers):
|
||||
sim_params = fiber.sim_params
|
||||
raman_params = fiber.sim_params.raman_params
|
||||
nli_params = fiber.sim_params.nli_params
|
||||
# apply input attenuation to carriers
|
||||
attenuation_in = db2lin(fiber.con_in + fiber.att_in)
|
||||
chan = []
|
||||
for carrier in carriers:
|
||||
pwr = carrier.power
|
||||
pwr = pwr._replace(signal=pwr.signal / attenuation_in,
|
||||
nli=pwr.nli / attenuation_in,
|
||||
ase=pwr.ase / attenuation_in)
|
||||
carrier = carrier._replace(power=pwr)
|
||||
chan.append(carrier)
|
||||
carriers = tuple(f for f in chan)
|
||||
fiber_params = FiberParams(fiber)
|
||||
|
||||
# evaluate fiber attenuation involving also SRS if required by sim_params
|
||||
if 'raman_pumps' in fiber.operational:
|
||||
raman_pumps = tuple(pump(p['power'], p['frequency'], p['propagation_direction'])
|
||||
for p in fiber.operational['raman_pumps'])
|
||||
else:
|
||||
raman_pumps = None
|
||||
raman_solver = RamanSolver(raman_params=raman_params, fiber_params=fiber_params)
|
||||
stimulated_raman_scattering = raman_solver.stimulated_raman_scattering(carriers=carriers,
|
||||
raman_pumps=raman_pumps)
|
||||
fiber_attenuation = (stimulated_raman_scattering.rho[:, -1])**-2
|
||||
if not raman_params.flag_raman:
|
||||
fiber_attenuation = tuple(fiber.lin_attenuation for _ in carriers)
|
||||
|
||||
# evaluate Raman ASE noise if required by sim_params and if raman pumps are present
|
||||
if raman_params.flag_raman and raman_pumps:
|
||||
raman_ase = raman_solver.spontaneous_raman_scattering.power[:, -1]
|
||||
else:
|
||||
raman_ase = tuple(0 for _ in carriers)
|
||||
|
||||
# evaluate nli and propagate in fiber
|
||||
attenuation_out = db2lin(fiber.con_out)
|
||||
nli_solver = NliSolver(nli_params=nli_params, fiber_params=fiber_params)
|
||||
nli_solver.stimulated_raman_scattering = stimulated_raman_scattering
|
||||
|
||||
nli_frequencies = []
|
||||
computed_nli = []
|
||||
for carrier in (c for c in carriers if c.channel_number in sim_params.raman_computed_channels):
|
||||
resolution_param = frequency_resolution(carrier, carriers, sim_params, fiber_params)
|
||||
f_cut_resolution, f_pump_resolution, _, _ = resolution_param
|
||||
nli_params.f_cut_resolution = f_cut_resolution
|
||||
nli_params.f_pump_resolution = f_pump_resolution
|
||||
nli_frequencies.append(carrier.frequency)
|
||||
computed_nli.append(nli_solver.compute_nli(carrier, *carriers))
|
||||
|
||||
new_carriers = []
|
||||
for carrier, attenuation, rmn_ase in zip(carriers, fiber_attenuation, raman_ase):
|
||||
carrier_nli = np.interp(carrier.frequency, nli_frequencies, computed_nli)
|
||||
pwr = carrier.power
|
||||
pwr = pwr._replace(signal=pwr.signal/attenuation/attenuation_out,
|
||||
nli=(pwr.nli+carrier_nli)/attenuation/attenuation_out,
|
||||
ase=((pwr.ase/attenuation)+rmn_ase)/attenuation_out)
|
||||
new_carriers.append(carrier._replace(power=pwr))
|
||||
return new_carriers
|
||||
|
||||
def frequency_resolution(carrier, carriers, sim_params, fiber_params):
|
||||
def _get_freq_res_k_phi(delta_count, grid_size, alpha0, delta_z, beta2, k_tol, phi_tol):
|
||||
res_phi = _get_freq_res_phase_rotation(delta_count, grid_size, delta_z, beta2, phi_tol)
|
||||
res_k = _get_freq_res_dispersion_attenuation(delta_count, grid_size, alpha0, beta2, k_tol)
|
||||
res_dict = {'res_phi': res_phi, 'res_k': res_k}
|
||||
method = min(res_dict, key=res_dict.get)
|
||||
return res_dict[method], method, res_dict
|
||||
|
||||
def _get_freq_res_dispersion_attenuation(delta_count, grid_size, alpha0, beta2, k_tol):
|
||||
return k_tol * abs(alpha0) / abs(beta2) / (1 + delta_count) / (4 * np.pi ** 2 * grid_size)
|
||||
|
||||
def _get_freq_res_phase_rotation(delta_count, grid_size, delta_z, beta2, phi_tol):
|
||||
return phi_tol / abs(beta2) / (1 + delta_count) / delta_z / (4 * np.pi ** 2 * grid_size)
|
||||
|
||||
grid_size = sim_params.nli_params.wdm_grid_size
|
||||
delta_z = sim_params.raman_params.space_resolution
|
||||
alpha0 = fiber_params.alpha0()
|
||||
beta2 = fiber_params.beta2
|
||||
k_tol = sim_params.nli_params.dispersion_tolerance
|
||||
phi_tol = sim_params.nli_params.phase_shift_tollerance
|
||||
f_pump_resolution, method_f_pump, res_dict_pump = \
|
||||
_get_freq_res_k_phi(0, grid_size, alpha0, delta_z, beta2, k_tol, phi_tol)
|
||||
f_cut_resolution = {}
|
||||
method_f_cut = {}
|
||||
res_dict_cut = {}
|
||||
for cut_carrier in carriers:
|
||||
delta_number = cut_carrier.channel_number - carrier.channel_number
|
||||
delta_count = abs(delta_number)
|
||||
f_res, method, res_dict = \
|
||||
_get_freq_res_k_phi(delta_count, grid_size, alpha0, delta_z, beta2, k_tol, phi_tol)
|
||||
f_cut_resolution[f'delta_{delta_number}'] = f_res
|
||||
method_f_cut[delta_number] = method
|
||||
res_dict_cut[delta_number] = res_dict
|
||||
return [f_cut_resolution, f_pump_resolution, (method_f_cut, method_f_pump), (res_dict_cut, res_dict_pump)]
|
||||
|
||||
def raised_cosine_comb(f, *carriers):
|
||||
""" Returns an array storing the PSD of a WDM comb of raised cosine shaped
|
||||
channels at the input frequencies defined in array f
|
||||
:param f: numpy array of frequencies in Hz
|
||||
:param carriers: namedtuple describing the WDM comb
|
||||
:return: PSD of the WDM comb evaluated over f
|
||||
"""
|
||||
psd = np.zeros(np.shape(f))
|
||||
for carrier in carriers:
|
||||
f_nch = carrier.frequency
|
||||
g_ch = carrier.power.signal / carrier.baud_rate
|
||||
ts = 1 / carrier.baud_rate
|
||||
passband = (1 - carrier.roll_off) / (2 / carrier.baud_rate)
|
||||
stopband = (1 + carrier.roll_off) / (2 / carrier.baud_rate)
|
||||
ff = np.abs(f - f_nch)
|
||||
tf = ff - passband
|
||||
if carrier.roll_off == 0:
|
||||
psd = np.where(tf <= 0, g_ch, 0.) + psd
|
||||
else:
|
||||
psd = g_ch * (np.where(tf <= 0, 1., 0.) + 1 / 2 * (1 + np.cos(np.pi * ts / carrier.roll_off * tf)) *
|
||||
np.where(tf > 0, 1., 0.) * np.where(np.abs(ff) <= stopband, 1., 0.)) + psd
|
||||
return psd
|
||||
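A quick sanity check of raised_cosine_comb (an editor's sketch, not part of the diff; the Channel/Power namedtuples below are stand-ins for gnpy's spectral-information types and reuse numpy and namedtuple imported at the top of this module): the PSD at the carrier frequency equals power/baud_rate and its integral recovers the launched signal power.

Power = namedtuple('Power', 'signal nli ase')
Channel = namedtuple('Channel', 'channel_number frequency baud_rate roll_off power')
ch = Channel(1, 193.1e12, 32e9, 0.15, Power(1e-3, 0, 0))
f = np.linspace(193.1e12 - 25e9, 193.1e12 + 25e9, 2001)
psd = raised_cosine_comb(f, ch)
# psd[1000] == 1e-3 / 32e9 (flat top) and np.trapz(psd, f) ~= 1e-3 W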
|
||||
class RamanSolver:
|
||||
def __init__(self, raman_params=None, fiber_params=None):
|
||||
""" Initialize the fiber object with its physical parameters
|
||||
:param length: fiber length in m.
|
||||
:param alphap: fiber power attenuation coefficient vs frequency in 1/m. numpy array
|
||||
:param freq_alpha: frequency axis of alphap in Hz. numpy array
|
||||
:param cr_raman: Raman efficiency vs frequency offset in 1/W/m. numpy array
|
||||
:param freq_cr: reference frequency offset axis for cr_raman. numpy array
|
||||
:param raman_params: namedtuple containing the solver parameters (optional).
|
||||
"""
|
||||
self.fiber_params = fiber_params
|
||||
self.raman_params = raman_params
|
||||
self._carriers = None
|
||||
self._stimulated_raman_scattering = None
|
||||
self._spontaneous_raman_scattering = None
|
||||
|
||||
@property
|
||||
def fiber_params(self):
|
||||
return self._fiber_params
|
||||
|
||||
@fiber_params.setter
|
||||
def fiber_params(self, fiber_params):
|
||||
self._stimulated_raman_scattering = None
|
||||
self._fiber_params = fiber_params
|
||||
|
||||
@property
|
||||
def carriers(self):
|
||||
return self._carriers
|
||||
|
||||
@carriers.setter
|
||||
def carriers(self, carriers):
|
||||
"""
|
||||
:param carriers: tuple of namedtuples containing information about carriers
|
||||
:return:
|
||||
"""
|
||||
self._carriers = carriers
|
||||
self._stimulated_raman_scattering = None
|
||||
|
||||
@property
|
||||
def raman_pumps(self):
|
||||
return self._raman_pumps
|
||||
|
||||
@raman_pumps.setter
|
||||
def raman_pumps(self, raman_pumps):
|
||||
self._raman_pumps = raman_pumps
|
||||
self._stimulated_raman_scattering = None
|
||||
|
||||
@property
|
||||
def raman_params(self):
|
||||
return self._raman_params
|
||||
|
||||
@raman_params.setter
|
||||
def raman_params(self, raman_params):
|
||||
"""
|
||||
:param raman_params: namedtuple containing the solver parameters (optional).
|
||||
:return:
|
||||
"""
|
||||
self._raman_params = raman_params
|
||||
self._stimulated_raman_scattering = None
|
||||
self._spontaneous_raman_scattering = None
|
||||
|
||||
@property
|
||||
def spontaneous_raman_scattering(self):
|
||||
if self._spontaneous_raman_scattering is None:
|
||||
# SET STUFF
|
||||
loss_coef = self.fiber_params.loss_coef
|
||||
raman_efficiency = self.fiber_params.raman_efficiency
|
||||
temperature = self.fiber_params.temperature
|
||||
carriers = self.carriers
|
||||
raman_pumps = self.raman_pumps
|
||||
|
||||
logger.debug('Start computing fiber Spontaneous Raman Scattering')
|
||||
power_spectrum, freq_array, prop_direct, bn_array = self._compute_power_spectrum(carriers, raman_pumps)
|
||||
|
||||
if not hasattr(loss_coef, 'alpha_power'):
|
||||
alphap_fiber = loss_coef * np.ones(freq_array.shape)
|
||||
else:
|
||||
interp_alphap = interp1d(loss_coef['frequency'], loss_coef['alpha_power'])
|
||||
alphap_fiber = interp_alphap(freq_array)
|
||||
|
||||
freq_diff = abs(freq_array - np.reshape(freq_array, (len(freq_array), 1)))
|
||||
interp_cr = interp1d(raman_efficiency['frequency_offset'], raman_efficiency['cr'])
|
||||
cr = interp_cr(freq_diff)
|
||||
|
||||
# z propagation axis
|
||||
z_array = self._stimulated_raman_scattering.z
|
||||
ase_bc = np.zeros(freq_array.shape)
|
||||
|
||||
# calculate ase power
|
||||
spontaneous_raman_scattering = self._int_spontaneous_raman(z_array, self._stimulated_raman_scattering.power,
|
||||
alphap_fiber, freq_array, cr, freq_diff, ase_bc,
|
||||
bn_array, temperature)
|
||||
|
||||
setattr(spontaneous_raman_scattering, 'frequency', freq_array)
|
||||
setattr(spontaneous_raman_scattering, 'z', z_array)
|
||||
setattr(spontaneous_raman_scattering, 'power', spontaneous_raman_scattering.x)
|
||||
delattr(spontaneous_raman_scattering, 'x')
|
||||
|
||||
logger.debug(spontaneous_raman_scattering.message)
|
||||
|
||||
self._spontaneous_raman_scattering = spontaneous_raman_scattering
|
||||
|
||||
return self._spontaneous_raman_scattering
|
||||
|
||||
@staticmethod
|
||||
def _compute_power_spectrum(carriers, raman_pumps=None):
|
||||
"""
|
||||
Rearrangement of spectral and Raman pump information to make them compatible with Raman solver
|
||||
:param carriers: a tuple of namedtuples describing the transmitted channels
|
||||
:param raman_pumps: a namedtuple describing the Raman pumps
|
||||
:return:
|
||||
"""
|
||||
|
||||
# Signal power spectrum
|
||||
pow_array = np.array([])
|
||||
f_array = np.array([])
|
||||
noise_bandwidth_array = np.array([])
|
||||
for carrier in sorted(carriers, key=attrgetter('frequency')):
|
||||
f_array = np.append(f_array, carrier.frequency)
|
||||
pow_array = np.append(pow_array, carrier.power.signal)
|
||||
ref_bw = carrier.baud_rate
|
||||
noise_bandwidth_array = np.append(noise_bandwidth_array, ref_bw)
|
||||
|
||||
propagation_direction = np.ones(len(f_array))
|
||||
|
||||
# Raman pump power spectrum
|
||||
if raman_pumps:
|
||||
for pump in raman_pumps:
|
||||
pow_array = np.append(pow_array, pump.power)
|
||||
f_array = np.append(f_array, pump.frequency)
|
||||
direction = +1 if pump.propagation_direction.lower() == 'coprop' else -1
|
||||
propagation_direction = np.append(propagation_direction, direction)
|
||||
noise_bandwidth_array = np.append(noise_bandwidth_array, ref_bw)
|
||||
|
||||
# Final sorting
|
||||
ind = np.argsort(f_array)
|
||||
f_array = f_array[ind]
|
||||
pow_array = pow_array[ind]
|
||||
propagation_direction = propagation_direction[ind]
|
||||
|
||||
return pow_array, f_array, propagation_direction, noise_bandwidth_array
|
||||
|
||||
def _int_spontaneous_raman(self, z_array, raman_matrix, alphap_fiber, freq_array, cr_raman_matrix, freq_diff, ase_bc, bn_array, temperature):
|
||||
spontaneous_raman_scattering = OptimizeResult()
|
||||
|
||||
dx = self.raman_params.space_resolution
|
||||
h = ph.value('Planck constant')
|
||||
kb = ph.value('Boltzmann constant')
|
||||
|
||||
power_ase = np.nan * np.ones(raman_matrix.shape)
|
||||
int_pump = cumtrapz(raman_matrix, z_array, dx=dx, axis=1, initial=0)
|
||||
|
||||
for f_ind, f_ase in enumerate(freq_array):
|
||||
cr_raman = cr_raman_matrix[f_ind, :]
|
||||
vibrational_loss = f_ase / freq_array[:f_ind]
|
||||
eta = 1/(np.exp((h*freq_diff[f_ind, f_ind+1:])/(kb*temperature)) - 1)
|
||||
|
||||
int_fiber_loss = -alphap_fiber[f_ind] * z_array
|
||||
int_raman_loss = np.sum((cr_raman[:f_ind] * vibrational_loss * int_pump[:f_ind, :].transpose()).transpose(), axis=0)
|
||||
int_raman_gain = np.sum((cr_raman[f_ind + 1:] * int_pump[f_ind + 1:, :].transpose()).transpose(), axis=0)
|
||||
|
||||
int_gain_loss = int_fiber_loss + int_raman_gain + int_raman_loss
|
||||
|
||||
new_ase = np.sum((cr_raman[f_ind+1:] * (1 + eta) * raman_matrix[f_ind+1:, :].transpose()).transpose() * h * f_ase * bn_array[f_ind], axis=0)
|
||||
|
||||
bc_evolution = ase_bc[f_ind] * np.exp(int_gain_loss)
|
||||
ase_evolution = np.exp(int_gain_loss) * cumtrapz(new_ase*np.exp(-int_gain_loss), z_array, dx=dx, initial=0)
|
||||
|
||||
power_ase[f_ind, :] = bc_evolution + ase_evolution
|
||||
|
||||
spontaneous_raman_scattering.x = 2 * power_ase
|
||||
spontaneous_raman_scattering.success = True
|
||||
spontaneous_raman_scattering.message = "Spontaneous Raman Scattering evaluated successfully"
|
||||
|
||||
return spontaneous_raman_scattering
|
||||
|
||||
def stimulated_raman_scattering(self, carriers, raman_pumps=None):
|
||||
""" Returns stimulated Raman scattering solution including
|
||||
fiber gain/loss profile.
|
||||
:return: self._stimulated_raman_scattering: the SRS problem solution.
|
||||
scipy.interpolate.PPoly instance
|
||||
"""
|
||||
|
||||
if self._stimulated_raman_scattering is None:
|
||||
# fiber parameters
|
||||
fiber_length = self.fiber_params.length
|
||||
loss_coef = self.fiber_params.loss_coef
|
||||
if self.raman_params.flag_raman:
|
||||
raman_efficiency = self.fiber_params.raman_efficiency
|
||||
else:
|
||||
raman_efficiency = self.fiber_params.raman_efficiency
|
||||
raman_efficiency['cr'] = np.array(raman_efficiency['cr']) * 0
|
||||
# raman solver parameters
|
||||
z_resolution = self.raman_params.space_resolution
|
||||
tolerance = self.raman_params.tolerance
|
||||
|
||||
logger.debug('Start computing fiber Stimulated Raman Scattering')
|
||||
|
||||
power_spectrum, freq_array, prop_direct, _ = self._compute_power_spectrum(carriers, raman_pumps)
|
||||
|
||||
if not hasattr(loss_coef, 'alpha_power'):
|
||||
alphap_fiber = loss_coef * np.ones(freq_array.shape)
|
||||
else:
|
||||
interp_alphap = interp1d(loss_coef['frequency'], loss_coef['alpha_power'])
|
||||
alphap_fiber = interp_alphap(freq_array)
|
||||
|
||||
freq_diff = abs(freq_array - np.reshape(freq_array, (len(freq_array), 1)))
|
||||
interp_cr = interp1d(raman_efficiency['frequency_offset'], raman_efficiency['cr'])
|
||||
cr = interp_cr(freq_diff)
|
||||
|
||||
# z propagation axis
|
||||
z = np.arange(0, fiber_length+1, z_resolution)
|
||||
|
||||
ode_function = lambda z, p: self._ode_stimulated_raman(z, p, alphap_fiber, freq_array, cr, prop_direct)
|
||||
boundary_residual = lambda ya, yb: self._residuals_stimulated_raman(ya, yb, power_spectrum, prop_direct)
|
||||
initial_guess_conditions = self._initial_guess_stimulated_raman(z, power_spectrum, alphap_fiber, prop_direct)
|
||||
|
||||
# ODE SOLVER
|
||||
stimulated_raman_scattering = solve_bvp(ode_function, boundary_residual, z, initial_guess_conditions, tol=tolerance)
|
||||
|
||||
rho = (stimulated_raman_scattering.y.transpose() / power_spectrum).transpose()
|
||||
rho = np.sqrt(rho) # From power attenuation to field attenuation
|
||||
setattr(stimulated_raman_scattering, 'frequency', freq_array)
|
||||
setattr(stimulated_raman_scattering, 'z', stimulated_raman_scattering.x)
|
||||
setattr(stimulated_raman_scattering, 'rho', rho)
|
||||
setattr(stimulated_raman_scattering, 'power', stimulated_raman_scattering.y)
|
||||
delattr(stimulated_raman_scattering, 'x')
|
||||
delattr(stimulated_raman_scattering, 'y')
|
||||
|
||||
self.carriers = carriers
|
||||
self.raman_pumps = raman_pumps
|
||||
self._stimulated_raman_scattering = stimulated_raman_scattering
|
||||
|
||||
return self._stimulated_raman_scattering
|
||||
|
||||
def _residuals_stimulated_raman(self, ya, yb, power_spectrum, prop_direct):
|
||||
|
||||
computed_boundary_value = np.zeros(ya.size)
|
||||
|
||||
for index, direction in enumerate(prop_direct):
|
||||
if direction == +1:
|
||||
computed_boundary_value[index] = ya[index]
|
||||
else:
|
||||
computed_boundary_value[index] = yb[index]
|
||||
|
||||
return power_spectrum - computed_boundary_value
|
||||
|
||||
def _initial_guess_stimulated_raman(self, z, power_spectrum, alphap_fiber, prop_direct):
|
||||
""" Computes the initial guess knowing the boundary conditions
|
||||
:param z: spatial axis [m]. numpy array
|
||||
:param power_spectrum: power in each frequency slice [W]. Frequency axis is defined by freq_array. numpy array
|
||||
:param alphap_fiber: frequency dependent fiber attenuation of signal power [1/m]. Frequency defined by freq_array. numpy array
|
||||
:param prop_direct: indicates the propagation direction of each power slice in power_spectrum:
|
||||
+1 for forward propagation and -1 for backward propagation. Frequency defined by freq_array. numpy array
|
||||
:return: power_guess: guess on the initial conditions [W]. The first ndarray index identifies the frequency slice,
|
||||
the second ndarray index identifies the step in z. ndarray
|
||||
"""
|
||||
|
||||
power_guess = np.empty((power_spectrum.size, z.size))
|
||||
for f_index, power_slice in enumerate(power_spectrum):
|
||||
if prop_direct[f_index] == +1:
|
||||
power_guess[f_index, :] = np.exp(-alphap_fiber[f_index] * z) * power_slice
|
||||
else:
|
||||
power_guess[f_index, :] = np.exp(-alphap_fiber[f_index] * z[::-1]) * power_slice
|
||||
|
||||
return power_guess
|
||||
|
||||
def _ode_stimulated_raman(self, z, power_spectrum, alphap_fiber, freq_array, cr_raman_matrix, prop_direct):
|
||||
""" Aim of ode_raman is to implement the set of ordinary differential equations (ODEs) describing the Raman effect.
|
||||
:param z: spatial axis (unused).
|
||||
:param power_spectrum: power in each frequency slice [W]. Frequency axis is defined by freq_array. numpy array. Size n
|
||||
:param alphap_fiber: frequency dependent fiber attenuation of signal power [1/m]. Frequency defined by freq_array. numpy array. Size n
|
||||
:param freq_array: reference frequency axis [Hz]. numpy array. Size n
|
||||
:param cr_raman_matrix: Cr(f) Raman gain efficiency variation in frequency [1/W/m]. Frequency defined by freq_array. numpy ndarray. Size nxn
|
||||
:param prop_direct: indicates the propagation direction of each power slice in power_spectrum:
|
||||
+1 for forward propagation and -1 for backward propagation. Frequency defined by freq_array. numpy array. Size n
|
||||
:return: dP/dz: the power variation in dz [W/m]. numpy array. Size n
|
||||
"""
|
||||
|
||||
dpdz = np.nan * np.ones(power_spectrum.shape)
|
||||
for f_ind, power in enumerate(power_spectrum):
|
||||
cr_raman = cr_raman_matrix[f_ind, :]
|
||||
vibrational_loss = freq_array[f_ind] / freq_array[:f_ind]
|
||||
|
||||
for z_ind, power_sample in enumerate(power):
|
||||
raman_gain = np.sum(cr_raman[f_ind+1:] * power_spectrum[f_ind+1:, z_ind])
|
||||
raman_loss = np.sum(vibrational_loss * cr_raman[:f_ind] * power_spectrum[:f_ind, z_ind])
|
||||
|
||||
dpdz_element = prop_direct[f_ind] * (-alphap_fiber[f_ind] + raman_gain - raman_loss) * power_sample
|
||||
dpdz[f_ind][z_ind] = dpdz_element
|
||||
|
||||
return np.vstack(dpdz)
|
||||
|
||||
class NliSolver:
|
||||
""" This class implements the NLI models.
|
||||
The model and method can be specified in `self.nli_params.nli_method_name`.
|
||||
List of implemented methods:
|
||||
'gn_model_analytic': brute force triple integral solution
|
||||
'ggn_spectrally_separated_xpm_spm': XPM plus SPM
|
||||
"""
|
||||
|
||||
def __init__(self, nli_params=None, fiber_params=None):
|
||||
""" Initialize the fiber object with its physical parameters
|
||||
"""
|
||||
self.fiber_params = fiber_params
|
||||
self.nli_params = nli_params
|
||||
self.stimulated_raman_scattering = None
|
||||
|
||||
@property
|
||||
def fiber_params(self):
|
||||
return self._fiber_params
|
||||
|
||||
@fiber_params.setter
|
||||
def fiber_params(self, fiber_params):
|
||||
self._fiber_params = fiber_params
|
||||
|
||||
@property
|
||||
def stimulated_raman_scattering(self):
|
||||
return self._stimulated_raman_scattering
|
||||
|
||||
@stimulated_raman_scattering.setter
|
||||
def stimulated_raman_scattering(self, stimulated_raman_scattering):
|
||||
self._stimulated_raman_scattering = stimulated_raman_scattering
|
||||
|
||||
@property
|
||||
def nli_params(self):
|
||||
return self._nli_params
|
||||
|
||||
@nli_params.setter
|
||||
def nli_params(self, nli_params):
|
||||
"""
|
||||
:param model_params: namedtuple containing the parameters used to compute the NLI.
|
||||
"""
|
||||
self._nli_params = nli_params
|
||||
|
||||
def compute_nli(self, carrier, *carriers):
|
||||
""" Compute NLI power generated by the WDM comb `*carriers` on the channel under test `carrier`
|
||||
at the end of the fiber span.
|
||||
"""
|
||||
if 'gn_model_analytic' == self.nli_params.nli_method_name.lower():
|
||||
carrier_nli = self._gn_analytic(carrier, *carriers)
|
||||
elif 'ggn_spectrally_separated' in self.nli_params.nli_method_name.lower():
|
||||
eta_matrix = self._compute_eta_matrix(carrier, *carriers)
|
||||
carrier_nli = self._carrier_nli_from_eta_matrix(eta_matrix, carrier, *carriers)
|
||||
else:
|
||||
raise ValueError(f'Method {self.nli_params.nli_method_name} not implemented.')
|
||||
|
||||
return carrier_nli
|
||||
|
||||
@staticmethod
|
||||
def _carrier_nli_from_eta_matrix(eta_matrix, carrier, *carriers):
|
||||
carrier_nli = 0
|
||||
for pump_carrier_1 in carriers:
|
||||
for pump_carrier_2 in carriers:
|
||||
carrier_nli += eta_matrix[pump_carrier_1.channel_number-1, pump_carrier_2.channel_number-1] * \
|
||||
pump_carrier_1.power.signal * pump_carrier_2.power.signal
|
||||
carrier_nli *= carrier.power.signal
|
||||
|
||||
return carrier_nli
|
||||
|
||||
def _compute_eta_matrix(self, carrier_cut, *carriers):
|
||||
cut_index = carrier_cut.channel_number - 1
|
||||
# Matrix initialization
|
||||
matrix_size = max(carriers, key=lambda x: getattr(x, 'channel_number')).channel_number
|
||||
eta_matrix = np.zeros(shape=(matrix_size, matrix_size))
|
||||
|
||||
# SPM
|
||||
logger.debug(f'Start computing SPM on channel #{carrier_cut.channel_number}')
|
||||
# SPM GGN
|
||||
if 'ggn' in self.nli_params.nli_method_name.lower():
|
||||
partial_nli = self._generalized_spectrally_separated_spm(carrier_cut)
|
||||
# SPM GN
|
||||
elif 'gn' in self.nli_params.nli_method_name.lower():
|
||||
partial_nli = self._gn_analytic(carrier_cut, *[carrier_cut])
|
||||
eta_matrix[cut_index, cut_index] = partial_nli / (carrier_cut.power.signal**3)
|
||||
|
||||
# XPM
|
||||
for pump_carrier in carriers:
|
||||
pump_index = pump_carrier.channel_number - 1
|
||||
if not (cut_index == pump_index):
|
||||
logger.debug(f'Start computing XPM on channel #{carrier_cut.channel_number} '
|
||||
f'from channel #{pump_carrier.channel_number}')
|
||||
# XPM GGN
|
||||
if 'ggn' in self.nli_params.nli_method_name.lower():
|
||||
partial_nli = self._generalized_spectrally_separated_xpm(carrier_cut, pump_carrier)
|
||||
# XPM GN
|
||||
elif 'gn' in self.nli_params.nli_method_name.lower():
|
||||
partial_nli = self._gn_analytic(carrier_cut, *[pump_carrier])
|
||||
eta_matrix[pump_index, pump_index] = partial_nli /\
|
||||
(carrier_cut.power.signal * pump_carrier.power.signal**2)
|
||||
return eta_matrix
|
||||
|
||||
# Methods for computing GN-model
|
||||
def _gn_analytic(self, carrier, *carriers):
|
||||
""" Computes the nonlinear interference power on a single carrier.
|
||||
The method uses eq. 120 from arXiv:1209.0394.
|
||||
:param carrier: the signal under analysis
|
||||
:param carriers: the full WDM comb
|
||||
:return: carrier_nli: the amount of nonlinear interference in W on the carrier under analysis
|
||||
"""
|
||||
alpha = self.fiber_params.alpha0() / 2
|
||||
beta2 = self.fiber_params.beta2
|
||||
gamma = self.fiber_params.gamma
|
||||
length = self.fiber_params.length
|
||||
effective_length = (1 - np.exp(-2 * alpha * length)) / (2 * alpha)
|
||||
asymptotic_length = 1 / (2 * alpha)
|
||||
|
||||
g_nli = 0
|
||||
for interfering_carrier in carriers:
|
||||
g_interfearing = interfering_carrier.power.signal / interfering_carrier.baud_rate
|
||||
g_signal = carrier.power.signal / carrier.baud_rate
|
||||
g_nli += g_interfearing**2 * g_signal \
|
||||
* _psi(carrier, interfering_carrier, beta2=self.fiber_params.beta2, asymptotic_length=1/self.fiber_params.alpha0())
|
||||
g_nli *= (16.0 / 27.0) * (gamma * effective_length)**2 /\
|
||||
(2 * np.pi * abs(beta2) * asymptotic_length)
|
||||
carrier_nli = carrier.baud_rate * g_nli
|
||||
return carrier_nli
|
||||
|
||||
# Methods for computing the GGN-model
|
||||
def _generalized_spectrally_separated_spm(self, carrier):
|
||||
f_cut_resolution = self.nli_params.f_cut_resolution['delta_0']
|
||||
f_eval = carrier.frequency
|
||||
g_cut = (carrier.power.signal / carrier.baud_rate)
|
||||
|
||||
spm_nli = carrier.baud_rate * (16.0 / 27.0) * self.fiber_params.gamma**2 * g_cut**3 * \
|
||||
self._generalized_psi(carrier, carrier, f_eval, f_cut_resolution, f_cut_resolution)
|
||||
return spm_nli
|
||||
|
||||
def _generalized_spectrally_separated_xpm(self, carrier_cut, pump_carrier):
|
||||
delta_index = pump_carrier.channel_number - carrier_cut.channel_number
|
||||
f_cut_resolution = self.nli_params.f_cut_resolution[f'delta_{delta_index}']
|
||||
f_pump_resolution = self.nli_params.f_pump_resolution
|
||||
f_eval = carrier_cut.frequency
|
||||
g_pump = (pump_carrier.power.signal / pump_carrier.baud_rate)
|
||||
g_cut = (carrier_cut.power.signal / carrier_cut.baud_rate)
|
||||
frequency_offset_threshold = self._frequency_offset_threshold(pump_carrier.baud_rate)
|
||||
if abs(carrier_cut.frequency - pump_carrier.frequency) <= frequency_offset_threshold:
|
||||
xpm_nli = carrier_cut.baud_rate * (16.0 / 27.0) * self.fiber_params.gamma**2 * g_pump**2 * g_cut * \
|
||||
2 * self._generalized_psi(carrier_cut, pump_carrier, f_eval, f_cut_resolution, f_pump_resolution)
|
||||
else:
|
||||
xpm_nli = carrier_cut.baud_rate * (16.0 / 27.0) * self.fiber_params.gamma**2 * g_pump**2 * g_cut * \
|
||||
2 * self._fast_generalized_psi(carrier_cut, pump_carrier, f_eval, f_cut_resolution)
|
||||
return xpm_nli
|
||||
|
||||
def _fast_generalized_psi(self, carrier_cut, pump_carrier, f_eval, f_cut_resolution):
|
||||
""" It computes the generalized psi function similarly to the one used in the GN model
|
||||
:return: generalized_psi
|
||||
"""
|
||||
# Fiber parameters
|
||||
alpha0 = self.fiber_params.alpha0(f_eval)
|
||||
beta2 = self.fiber_params.beta2
|
||||
beta3 = self.fiber_params.beta3
|
||||
f_ref_beta = self.fiber_params.f_ref_beta
|
||||
z = self.stimulated_raman_scattering.z
|
||||
frequency_rho = self.stimulated_raman_scattering.frequency
|
||||
rho_norm = self.stimulated_raman_scattering.rho * np.exp(np.abs(alpha0) * z / 2)
|
||||
if len(frequency_rho) == 1:
|
||||
rho_function = lambda f: rho_norm[0, :]
|
||||
else:
|
||||
rho_function = interp1d(frequency_rho, rho_norm, axis=0, fill_value='extrapolate')
|
||||
rho_norm_pump = rho_function(pump_carrier.frequency)
|
||||
|
||||
f1_array = np.array([pump_carrier.frequency - (pump_carrier.baud_rate * (1 + pump_carrier.roll_off) / 2),
|
||||
pump_carrier.frequency + (pump_carrier.baud_rate * (1 + pump_carrier.roll_off) / 2)])
|
||||
f2_array = np.arange(carrier_cut.frequency,
|
||||
carrier_cut.frequency + (carrier_cut.baud_rate * (1 + carrier_cut.roll_off) / 2),
|
||||
f_cut_resolution) # Only positive f2 is used since integrand_f2 is symmetric
|
||||
|
||||
integrand_f1 = np.zeros(len(f1_array))
|
||||
for f1_index, f1 in enumerate(f1_array):
|
||||
delta_beta = 4 * np.pi**2 * (f1 - f_eval) * (f2_array - f_eval) * \
|
||||
(beta2 + np.pi * beta3 * (f1 + f2_array - 2 * f_ref_beta))
|
||||
integrand_f2 = self._generalized_rho_nli(delta_beta, rho_norm_pump, z, alpha0)
|
||||
integrand_f1[f1_index] = 2 * np.trapz(integrand_f2, f2_array) # 2x since integrand_f2 is symmetric in f2
|
||||
generalized_psi = 0.5 * sum(integrand_f1) * pump_carrier.baud_rate
|
||||
return generalized_psi
|
||||
|
||||
def _generalized_psi(self, carrier_cut, pump_carrier, f_eval, f_cut_resolution, f_pump_resolution):
|
||||
""" It computes the generalized psi function similarly to the one used in the GN model
|
||||
:return: generalized_psi
|
||||
"""
|
||||
# Fiber parameters
|
||||
alpha0 = self.fiber_params.alpha0(f_eval)
|
||||
beta2 = self.fiber_params.beta2
|
||||
beta3 = self.fiber_params.beta3
|
||||
f_ref_beta = self.fiber_params.f_ref_beta
|
||||
z = self.stimulated_raman_scattering.z
|
||||
frequency_rho = self.stimulated_raman_scattering.frequency
|
||||
rho_norm = self.stimulated_raman_scattering.rho * np.exp(np.abs(alpha0) * z / 2)
|
||||
if len(frequency_rho) == 1:
|
||||
rho_function = lambda f: rho_norm[0, :]
|
||||
else:
|
||||
rho_function = interp1d(frequency_rho, rho_norm, axis=0, fill_value='extrapolate')
|
||||
rho_norm_pump = rho_function(pump_carrier.frequency)
|
||||
|
||||
f1_array = np.arange(pump_carrier.frequency - (pump_carrier.baud_rate * (1 + pump_carrier.roll_off) / 2),
|
||||
pump_carrier.frequency + (pump_carrier.baud_rate * (1 + pump_carrier.roll_off) / 2),
|
||||
f_pump_resolution)
|
||||
f2_array = np.arange(carrier_cut.frequency - (carrier_cut.baud_rate * (1 + carrier_cut.roll_off) / 2),
|
||||
carrier_cut.frequency + (carrier_cut.baud_rate * (1 + carrier_cut.roll_off) / 2),
|
||||
f_cut_resolution)
|
||||
psd1 = raised_cosine_comb(f1_array, pump_carrier) * (pump_carrier.baud_rate / pump_carrier.power.signal)
|
||||
|
||||
integrand_f1 = np.zeros(len(f1_array))
|
||||
for f1_index, (f1, psd1_sample) in enumerate(zip(f1_array, psd1)):
|
||||
f3_array = f1 + f2_array - f_eval
|
||||
psd2 = raised_cosine_comb(f2_array, carrier_cut) * (carrier_cut.baud_rate / carrier_cut.power.signal)
|
||||
psd3 = raised_cosine_comb(f3_array, pump_carrier) * (pump_carrier.baud_rate / pump_carrier.power.signal)
|
||||
ggg = psd1_sample * psd2 * psd3
|
||||
|
||||
delta_beta = 4 * np.pi**2 * (f1 - f_eval) * (f2_array - f_eval) * \
|
||||
(beta2 + np.pi * beta3 * (f1 + f2_array - 2 * f_ref_beta))
|
||||
|
||||
integrand_f2 = ggg * self._generalized_rho_nli(delta_beta, rho_norm_pump, z, alpha0)
|
||||
integrand_f1[f1_index] = np.trapz(integrand_f2, f2_array)
|
||||
generalized_psi = np.trapz(integrand_f1, f1_array)
|
||||
return generalized_psi
|
||||
|
||||
@staticmethod
|
||||
def _generalized_rho_nli(delta_beta, rho_norm_pump, z, alpha0):
|
||||
w = 1j * delta_beta - alpha0
|
||||
generalized_rho_nli = (rho_norm_pump[-1]**2 * np.exp(w * z[-1]) - rho_norm_pump[0]**2 * np.exp(w * z[0])) / w
|
||||
for z_ind in range(0, len(z) - 1):
|
||||
derivative_rho = (rho_norm_pump[z_ind + 1]**2 - rho_norm_pump[z_ind]**2) / (z[z_ind + 1] - z[z_ind])
|
||||
generalized_rho_nli -= derivative_rho * (np.exp(w * z[z_ind + 1]) - np.exp(w * z[z_ind])) / (w**2)
|
||||
generalized_rho_nli = np.abs(generalized_rho_nli)**2
|
||||
return generalized_rho_nli
|
||||
|
||||
def _frequency_offset_threshold(self, symbol_rate):
|
||||
k_ref = 5
|
||||
beta2_ref = 21.3e-27
|
||||
delta_f_ref = 50e9
|
||||
rs_ref = 32e9
|
||||
freq_offset_th = ((k_ref * delta_f_ref) * rs_ref * beta2_ref) / (self.fiber_params.beta2 * symbol_rate)
|
||||
return freq_offset_th
|
||||
|
||||
def _psi(carrier, interfering_carrier, beta2, asymptotic_length):
|
||||
"""Calculates eq. 123 from `arXiv:1209.0394 <https://arxiv.org/abs/1209.0394>`__"""
|
||||
|
||||
if carrier.channel_number == interfering_carrier.channel_number: # SCI, SPM
|
||||
psi = np.arcsinh(0.5 * np.pi**2 * asymptotic_length * abs(beta2) * carrier.baud_rate**2)
|
||||
else: # XCI, XPM
|
||||
delta_f = carrier.frequency - interfering_carrier.frequency
|
||||
psi = np.arcsinh(np.pi**2 * asymptotic_length * abs(beta2) *
|
||||
carrier.baud_rate * (delta_f + 0.5 * interfering_carrier.baud_rate))
|
||||
psi -= np.arcsinh(np.pi**2 * asymptotic_length * abs(beta2) *
|
||||
carrier.baud_rate * (delta_f - 0.5 * interfering_carrier.baud_rate))
|
||||
return psi
|
||||
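As a numeric sanity check of the SCI/SPM branch of _psi above (an editor's sketch, not part of the diff), assume a typical span with 0.2 dB/km attenuation, |beta2| = 21.3 ps^2/km (the reference value used in _frequency_offset_threshold) and a 32 GBaud carrier:

alpha_field = 0.2 / (20 * np.log10(np.e)) * 1e-3        # field attenuation [1/m]
asymptotic_length = 1 / (2 * alpha_field)               # ~21.7 km
psi_spm = np.arcsinh(0.5 * np.pi**2 * asymptotic_length * 21.3e-27 * (32e9)**2)
# psi_spm ~= 1.58 (dimensionless), i.e. the value _psi returns for SCI/SPM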
gnpy/core/service_sheet.py | 268 lines | new file
@@ -0,0 +1,268 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
gnpy.core.service_sheet
|
||||
========================
|
||||
|
||||
XLS parser that can be called to create a JSON request file in accordance with
|
||||
Yang model for requesting path computation.
|
||||
|
||||
See: draft-ietf-teas-yang-path-computation-01.txt
|
||||
"""
|
||||
|
||||
from sys import exit
|
||||
try:
|
||||
from xlrd import open_workbook, XL_CELL_EMPTY
|
||||
except ModuleNotFoundError:
|
||||
exit('Required: `pip install xlrd`')
|
||||
from collections import namedtuple
|
||||
from logging import getLogger, basicConfig, CRITICAL, DEBUG, INFO
|
||||
from json import dumps
|
||||
from pathlib import Path
|
||||
from gnpy.core.equipment import load_equipment
|
||||
from gnpy.core.utils import db2lin, lin2db
|
||||
from gnpy.core.exceptions import ServiceError
|
||||
|
||||
SERVICES_COLUMN = 12
|
||||
#EQPT_LIBRARY_FILENAME = Path(__file__).parent / 'eqpt_config.json'
|
||||
|
||||
all_rows = lambda sheet, start=0: (sheet.row(x) for x in range(start, sheet.nrows))
|
||||
logger = getLogger(__name__)
|
||||
|
||||
# Type for input data
|
||||
class Request(namedtuple('Request', 'request_id source destination trx_type mode \
|
||||
spacing power nb_channel disjoint_from nodes_list is_loose path_bandwidth')):
|
||||
def __new__(cls, request_id, source, destination, trx_type, mode=None , spacing= None , power = None, nb_channel = None , disjoint_from ='' , nodes_list = None, is_loose = '', path_bandwidth = None):
|
||||
return super().__new__(cls, request_id, source, destination, trx_type, mode, spacing, power, nb_channel, disjoint_from, nodes_list, is_loose, path_bandwidth)
|
||||
|
||||
# Type for output data: // from dutc
|
||||
class Element:
|
||||
def __eq__(self, other):
|
||||
return type(self) == type(other) and self.uid == other.uid
|
||||
def __hash__(self):
|
||||
return hash((type(self), self.uid))
|
||||
|
||||
class Request_element(Element):
|
||||
def __init__(self, Request, eqpt_filename, bidir):
|
||||
# request_id is str
|
||||
# excel has automatic number formatting that adds .0 on integer values
|
||||
# the next lines recover the pure int value, assuming this .0 is unwanted
|
||||
self.request_id = correct_xlrd_int_to_str_reading(Request.request_id)
|
||||
self.source = f'trx {Request.source}'
|
||||
self.destination = f'trx {Request.destination}'
|
||||
# TODO: the automatic naming generated by excel parser requires that source and dest name
|
||||
# be a string starting with 'trx' : this is manually added here.
|
||||
self.srctpid = f'trx {Request.source}'
|
||||
self.dsttpid = f'trx {Request.destination}'
|
||||
self.bidir = bidir
|
||||
# test that trx_type belongs to eqpt_config.json
|
||||
# if not replace it with a default
|
||||
equipment = load_equipment(eqpt_filename)
|
||||
try :
|
||||
if equipment['Transceiver'][Request.trx_type]:
|
||||
self.trx_type = correct_xlrd_int_to_str_reading(Request.trx_type)
|
||||
if Request.mode is not None :
|
||||
Requestmode = correct_xlrd_int_to_str_reading(Request.mode)
|
||||
if [mode for mode in equipment['Transceiver'][Request.trx_type].mode if mode['format'] == Requestmode]:
|
||||
self.mode = Requestmode
|
||||
else :
|
||||
msg = f'Request Id: {self.request_id} - could not find tsp : \'{Request.trx_type}\' with mode: \'{Requestmode}\' in eqpt library \nComputation stopped.'
|
||||
#print(msg)
|
||||
logger.critical(msg)
|
||||
exit(1)
|
||||
else:
|
||||
Requestmode = None
|
||||
self.mode = Request.mode
|
||||
except KeyError:
|
||||
msg = f'Request Id: {self.request_id} - could not find tsp : \'{Request.trx_type}\' with mode: \'{Request.mode}\' in eqpt library \nComputation stopped.'
|
||||
#print(msg)
|
||||
logger.critical(msg)
|
||||
raise ServiceError(msg)
|
||||
# excel input are in GHz and dBm
|
||||
if Request.spacing is not None:
|
||||
self.spacing = Request.spacing * 1e9
|
||||
else:
|
||||
msg = f'Request {self.request_id} missing spacing: spacing is mandatory.\ncomputation stopped'
|
||||
logger.critical(msg)
|
||||
raise ServiceError(msg)
|
||||
if Request.power is not None:
|
||||
self.power = db2lin(Request.power) * 1e-3
|
||||
else:
|
||||
self.power = None
|
||||
if Request.nb_channel is not None :
|
||||
self.nb_channel = int(Request.nb_channel)
|
||||
else:
|
||||
self.nb_channel = None
|
||||
|
||||
value = correct_xlrd_int_to_str_reading(Request.disjoint_from)
|
||||
self.disjoint_from = [n for n in value.split(' | ') if value]
|
||||
self.nodes_list = []
|
||||
if Request.nodes_list :
|
||||
self.nodes_list = Request.nodes_list.split(' | ')
|
||||
|
||||
# cleaning the list of nodes to remove source and destination
|
||||
# (because the remaining of the program assumes that the nodes list are nodes
|
||||
# on the path and should not include source and destination)
|
||||
try :
|
||||
self.nodes_list.remove(self.source)
|
||||
msg = f'{self.source} removed from explicit path node-list'
|
||||
logger.info(msg)
|
||||
except ValueError:
|
||||
msg = f'{self.source} already removed from explicit path node-list'
|
||||
logger.info(msg)
|
||||
|
||||
try :
|
||||
self.nodes_list.remove(self.destination)
|
||||
msg = f'{self.destination} removed from explicit path node-list'
|
||||
logger.info(msg)
|
||||
except ValueError:
|
||||
msg = f'{self.destination} already removed from explicit path node-list'
|
||||
logger.info(msg)
|
||||
|
||||
# the excel parser applies the same hop-type to all nodes in the route nodes_list.
|
||||
# user can change this per node in the generated json
|
||||
self.loose = 'LOOSE'
|
||||
if Request.is_loose == 'no' :
|
||||
self.loose = 'STRICT'
|
||||
self.path_bandwidth = None
|
||||
if Request.path_bandwidth is not None:
|
||||
self.path_bandwidth = Request.path_bandwidth * 1e9
|
||||
else:
|
||||
self.path_bandwidth = 0
|
||||
|
||||
uid = property(lambda self: repr(self))
|
||||
@property
|
||||
def pathrequest(self):
|
||||
# Default assumption for bidir is False
|
||||
req_dictionnary = {
|
||||
'request-id':self.request_id,
|
||||
'source': self.source,
|
||||
'destination': self.destination,
|
||||
'src-tp-id': self.srctpid,
|
||||
'dst-tp-id': self.dsttpid,
|
||||
'bidirectional': self.bidir,
|
||||
'path-constraints':{
|
||||
'te-bandwidth': {
|
||||
'technology': 'flexi-grid',
|
||||
'trx_type' : self.trx_type,
|
||||
'trx_mode' : self.mode,
|
||||
'effective-freq-slot':[{'N': 'null', 'M': 'null'}],
|
||||
'spacing' : self.spacing,
|
||||
'max-nb-of-channel' : self.nb_channel,
|
||||
'output-power' : self.power
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if self.nodes_list:
|
||||
req_dictionnary['explicit-route-objects'] = {}
|
||||
temp = {'route-object-include-exclude' : [
|
||||
{'explicit-route-usage': 'route-include-ero',
|
||||
'index': self.nodes_list.index(node),
|
||||
'num-unnum-hop': {
|
||||
'node-id': f'{node}',
|
||||
'link-tp-id': 'link-tp-id is not used',
|
||||
'hop-type': f'{self.loose}',
|
||||
}
|
||||
}
|
||||
for node in self.nodes_list]
|
||||
}
|
||||
req_dictionnary['explicit-route-objects'] = temp
|
||||
if self.path_bandwidth is not None:
|
||||
req_dictionnary['path-constraints']['te-bandwidth']['path_bandwidth'] = self.path_bandwidth
|
||||
|
||||
return req_dictionnary
|
||||
@property
|
||||
def pathsync(self):
|
||||
if self.disjoint_from :
|
||||
return {'synchronization-id':self.request_id,
|
||||
'svec': {
|
||||
'relaxable' : 'false',
|
||||
'disjointness': 'node link',
|
||||
'request-id-number': [self.request_id]+ [n for n in self.disjoint_from]
|
||||
}
|
||||
}
|
||||
else:
|
||||
return None
|
||||
# TO-DO: avoid multiple entries with same synchronisation vectors
|
||||
@property
|
||||
def json(self):
|
||||
return self.pathrequest , self.pathsync
|
||||
|
||||
def convert_service_sheet(input_filename, eqpt_filename, output_filename='', bidir=False, filter_region=None):
|
||||
""" converts a service sheet into a json structure
|
||||
"""
|
||||
if filter_region is None:
|
||||
filter_region = []
|
||||
service = parse_excel(input_filename)
|
||||
req = [Request_element(n, eqpt_filename, bidir) for n in service]
|
||||
# dumps the output into a json file with name
|
||||
# split_filename = [input_filename[0:len(input_filename)-len(suffix_filename)] , suffix_filename[1:]]
|
||||
if output_filename=='':
|
||||
output_filename = f'{str(input_filename)[0:len(str(input_filename))-len(str(input_filename.suffixes[0]))]}_services.json'
|
||||
# for debug
|
||||
# print(json_filename)
|
||||
# if there is no sync vector , do not write any synchronization
|
||||
synchro = [n.json[1] for n in req if n.json[1] is not None]
|
||||
if synchro:
|
||||
data = {
|
||||
'path-request': [n.json[0] for n in req],
|
||||
'synchronization': synchro
|
||||
}
|
||||
else:
|
||||
data = {
|
||||
'path-request': [n.json[0] for n in req]
|
||||
}
|
||||
with open(output_filename, 'w', encoding='utf-8') as f:
|
||||
f.write(dumps(data, indent=2, ensure_ascii=False))
|
||||
return data
|
||||
|
||||
def correct_xlrd_int_to_str_reading(v) :
|
||||
if not isinstance(v,str):
|
||||
value = str(int(v))
|
||||
if value.endswith('.0'):
|
||||
value = value[:-2]
|
||||
else:
|
||||
value = v
|
||||
return value
|
||||
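A quick illustration of the helper above (editor's note, not part of the diff): xlrd hands back integer cells as floats, so an id entered as 12 arrives as 12.0, while genuine strings pass through untouched.

assert correct_xlrd_int_to_str_reading(12.0) == '12'
assert correct_xlrd_int_to_str_reading('Site_A') == 'Site_A'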
|
||||
# to be used from dutc
|
||||
def parse_row(row, fieldnames):
|
||||
return {f: r.value for f, r in zip(fieldnames, row[0:SERVICES_COLUMN])
|
||||
if r.ctype != XL_CELL_EMPTY}
|
||||
#
|
||||
|
||||
def parse_excel(input_filename):
|
||||
with open_workbook(input_filename) as wb:
|
||||
service_sheet = wb.sheet_by_name('Service')
|
||||
services = list(parse_service_sheet(service_sheet))
|
||||
return services
|
||||
|
||||
def parse_service_sheet(service_sheet):
|
||||
""" reads each column according to authorized fieldnames. order is not important.
|
||||
"""
|
||||
logger.info(f'Validating headers on {service_sheet.name!r}')
|
||||
# filter out empty field names: this covers the case where columns on the
# right-hand side are used for comments or drawings in the excel sheet
|
||||
header = [x.value.strip() for x in service_sheet.row(4)[0:SERVICES_COLUMN]
|
||||
if len(x.value.strip()) > 0]
|
||||
|
||||
# create a service_fieldname list independent from the excel column order
# to be compatible with any version of the sheet
# the following dictionary records the excel field names and the corresponding parameter's name
|
||||
|
||||
authorized_fieldnames = {
|
||||
'route id':'request_id', 'Source':'source', 'Destination':'destination', \
|
||||
'TRX type':'trx_type', 'Mode' : 'mode', 'System: spacing':'spacing', \
|
||||
'System: input power (dBm)':'power', 'System: nb of channels':'nb_channel',\
|
||||
'routing: disjoint from': 'disjoint_from', 'routing: path':'nodes_list',\
|
||||
'routing: is loose?':'is_loose', 'path bandwidth':'path_bandwidth'}
|
||||
try:
|
||||
service_fieldnames = [authorized_fieldnames[e] for e in header]
|
||||
except KeyError:
|
||||
msg = f'Malformed header on Service sheet: {header} field not in {authorized_fieldnames}'
|
||||
logger.critical(msg)
|
||||
raise ValueError(msg)
|
||||
for row in all_rows(service_sheet, start=5):
|
||||
yield Request(**parse_row(row[0:SERVICES_COLUMN], service_fieldnames))
|
||||
gnpy/core/spectrum_assignment.py | 386 lines | new file
@@ -0,0 +1,386 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
gnpy.core.spectrum_assignment
|
||||
=============================
|
||||
|
||||
This module contains the Oms and Bitmap classes and the methods to
select and assign spectrum. The spectrum_selection function identifies the free
slots and select_candidate selects the candidate spectrum according to a
strategy, for example first fit.
An oms records its elements, and elements are updated with an oms to keep the
element/oms correspondence
|
||||
"""
|
||||
|
||||
from collections import namedtuple
|
||||
from logging import getLogger
|
||||
from math import ceil
|
||||
from gnpy.core.elements import Roadm, Transceiver
|
||||
from gnpy.core.exceptions import SpectrumError
|
||||
|
||||
LOGGER = getLogger(__name__)
|
||||
|
||||
class Bitmap:
|
||||
""" records the spectrum occupation
|
||||
"""
|
||||
def __init__(self, f_min, f_max, grid, guardband=0.15e12, bitmap=None):
|
||||
# n is the min index including the guardband. The guardband is required to make sure
# that a channel can be assigned with center frequency fmin (meaning that its
# slot occupation goes below freq_index_min)
|
||||
n_min = frequency_to_n(f_min-guardband, grid)
|
||||
n_max = frequency_to_n(f_max+guardband, grid) - 1
|
||||
self.n_min = n_min
|
||||
self.n_max = n_max
|
||||
self.freq_index_min = frequency_to_n(f_min)
|
||||
self.freq_index_max = frequency_to_n(f_max)
|
||||
self.freq_index = list(range(n_min, n_max+1))
|
||||
if bitmap is None:
|
||||
self.bitmap = [1] * (n_max-n_min+1)
|
||||
elif len(bitmap) == len(self.freq_index):
|
||||
self.bitmap = bitmap
|
||||
else:
|
||||
raise SpectrumError(f'bitmap is not consistent with f_min {f_min} - n: {n_min} and f_max {f_max} - n: {n_max}')
|
||||
|
||||
def getn(self, i):
|
||||
""" converts the n (itu grid) into a local index
|
||||
"""
|
||||
return self.freq_index[i]
|
||||
def geti(self, nvalue):
|
||||
""" converts the local index into n (itu grid)
|
||||
"""
|
||||
return self.freq_index.index(nvalue)
|
||||
def insert_left(self, newbitmap):
|
||||
""" insert bitmap on the left to align oms bitmaps if their start frequencies are different
|
||||
"""
|
||||
self.bitmap = newbitmap + self.bitmap
|
||||
temp = list(range(self.n_min-len(newbitmap), self.n_min))
|
||||
self.freq_index = temp + self.freq_index
|
||||
self.n_min = self.freq_index[0]
|
||||
def insert_right(self, newbitmap):
|
||||
""" insert bitmap on the right to align oms bitmaps if their stop frequencies are different
|
||||
"""
|
||||
self.bitmap = self.bitmap + newbitmap
|
||||
self.freq_index = self.freq_index + list(range(self.n_max, self.n_max+len(newbitmap)))
|
||||
self.n_max = self.freq_index[-1]
|
||||
|
||||
# +'grid available_slots f_min f_max services_list')
|
||||
OMSParams = namedtuple('OMSParams', 'oms_id el_id_list el_list')
|
||||
|
||||
class OMS:
|
||||
""" OMS class is the logical container that represent a link between two adjacent ROADMs and
|
||||
records the crossed elements and the occupied spectrum
|
||||
"""
|
||||
def __init__(self, *args, **params):
|
||||
params = OMSParams(**params)
|
||||
self.oms_id = params.oms_id
|
||||
self.el_id_list = params.el_id_list
|
||||
self.el_list = params.el_list
|
||||
self.spectrum_bitmap = []
|
||||
self.nb_channels = 0
|
||||
self.service_list = []
|
||||
# TODO
|
||||
def __str__(self):
|
||||
return '\n\t'.join([f'{type(self).__name__} {self.oms_id}',
|
||||
f'{self.el_id_list[0]} - {self.el_id_list[-1]}'])
|
||||
def __repr__(self):
|
||||
return '\n\t'.join([f'{type(self).__name__} {self.oms_id}',
|
||||
f'{self.el_id_list[0]} - {self.el_id_list[-1]}', '\n'])
|
||||
|
||||
def add_element(self, elem):
|
||||
""" records oms elements
|
||||
"""
|
||||
self.el_id_list.append(elem.uid)
|
||||
self.el_list.append(elem)
|
||||
|
||||
def update_spectrum(self, f_min, f_max, guardband=0.15e12, existing_spectrum=None,
|
||||
grid=0.00625e12):
|
||||
""" frequencies expressed in Hz
|
||||
"""
|
||||
if existing_spectrum is None:
|
||||
# add some 150 GHz margin to enable a center channel on f_min
|
||||
# use ITU-T G694.1
|
||||
# Flexible DWDM grid definition
|
||||
# For the flexible DWDM grid, the allowed frequency slots have a nominal
|
||||
# central frequency (in THz) defined by:
|
||||
# 193.1 + n × 0.00625 where n is a positive or negative integer including 0
|
||||
# and 0.00625 is the nominal central frequency granularity in THz
|
||||
# and a slot width defined by:
|
||||
# 12.5 × m where m is a positive integer and 12.5 is the slot width granularity in
|
||||
# GHz.
|
||||
# Any combination of frequency slots is allowed as long as no two frequency
|
||||
# slots overlap.
|
||||
|
||||
# TODO: add an explanation for this / parametrize ....
|
||||
self.spectrum_bitmap = Bitmap(f_min, f_max, grid, guardband)
|
||||
# print(len(self.spectrum_bitmap.bitmap))
|
||||
|
||||
def assign_spectrum(self, nvalue, mvalue):
|
||||
""" change oms spectrum to mark spectrum assigned
|
||||
"""
|
||||
if (nvalue is None or mvalue is None or isinstance(nvalue, float)
|
||||
or isinstance(mvalue, float) or mvalue == 0):
|
||||
raise SpectrumError('could not assign: n and m must be non-None integers and m must be non-zero')
|
||||
startn, stopn = mvalue_to_slots(nvalue, mvalue)
|
||||
# print(f'startn stop n {startn} , {stopn}')
|
||||
# assumes that guardbands are sufficient to ensure that assigning a center channel
# at fmin or fmax is OK if startn > self.spectrum_bitmap.n_min
|
||||
if (nvalue <= self.spectrum_bitmap.freq_index_max and
|
||||
nvalue >= self.spectrum_bitmap.freq_index_min and
|
||||
stopn <= self.spectrum_bitmap.n_max and
|
||||
startn > self.spectrum_bitmap.n_min):
|
||||
# verification that both lengths are identical
|
||||
self.spectrum_bitmap.bitmap[self.spectrum_bitmap.geti(startn):self.spectrum_bitmap.geti(stopn)+1] = [0] * (stopn-startn+1)
|
||||
return True
|
||||
else:
|
||||
msg = f'Could not assign n {nvalue}, m {mvalue} values:' +\
|
||||
' one or several slots are not available'
|
||||
LOGGER.info(msg)
|
||||
return False
|
||||
|
||||
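# Illustrative usage (mirrors the commented example further down in build_oms_list):
# oms.assign_spectrum(13, 7) marks slots n = 6..19 as used, i.e. roughly 193.1375 to
# 193.225 THz; it returns False (and logs) if any of those slots is unavailable.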
def add_service(self, service_id, nb_wl):
|
||||
""" record service and mark spectrum as occupied
|
||||
"""
|
||||
self.service_list.append(service_id)
|
||||
self.nb_channels += nb_wl
|
||||
|
||||
def frequency_to_n(freq, grid=0.00625e12):
|
||||
""" converts frequency into the n value (ITU grid)
|
||||
"""
|
||||
return int((freq - 193.1e12) / grid)
|
||||
|
||||
def nvalue_to_frequency(nvalue, grid=0.00625e12):
|
||||
""" converts n value into a frequency
|
||||
"""
|
||||
return 193.1e12 + nvalue * grid
|
||||
|
||||
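# Illustrative examples (sketch, not in the original module):
# frequency_to_n(193.1375e12) returns 6 and nvalue_to_frequency(6) returns 193137500000000.0;
# the 150 GHz guardband used above corresponds to 0.15e12 / 0.00625e12 = 24 slots.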
def mvalue_to_slots(nvalue, mvalue):
|
||||
""" convert center n an m into start and stop n
|
||||
"""
|
||||
startn = nvalue - mvalue
|
||||
stopn = nvalue + mvalue -1
|
||||
return startn, stopn
|
||||
|
||||
def slots_to_m(startn, stopn):
|
||||
""" converts the start and stop n values to the center n and m value
|
||||
"""
|
||||
nvalue = int((startn + stopn + 1) / 2)
mvalue = int((stopn - startn + 1) / 2)
|
||||
return nvalue, mvalue
|
||||
|
||||
def m_to_freq(nvalue, mvalue, grid=0.00625e12):
|
||||
""" converts m into frequency range
|
||||
"""
|
||||
startn, stopn = mvalue_to_slots(nvalue, mvalue)
|
||||
fstart = nvalue_to_frequency(startn, grid)
|
||||
fstop = nvalue_to_frequency(stopn+1, grid)
|
||||
return fstart, fstop
|
||||
|
||||
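# Worked example (illustrative, mirroring the commented ITU-T example in build_oms_list below):
# with n = 13 and m = 7, mvalue_to_slots(13, 7) -> (6, 19), slots_to_m(6, 19) -> (13, 7) and
# m_to_freq(13, 7) -> (193137500000000.0, 193225000000000.0), i.e. 193.1375 to 193.225 THz.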
def align_grids(oms_list):
|
||||
""" used to apply same grid to all oms : same starting n, stop n and slot size
|
||||
out of grid slots are set to 0
|
||||
"""
|
||||
n_min = min([o.spectrum_bitmap.n_min for o in oms_list])
|
||||
n_max = max([o.spectrum_bitmap.n_max for o in oms_list])
|
||||
for this_o in oms_list:
|
||||
if (this_o.spectrum_bitmap.n_min - n_min) > 0:
|
||||
this_o.spectrum_bitmap.insert_left([0] * (this_o.spectrum_bitmap.n_min - n_min))
|
||||
if (n_max - this_o.spectrum_bitmap.n_max) > 0:
|
||||
this_o.spectrum_bitmap.insert_right([0] * (n_max - this_o.spectrum_bitmap.n_max))
|
||||
return oms_list
|
||||
|
||||
def build_oms_list(network, equipment):
|
||||
""" initialization of OMS list in the network
|
||||
an OMS is built by reading all intermediate nodes between two adjacent ROADMs
each element within the list is given an oms and an oms_id attribute to record the
OMS it belongs to.
the function supports different spectrum widths and assumes that the whole network
works with the min range among OMSs
|
||||
"""
|
||||
oms_id = 0
|
||||
oms_list = []
|
||||
for node in [n for n in network.nodes() if isinstance(n, Roadm)]:
|
||||
for edge in network.edges([node]):
|
||||
if not isinstance(edge[1], Transceiver):
|
||||
nd_in = edge[0] # nd_in is a Roadm
|
||||
try:
|
||||
nd_in.oms_list.append(oms_id)
|
||||
except AttributeError:
|
||||
nd_in.oms_list = []
|
||||
nd_in.oms_list.append(oms_id)
|
||||
nd_out = edge[1]
|
||||
|
||||
params = {}
|
||||
params['oms_id'] = oms_id
|
||||
params['el_id_list'] = []
|
||||
params['el_list'] = []
|
||||
oms = OMS(**params)
|
||||
oms.add_element(nd_in)
|
||||
while not isinstance(nd_out, Roadm):
|
||||
oms.add_element(nd_out)
|
||||
# add an oms_id in the element
|
||||
nd_out.oms_id = oms_id
|
||||
nd_out.oms = oms
|
||||
n_temp = nd_out
|
||||
nd_out = next(n[1] for n in network.edges([n_temp]) if n[1].uid != nd_in.uid)
|
||||
nd_in = n_temp
|
||||
|
||||
oms.add_element(nd_out)
|
||||
# nd_out is a Roadm
|
||||
try:
|
||||
nd_out.oms_list.append(oms_id)
|
||||
except AttributeError:
|
||||
nd_out.oms_list = []
|
||||
nd_out.oms_list.append(oms_id)
|
||||
|
||||
oms.update_spectrum(equipment['SI']['default'].f_min,
|
||||
equipment['SI']['default'].f_max, grid=0.00625e12)
|
||||
# oms.assign_spectrum(13,7) gives back (193137500000000.0, 193225000000000.0)
|
||||
# as in the example in the standard
|
||||
# oms.assign_spectrum(13,7)
|
||||
|
||||
oms_list.append(oms)
|
||||
oms_id += 1
|
||||
oms_list = align_grids(oms_list)
|
||||
reversed_oms(oms_list)
|
||||
return oms_list
|
||||
|
||||
def reversed_oms(oms_list):
|
||||
""" identifies reversed OMS
|
||||
only applicable for non parallel OMS
|
||||
"""
|
||||
for oms in oms_list:
|
||||
has_reversed = False
|
||||
for this_o in oms_list:
|
||||
if (oms.el_id_list[0] == this_o.el_id_list[-1] and
|
||||
oms.el_id_list[-1] == this_o.el_id_list[0]):
|
||||
oms.reversed_oms = this_o
|
||||
has_reversed = True
|
||||
break
|
||||
if not has_reversed:
|
||||
oms.reversed_oms = None
|
||||
|
||||
|
||||
def bitmap_sum(band1, band2):
|
||||
""" a functions that marks occupied bitmap by 0 if the slot is occupied in band1 or in band2
|
||||
"""
|
||||
res = []
|
||||
for i, elem in enumerate(band1):
|
||||
if band2[i] * elem == 0:
|
||||
res.append(0)
|
||||
else:
|
||||
res.append(1)
|
||||
return res
|
||||
|
||||
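# Quick illustration (not in the original source): bitmap_sum([1, 1, 0, 1], [1, 0, 1, 1])
# returns [1, 0, 0, 1]: a slot stays free (1) only if it is free in both bitmaps.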
def spectrum_selection(pth, oms_list, requested_m, requested_n=None):
|
||||
""" collects spectrum availability and call the select_candidate function
|
||||
# step 1 collects pth spectrum availability
|
||||
# step 2 if n is not None try to assign the spectrum
|
||||
# if the spectrum is not available then sends back an "error"
|
||||
# if n is None selects candidate spectrum
|
||||
# select spectrum that fits the policy ( first fit, random, ABP...)
|
||||
# step3 returns the selection
|
||||
"""
|
||||
|
||||
# use indexes instead of ITU-T n values
|
||||
path_oms = []
|
||||
for elem in pth:
|
||||
if not isinstance(elem, Roadm) and not isinstance(elem, Transceiver):
|
||||
# only Edfa, Fused and Fiber elements have an oms_id attribute
|
||||
path_oms.append(elem.oms_id)
|
||||
# remove duplicate oms_id, order is not important
|
||||
path_oms = list(set(path_oms))
|
||||
# assuming all oms have same freq index
|
||||
if not path_oms:
|
||||
candidate = (None, None, None)
|
||||
return candidate, path_oms
|
||||
freq_index = oms_list[path_oms[0]].spectrum_bitmap.freq_index
|
||||
freq_index_min = oms_list[path_oms[0]].spectrum_bitmap.freq_index_min
|
||||
freq_index_max = oms_list[path_oms[0]].spectrum_bitmap.freq_index_max
|
||||
|
||||
freq_availability = oms_list[path_oms[0]].spectrum_bitmap.bitmap
|
||||
for oms in path_oms[1:]:
|
||||
freq_availability = bitmap_sum(oms_list[oms].spectrum_bitmap.bitmap, freq_availability)
|
||||
if requested_n is None:
|
||||
# avoid slots reserved at the edges: the 0.15e12 Hz (150 GHz) guardband on both sides -> 24 slots
|
||||
candidates = [(freq_index[i]+requested_m, freq_index[i], freq_index[i]+2*requested_m-1)
|
||||
for i in range(len(freq_availability))
|
||||
if freq_availability[i:i+2*requested_m] == [1] * (2*requested_m)
|
||||
and freq_index[i] >= freq_index_min
|
||||
and freq_index[i+2*requested_m-1] <= freq_index_max]
|
||||
|
||||
candidate = select_candidate(candidates, policy='first_fit')
|
||||
else:
|
||||
i = oms_list[path_oms[0]].spectrum_bitmap.geti(requested_n)
|
||||
# print(f'N {requested_n} i {i}')
|
||||
# print(freq_availability[i-m:i+m] )
|
||||
# print(freq_index[i-m:i+m])
|
||||
if (freq_availability[i-requested_m:i+requested_m] == [1] * (2*requested_m) and
|
||||
freq_index[i-requested_m] >= freq_index_min
|
||||
and freq_index[i+requested_m-1] <= freq_index_max):
|
||||
# candidate is the triplet center_n, startn and stopn
|
||||
candidate = (requested_n, requested_n-requested_m, requested_n+requested_m-1)
|
||||
else:
|
||||
candidate = (None, None, None)
|
||||
# print("coucou11")
|
||||
# print(candidate)
|
||||
# print(freq_availability[321:321+2*m])
|
||||
# a = [i+321 for i in range(2*m)]
|
||||
# print(a)
|
||||
# print(candidate)
|
||||
return candidate, path_oms
|
||||
|
||||
def select_candidate(candidates, policy):
|
||||
""" selects a candidate among all available spectrum
|
||||
"""
|
||||
if policy == 'first_fit':
|
||||
if candidates:
|
||||
return candidates[0]
|
||||
else:
|
||||
return (None, None, None)
|
||||
else:
|
||||
raise ServiceError('Only first_fit spectrum assignment policy is implemented.')
|
||||
|
||||
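# Illustrative usage (candidate format (center_n, startn, stopn) as built above):
# select_candidate([(13, 6, 19), (29, 22, 35)], policy='first_fit') returns (13, 6, 19),
# i.e. the lowest-frequency fit.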
def pth_assign_spectrum(pths, rqs, oms_list, rpths):
|
||||
""" basic first fit assignment
|
||||
if reversed paths are provided, it means that the occupation is bidirectional
|
||||
"""
|
||||
for i, pth in enumerate(pths):
|
||||
# computes the number of channels required
|
||||
try:
|
||||
if rqs[i].blocking_reason:
|
||||
rqs[i].blocked = True
|
||||
rqs[i].N = 0
|
||||
rqs[i].M = 0
|
||||
except AttributeError:
|
||||
nb_wl = ceil(rqs[i].path_bandwidth / rqs[i].bit_rate)
|
||||
# computes the total nb of slots according to requested spacing
|
||||
# TODO : express superchannels
|
||||
# assumes that all channels must be grouped
|
||||
# TODO : enables non contiguous reservation in case of blocking
|
||||
requested_m = ceil(rqs[i].spacing / 0.0125e12) * nb_wl
|
||||
# concatenate all path and reversed path elements to derive slots availability
|
||||
(center_n, startn, stopn), path_oms = spectrum_selection(pth + rpths[i], oms_list, requested_m,
|
||||
requested_n=None)
|
||||
# checks that requested_m fits within the returned startn and stopn
|
||||
# if not None, center_n and start, stop frequencies are applicable to all oms of pth
|
||||
# checks that spectrum is not None else indicate blocking reason
|
||||
if center_n is not None:
|
||||
# checks that requested_m fits between startn and stopn
|
||||
if 2 * requested_m > (stopn - startn + 1):
|
||||
msg = f'candidate: {(center_n, startn, stopn)} is not consistent ' +\
|
||||
f'with {requested_m}'
|
||||
LOGGER.critical(msg)
|
||||
raise ValueError(msg)
|
||||
|
||||
for oms_elem in path_oms:
|
||||
oms_list[oms_elem].assign_spectrum(center_n, requested_m)
|
||||
oms_list[oms_elem].add_service(rqs[i].request_id, nb_wl)
|
||||
rqs[i].blocked = False
|
||||
rqs[i].N = center_n
|
||||
rqs[i].M = requested_m
|
||||
else:
|
||||
rqs[i].blocked = True
|
||||
rqs[i].N = 0
|
||||
rqs[i].M = 0
|
||||
rqs[i].blocking_reason = 'NO_SPECTRUM'
|
||||
5
gnpy/core/units.py
Normal file
@@ -0,0 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
UNITS = {'m': 1,
|
||||
'km': 1E3}
|
||||
225
gnpy/core/utils.py
Normal file
@@ -0,0 +1,225 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
gnpy.core.utils
|
||||
===============
|
||||
|
||||
This module contains utility functions that are used with gnpy.
|
||||
'''
|
||||
|
||||
|
||||
import json
|
||||
|
||||
from csv import writer
|
||||
import numpy as np
|
||||
from numpy import pi, cos, sqrt, log10
|
||||
from scipy import constants
|
||||
|
||||
|
||||
def load_json(filename):
|
||||
with open(filename, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
return data
|
||||
|
||||
|
||||
def save_json(obj, filename):
|
||||
with open(filename, 'w', encoding='utf-8') as f:
|
||||
json.dump(obj, f, indent=2, ensure_ascii=False)
|
||||
|
||||
def write_csv(obj, filename):
|
||||
"""
|
||||
convert dictionary items to a csv file
|
||||
the dictionary format :
|
||||
|
||||
{'result category 1':
|
||||
[
|
||||
# 1st line of results
|
||||
{'header 1' : value_xxx,
|
||||
'header 2' : value_yyy},
|
||||
# 2nd line of results: same headers, different results
|
||||
{'header 1' : value_www,
|
||||
'header 2' : value_zzz}
|
||||
],
|
||||
'result_category 2':
|
||||
[
|
||||
{},{}
|
||||
]
|
||||
}
|
||||
|
||||
the generated csv file will be:
|
||||
result_category 1
|
||||
header 1 header 2
|
||||
value_xxx value_yyy
|
||||
value_www value_zzz
|
||||
result_category 2
|
||||
...
|
||||
"""
|
||||
with open(filename, 'w', encoding='utf-8') as f:
|
||||
w = writer(f)
|
||||
for data_key, data_list in obj.items():
|
||||
#main header
|
||||
w.writerow([data_key])
|
||||
#sub headers:
|
||||
headers = [_ for _ in data_list[0].keys()]
|
||||
w.writerow(headers)
|
||||
for data_dict in data_list:
|
||||
w.writerow([_ for _ in data_dict.values()])
|
||||
|
||||
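# Hypothetical usage sketch (file name and keys are made up for illustration):
# write_csv({'results': [{'osnr': 22.1, 'snr': 18.3},
#                        {'osnr': 21.7, 'snr': 17.9}]}, 'results.csv')
# writes a 'results' header row, then the sub-header row 'osnr,snr', then one row per dict.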
def c():
|
||||
"""
|
||||
Returns the speed of light in meters per second
|
||||
"""
|
||||
return constants.c
|
||||
|
||||
|
||||
def arrange_frequencies(length, start, stop):
|
||||
"""Create an array of frequencies
|
||||
|
||||
:param length: number of elements
|
||||
:param start: Start frequency in THz
|
||||
:param stop: Stop frequency in THz
|
||||
:type length: integer
|
||||
:type start: float
|
||||
:type stop: float
|
||||
:return: an array of evenly spaced frequencies between start and stop
|
||||
:rtype: numpy.ndarray
|
||||
"""
|
||||
return np.linspace(start, stop, length)
|
||||
|
||||
def h():
|
||||
"""
|
||||
Returns Planck's constant in J*s
|
||||
"""
|
||||
return constants.h
|
||||
|
||||
|
||||
def lin2db(value):
|
||||
return 10 * log10(value)
|
||||
|
||||
|
||||
def db2lin(value):
|
||||
return 10**(value / 10)
|
||||
|
||||
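# Sanity-check examples (illustrative): lin2db(100) == 20.0 and db2lin(3) is roughly 2
# (1.995...); the two functions are inverses of each other.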
def round2float(number, step):
|
||||
step = round(step, 1)
|
||||
if step >= 0.01:
|
||||
number = round(number / step, 0)
|
||||
number = round(number * step, 1)
|
||||
else:
|
||||
number = round(number, 2)
|
||||
return number
|
||||
|
||||
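# Example (illustrative): round2float(12.34, 0.5) returns 12.5 (nearest multiple of the
# 0.5 step); with a step below 0.01 the value is simply rounded to two decimals.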
wavelength2freq = constants.lambda2nu
|
||||
|
||||
def freq2wavelength(value):
|
||||
""" Converts frequency units to wavelength units.
|
||||
"""
|
||||
return c() / value
|
||||
|
||||
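# Illustration (approximate value): freq2wavelength(193.1e12) is about 1.5525e-06 m,
# i.e. roughly 1552.5 nm, near the centre of the ITU-T C-band grid.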
def snr_sum(snr, bw, snr_added, bw_added=12.5e9):
|
||||
snr_added = snr_added - lin2db(bw/bw_added)
|
||||
snr = -lin2db(db2lin(-snr)+db2lin(-snr_added))
|
||||
return snr
|
||||
|
||||
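# Worked example (illustrative): snr_sum(20, 12.5e9, 30) leaves snr_added at 30 dB
# (bw == bw_added so the bandwidth correction is 0 dB), then combines the noise terms in
# linear units: -10*log10(10**-2 + 10**-3) ~= 19.59 dB.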
def deltawl2deltaf(delta_wl, wavelength):
|
||||
""" deltawl2deltaf(delta_wl, wavelength):
|
||||
delta_wl is BW in wavelength units
|
||||
wavelength is the center wl
|
||||
units for delta_wl and wavelength must be same
|
||||
|
||||
:param delta_wl: delta wavelength BW in same units as wavelength
|
||||
:param wavelength: wavelength BW is relevant for
|
||||
:type delta_wl: float or numpy.ndarray
|
||||
:type wavelength: float
|
||||
:return: The BW in frequency units
|
||||
:rtype: float or ndarray
|
||||
|
||||
"""
|
||||
f = wavelength2freq(wavelength)
|
||||
return delta_wl * f / wavelength
|
||||
|
||||
|
||||
def deltaf2deltawl(delta_f, frequency):
|
||||
""" deltawl2deltaf(delta_f, frequency):
|
||||
converts delta frequency to delta wavelength
|
||||
units for delta_f and frequency must be the same
|
||||
|
||||
:param delta_f: delta frequency in same units as frequency
|
||||
:param frequency: frequency BW is relevant for
|
||||
:type delta_f: float or numpy.ndarray
|
||||
:type frequency: float
|
||||
:return: The BW in wavelength units
|
||||
:rtype: float or ndarray
|
||||
|
||||
"""
|
||||
wl = freq2wavelength(frequency)
|
||||
return delta_f * wl / frequency
|
||||
|
||||
|
||||
def rrc(ffs, baud_rate, alpha):
|
||||
""" rrc(ffs, baud_rate, alpha): computes the root-raised cosine filter
|
||||
function.
|
||||
|
||||
:param ffs: A numpy array of frequencies
|
||||
:param baud_rate: The Baud Rate of the System
|
||||
:param alpha: The roll-off factor of the filter
|
||||
:type ffs: numpy.ndarray
|
||||
:type baud_rate: float
|
||||
:type alpha: float
|
||||
:return: hf a numpy array of the filter shape
|
||||
:rtype: numpy.ndarray
|
||||
|
||||
"""
|
||||
Ts = 1 / baud_rate
|
||||
l_lim = (1 - alpha) / (2 * Ts)
|
||||
r_lim = (1 + alpha) / (2 * Ts)
|
||||
hf = np.zeros(np.shape(ffs))
|
||||
slope_inds = np.where(
|
||||
np.logical_and(np.abs(ffs) > l_lim, np.abs(ffs) < r_lim))
|
||||
hf[slope_inds] = 0.5 * (1 + cos((pi * Ts / alpha) *
|
||||
(np.abs(ffs[slope_inds]) - l_lim)))
|
||||
p_inds = np.where(np.logical_and(np.abs(ffs) > 0, np.abs(ffs) < l_lim))
|
||||
hf[p_inds] = 1
|
||||
return sqrt(hf)
|
||||
|
||||
def merge_amplifier_restrictions(dict1, dict2):
|
||||
"""Updates contents of dicts recursively
|
||||
|
||||
>>> d1 = {'params': {'restrictions': {'preamp_variety_list': [], 'booster_variety_list': []}}}
|
||||
>>> d2 = {'params': {'target_pch_out_db': -20}}
|
||||
>>> merge_amplifier_restrictions(d1, d2)
|
||||
{'params': {'restrictions': {'preamp_variety_list': [], 'booster_variety_list': []}, 'target_pch_out_db': -20}}
|
||||
|
||||
>>> d3 = {'params': {'restrictions': {'preamp_variety_list': ['foo'], 'booster_variety_list': ['bar']}}}
|
||||
>>> merge_amplifier_restrictions(d1, d3)
|
||||
{'params': {'restrictions': {'preamp_variety_list': [], 'booster_variety_list': []}}}
|
||||
"""
|
||||
|
||||
copy_dict1 = dict1.copy()
|
||||
for key in dict2:
|
||||
if key in dict1:
|
||||
if isinstance(dict1[key], dict):
|
||||
copy_dict1[key] = merge_amplifier_restrictions(copy_dict1[key], dict2[key])
|
||||
else:
|
||||
copy_dict1[key] = dict2[key]
|
||||
return copy_dict1
|
||||
|
||||
def silent_remove(this_list, elem):
|
||||
"""Remove matching elements from a list without raising ValueError
|
||||
|
||||
>>> li = [0, 1]
|
||||
>>> li = silent_remove(li, 1)
|
||||
>>> li
|
||||
[0]
|
||||
>>> li = silent_remove(li, 1)
|
||||
>>> li
|
||||
[0]
|
||||
"""
|
||||
|
||||
try:
|
||||
this_list.remove(elem)
|
||||
except ValueError:
|
||||
pass
|
||||
return this_list
|
||||
904
gnpy/gnpy.py
@@ -1,904 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""Top-level package for gnpy."""
|
||||
|
||||
__author__ = """<TBD>"""
|
||||
__email__ = '<TBD>@<TBD>.com'
|
||||
__version__ = '0.1.0'
|
||||
|
||||
import numpy as np
|
||||
import multiprocessing as mp
|
||||
import scipy.interpolate as interp
|
||||
|
||||
"""
|
||||
GNPy: a Python 3 implementation of the Gaussian Noise (GN) Model of nonlinear
|
||||
propagation, developed by the OptCom group, Department of Electronics and
|
||||
Telecommunications, Politecnico di Torino, Italy
|
||||
"""
|
||||
|
||||
__credits__ = ["Mattia Cantono", "Vittorio Curri", "Alessio Ferrari"]
|
||||
|
||||
|
||||
def raised_cosine_comb(f, rs, roll_off, center_freq, power):
|
||||
""" Returns an array storing the PSD of a WDM comb of raised cosine shaped
|
||||
channels at the input frequencies defined in array f
|
||||
|
||||
:param f: Array of frequencies in THz
|
||||
:param rs: Array of Symbol Rates in TBaud. One Symbol rate for each channel
|
||||
:param roll_off: Array of roll-off factors [0,1). One per channel
|
||||
:param center_freq: Array of channels central frequencies in THz. One per channel
|
||||
:param power: Array of channel powers in W. One per channel
|
||||
:return: PSD of the WDM comb evaluated over f
|
||||
"""
|
||||
ts_arr = 1.0 / rs
|
||||
passband_arr = (1.0 - roll_off) / (2.0 * ts_arr)
|
||||
stopband_arr = (1.0 + roll_off) / (2.0 * ts_arr)
|
||||
g = power / rs
|
||||
psd = np.zeros(np.shape(f))
|
||||
for ind in range(np.size(center_freq)):
|
||||
f_nch = center_freq[ind]
|
||||
g_ch = g[ind]
|
||||
ts = ts_arr[ind]
|
||||
passband = passband_arr[ind]
|
||||
stopband = stopband_arr[ind]
|
||||
ff = np.abs(f - f_nch)
|
||||
tf = ff - passband
|
||||
if roll_off[ind] == 0:
|
||||
psd = np.where(tf <= 0, g_ch, 0.) + psd
|
||||
else:
|
||||
psd = g_ch * (np.where(tf <= 0, 1., 0.) + 1.0 / 2.0 * (1 + np.cos(np.pi * ts / roll_off[ind] *
|
||||
tf)) * np.where(tf > 0, 1., 0.) *
|
||||
np.where(np.abs(ff) <= stopband, 1., 0.)) + psd
|
||||
|
||||
return psd
|
||||
|
||||
|
||||
def fwm_eff(a, Lspan, b2, ff):
|
||||
""" Computes the four-wave mixing efficiency given the fiber characteristics
|
||||
over a given frequency set ff
|
||||
:param a: Fiber loss coefficient in 1/km
|
||||
:param Lspan: Fiber length in km
|
||||
:param b2: Fiber Dispersion coefficient in ps/THz/km
|
||||
:param ff: Array of Frequency points in THz
|
||||
:return: FWM efficiency rho
|
||||
"""
|
||||
rho = np.power(np.abs((1.0 - np.exp(-2.0 * a * Lspan + 1j * 4.0 * np.pi * np.pi * b2 * Lspan * ff)) / (
|
||||
2.0 * a - 1j * 4.0 * np.pi * np.pi * b2 * ff)), 2)
|
||||
return rho
|
||||
|
||||
|
||||
def get_freqarray(f, Bopt, fmax, max_step, f_dense_low, f_dense_up, df_dense):
|
||||
""" Returns a non-uniformly spaced frequency array useful for fast GN-model.
|
||||
integration. The frequency array is made of a denser area, sided by two
|
||||
log-spaced arrays
|
||||
:param f: Central frequency at which NLI is evaluated in THz
|
||||
:param Bopt: Total optical bandwidth of the system in THz
|
||||
:param fmax: Upper limit of the integration domain in THz
|
||||
:param max_step: Maximum step size for frequency array definition in THz
|
||||
:param f_dense_low: Lower limit of denser frequency region in THz
|
||||
:param f_dense_up: Upper limit of denser frequency region in THz
|
||||
:param df_dense: Step size to be used in the denser frequency region in THz
|
||||
:return: Non uniformly defined frequency array
|
||||
"""
|
||||
f_dense = np.arange(f_dense_low, f_dense_up, df_dense)
|
||||
k = Bopt / 2.0 / (Bopt / 2.0 - max_step) # Compute Step ratio for log-spaced array definition
|
||||
if f < 0:
|
||||
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_low)) / np.log(k) + 1.0)
|
||||
f1_short = -(np.abs(f_dense_low) * np.power(k, np.arange(Nlog_short, 0.0, -1.0) - 1.0))
|
||||
k = (Bopt / 2 + (np.abs(f_dense_up) - f_dense_low)) / (Bopt / 2.0 - max_step + (np.abs(f_dense_up) - f_dense_up))
|
||||
Nlog_long = np.ceil(np.log((fmax + (np.abs(f_dense_up) - f_dense_up)) / abs(f_dense_up)) * 1.0 / np.log(k) + 1.0)
|
||||
f1_long = np.abs(f_dense_up) * np.power(k, (np.arange(1, Nlog_long + 1) - 1.0)) - (
|
||||
np.abs(f_dense_up) - f_dense_up)
|
||||
f1_array = np.concatenate([f1_short, f_dense[1:], f1_long])
|
||||
else:
|
||||
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_up)) / np.log(k) + 1.0)
|
||||
f1_short = f_dense_up * np.power(k, np.arange(1, Nlog_short + 1.0) - 1.0)
|
||||
k = (Bopt / 2.0 + (abs(f_dense_low) + f_dense_low)) / (Bopt / 2.0 - max_step + (abs(f_dense_low) + f_dense_low))
|
||||
Nlog_long = np.ceil(np.log((fmax + (np.abs(f_dense_low) + f_dense_low)) / np.abs(f_dense_low)) / np.log(k) + 1)
|
||||
f1_long = -(np.abs(f_dense_low) * np.power(k, np.arange(Nlog_long, 0, -1) - 1.0)) + (
|
||||
abs(f_dense_low) + f_dense_low)
|
||||
f1_array = np.concatenate([f1_long, f_dense[1:], f1_short])
|
||||
return f1_array
|
||||
|
||||
|
||||
def GN_integral(b2, Lspan, a_db, gam, f_ch, b_ch, roll_off, power, Nch, model_param):
|
||||
""" GN_integral computes the GN reference formula via smart brute force integration. The Gaussian Noise model is
|
||||
applied in its incoherent form (phased-array factor =1). The function computes the integral by columns: for each f1,
|
||||
a non-uniformly spaced f2 array is generated, and the integrand function is computed there. At the end of the loop
|
||||
on f1, the overall GNLI is computed. Accuracy can be tuned by operating on model_param argument.
|
||||
|
||||
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
:param Lspan: Fiber Span length in km. Scalar
|
||||
:param a_db: Fiber loss coefficient in dB/km. Scalar
|
||||
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param b_ch: Channels' -3 dB bandwidth. Array of size 1xNch
|
||||
:param roll_off: Channels' Roll-off factors [0,1). Array of size 1xNch
|
||||
:param power: Channels' power values in W. Array of size 1xNch
|
||||
:param Nch: Number of channels. Scalar
|
||||
:param model_param: Dictionary with model parameters for accuracy tuning
|
||||
model_param['min_FWM_inv']: Minimum FWM efficiency value to be considered for high density
|
||||
integration in dB
|
||||
model_param['n_grid']: Maximum Number of integration points to be used in each frequency slot of
|
||||
the spectrum
|
||||
model_param['n_grid_min']: Minimum Number of integration points to be used in each frequency
|
||||
slot of the spectrum
|
||||
model_param['f_array']: Frequencies at which to evaluate GNLI, expressed in THz
|
||||
:return: GNLI: power spectral density in W/THz of the nonlinear interference at frequencies model_param['f_array']
|
||||
"""
|
||||
alpha_lin = a_db / 20.0 / np.log10(np.e) # Conversion in linear units 1/km
|
||||
min_FWM_inv = np.power(10, model_param['min_FWM_inv'] / 10) # Conversion in linear units
|
||||
n_grid = model_param['n_grid']
|
||||
n_grid_min = model_param['n_grid_min']
|
||||
f_array = model_param['f_array']
|
||||
fmax = (f_ch[-1] - (b_ch[-1] / 2.0)) - (f_ch[0] - (b_ch[0] / 2.0)) # Get frequency limit
|
||||
f2eval = np.max(np.diff(f_ch))
|
||||
Bopt = f2eval * Nch # Overall optical bandwidth [THz]
|
||||
min_step = f2eval / n_grid # Minimum integration step
|
||||
max_step = f2eval / n_grid_min # Maximum integration step
|
||||
f_dense_start = np.abs(
|
||||
np.sqrt(np.power(alpha_lin, 2) / (4.0 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1.0)) / f2eval)
|
||||
f_ind_eval = 0
|
||||
GNLI = np.full(f_array.size, np.nan) # Pre-allocate results
|
||||
for f in f_array: # Loop over f
|
||||
f_dense_low = f - f_dense_start
|
||||
f_dense_up = f + f_dense_start
|
||||
if f_dense_low < -fmax:
|
||||
f_dense_low = -fmax
|
||||
if f_dense_low == 0.0:
|
||||
f_dense_low = -min_step
|
||||
if f_dense_up == 0.0:
|
||||
f_dense_up = min_step
|
||||
if f_dense_up > fmax:
|
||||
f_dense_up = fmax
|
||||
f_dense_width = np.abs(f_dense_up - f_dense_low)
|
||||
n_grid_dense = np.ceil(f_dense_width / min_step)
|
||||
df = f_dense_width / n_grid_dense
|
||||
# Get non-uniformly spaced f1 array
|
||||
f1_array = get_freqarray(f, Bopt, fmax, max_step, f_dense_low, f_dense_up, df)
|
||||
G1 = raised_cosine_comb(f1_array, b_ch, roll_off, f_ch, power) # Get corresponding spectrum
|
||||
Gpart = np.zeros(f1_array.size) # Pre-allocate partial result for inner integral
|
||||
f_ind = 0
|
||||
for f1 in f1_array: # Loop over f1
|
||||
if f1 != f:
|
||||
f_lim = np.sqrt(np.power(alpha_lin, 2) / (4.0 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1.0)) / (
|
||||
f1 - f) + f
|
||||
f2_dense_up = np.maximum(f_lim, -f_lim)
|
||||
f2_dense_low = np.minimum(f_lim, -f_lim)
|
||||
if f2_dense_low == 0:
|
||||
f2_dense_low = -min_step
|
||||
if f2_dense_up == 0:
|
||||
f2_dense_up = min_step
|
||||
if f2_dense_low < -fmax:
|
||||
f2_dense_low = -fmax
|
||||
if f2_dense_up > fmax:
|
||||
f2_dense_up = fmax
|
||||
else:
|
||||
f2_dense_up = fmax
|
||||
f2_dense_low = -fmax
|
||||
f2_dense_width = np.abs(f2_dense_up - f2_dense_low)
|
||||
n2_grid_dense = np.ceil(f2_dense_width / min_step)
|
||||
df2 = f2_dense_width / n2_grid_dense
|
||||
# Get non-uniformly spaced f2 array
|
||||
f2_array = get_freqarray(f, Bopt, fmax, max_step, f2_dense_low, f2_dense_up, df2)
|
||||
f2_array = f2_array[f2_array >= f1] # Do not consider points below the bisector of quadrants I and III
|
||||
if f2_array.size > 0:
|
||||
G2 = raised_cosine_comb(f2_array, b_ch, roll_off, f_ch, power) # Get spectrum there
|
||||
f3_array = f1 + f2_array - f # Compute f3
|
||||
G3 = raised_cosine_comb(f3_array, b_ch, roll_off, f_ch, power) # Get spectrum over f3
|
||||
G = G2 * G3 * G1[f_ind]
|
||||
if np.count_nonzero(G):
|
||||
FWM_eff = fwm_eff(alpha_lin, Lspan, b2, (f1 - f) * (f2_array - f)) # Compute FWM efficiency
|
||||
Gpart[f_ind] = 2.0 * np.trapz(FWM_eff * G, f2_array) # Compute inner integral
|
||||
f_ind += 1
|
||||
# Compute outer integral. Nominal span loss already compensated
|
||||
GNLI[f_ind_eval] = 16.0 / 27.0 * gam * gam * np.trapz(Gpart, f1_array)
|
||||
f_ind_eval += 1 # Next frequency
|
||||
return GNLI # Return GNLI array in W/THz and the array of the corresponding frequencies
|
||||
|
||||
|
||||
def compute_psi(b2, l_eff_a, f_ch, channel_index, interfering_index, b_ch):
|
||||
""" compute_psi computes the psi coefficient of the analytical formula.
|
||||
|
||||
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
:param l_eff_a: Asymptotic effective length in km. Scalar
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param channel_index: Index of the channel. Scalar
|
||||
:param interfering_index: Index of the interfering signal. Scalar
|
||||
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
|
||||
:return: psi: the coefficient
|
||||
"""
|
||||
b2 = np.abs(b2)
|
||||
|
||||
if channel_index == interfering_index: # The signal interferes with itself
|
||||
b_ch_sig = b_ch[channel_index]
|
||||
psi = np.arcsinh(0.5 * np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig ** 2.0)
|
||||
else:
|
||||
f_sig = f_ch[channel_index]
|
||||
b_ch_sig = b_ch[channel_index]
|
||||
f_int = f_ch[interfering_index]
|
||||
b_ch_int = b_ch[interfering_index]
|
||||
del_f = f_sig - f_int
|
||||
psi = np.arcsinh(np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig * (del_f + 0.5 * b_ch_int))
|
||||
psi -= np.arcsinh(np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig * (del_f - 0.5 * b_ch_int))
|
||||
|
||||
return psi
|
||||
|
||||
|
||||
def analytic_formula(ind, b2, l_eff, l_eff_a, gam, f_ch, g_ch, b_ch, n_ch):
|
||||
""" analytic_formula computes the analytical formula.
|
||||
|
||||
:param ind: index of the channel at which g_nli is computed. Scalar
|
||||
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
:param l_eff: Effective length in km. Scalar
|
||||
:param l_eff_a: Asymptotic effective length in km. Scalar
|
||||
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param g_ch: Power spectral density W/THz. Array of size 1xNch
|
||||
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
|
||||
:param n_ch: Number of channels. Scalar
|
||||
:return: g_nli: power spectral density in W/THz of the nonlinear interference
|
||||
"""
|
||||
ch_psd = g_ch[ind]
|
||||
b2 = abs(b2)
|
||||
|
||||
g_nli = 0.0
|
||||
for n in np.arange(0, n_ch):
|
||||
psi = compute_psi(b2, l_eff_a, f_ch, ind, n, b_ch)
|
||||
g_nli += g_ch[n] * ch_psd ** 2.0 * psi
|
||||
|
||||
g_nli *= (16.0 / 27.0) * (gam * l_eff) ** 2.0 / (2.0 * np.pi * b2 * l_eff_a)
|
||||
|
||||
return g_nli
|
||||
|
||||
|
||||
def gn_analytic(b2, l_span, a_db, gam, f_ch, b_ch, power, n_ch):
|
||||
""" gn_analytic computes the GN reference formula via analytical solution.
|
||||
|
||||
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
:param l_span: Fiber Span length in km. Scalar
|
||||
:param a_db: Fiber loss coefficient in dB/km. Scalar
|
||||
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
|
||||
:param power: Channels' power values in W. Array of size 1xNch
|
||||
:param n_ch: Number of channels. Scalar
|
||||
:return: g_nli: power spectral density in W/THz of the nonlinear interference at frequencies model_param['f_array']
|
||||
"""
|
||||
g_ch = power / b_ch
|
||||
alpha_lin = a_db / 20.0 / np.log10(np.e) # Conversion in linear units 1/km
|
||||
l_eff = (1.0 - np.exp(-2.0 * alpha_lin * l_span)) / (2.0 * alpha_lin) # Effective length
|
||||
l_eff_a = 1.0 / (2.0 * alpha_lin) # Asymptotic effective length
|
||||
g_nli = np.zeros(f_ch.size)
|
||||
for ind in np.arange(0, f_ch.size):
|
||||
g_nli[ind] = analytic_formula(ind, b2, l_eff, l_eff_a, gam, f_ch, g_ch, b_ch, n_ch)
|
||||
|
||||
return g_nli
|
||||
|
||||
|
||||
def get_f_computed_interp(f_ch, n_not_interp):
|
||||
""" get_f_computed_array returns the arrays containing the frequencies at which g_nli is computed and interpolated.
|
||||
|
||||
:param f_ch: the overall frequency array. Array of size 1xnum_ch
|
||||
:param n_not_interp: the number of points at which g_nli has to be computed
|
||||
:return: f_nli_comp: the array containing the frequencies at which g_nli is computed
|
||||
:return: f_nli_interp: the array containing the frequencies at which g_nli is interpolated
|
||||
"""
|
||||
num_ch = len(f_ch)
|
||||
if num_ch < n_not_interp: # It's useless to compute g_nli in a number of points larger than num_ch
|
||||
n_not_interp = num_ch
|
||||
|
||||
# Compute f_nli_comp
|
||||
n_not_interp_left = np.ceil((n_not_interp - 1.0) / 2.0)
|
||||
n_not_interp_right = np.floor((n_not_interp - 1.0) / 2.0)
|
||||
central_index = len(f_ch) // 2
|
||||
print(central_index)
|
||||
|
||||
f_nli_central = np.array([f_ch[central_index]], copy=True)
|
||||
|
||||
if n_not_interp_left > 0:
|
||||
index = np.linspace(0, central_index - 1, n_not_interp_left, dtype='int')
|
||||
f_nli_left = np.array(f_ch[index], copy=True)
|
||||
else:
|
||||
f_nli_left = np.array([])
|
||||
|
||||
if n_not_interp_right > 0:
|
||||
index = np.linspace(-1, -central_index, n_not_interp_right, dtype='int')
|
||||
f_nli_right = np.array(f_ch[index], copy=True)
|
||||
f_nli_right = f_nli_right[::-1] # Reverse the order of the array
|
||||
else:
|
||||
f_nli_right = np.array([])
|
||||
|
||||
f_nli_comp = np.concatenate([f_nli_left, f_nli_central, f_nli_right])
|
||||
|
||||
# Compute f_nli_interp
|
||||
f_ch_sorted = np.sort(f_ch)
|
||||
index = np.searchsorted(f_ch_sorted, f_nli_comp)
|
||||
|
||||
f_nli_interp = np.array(f_ch, copy=True)
|
||||
f_nli_interp = np.delete(f_nli_interp, index)
|
||||
return f_nli_comp, f_nli_interp
|
||||
|
||||
|
||||
def interpolate_in_range(x, y, x_new, kind_interp):
|
||||
""" Given some samples y of the function y(x), interpolate_in_range returns the interpolation of values y(x_new)
|
||||
|
||||
:param x: The points at which y(x) is evaluated. Array
|
||||
:param y: The values of y(x). Array
|
||||
:param x_new: The values at which y(x) has to be interpolated. Array
|
||||
:param kind_interp: The interpolation method of the function scipy.interpolate.interp1d. String
|
||||
:return: y_new: the new interpolated samples
|
||||
"""
|
||||
if x.size == 1:
|
||||
y_new = y * np.ones(x_new.size)
|
||||
elif x.size == 2:
|
||||
x = np.append(x, x_new[-1])
|
||||
y = np.append(y, y[-1])
|
||||
func = interp.interp1d(x, y, kind=kind_interp, bounds_error=False)
|
||||
y_new = func(x_new)
|
||||
else:
|
||||
func = interp.interp1d(x, y, kind=kind_interp, bounds_error=False)
|
||||
y_new = func(x_new)
|
||||
|
||||
return y_new
|
||||
|
||||
|
||||
def gn_model(spectrum_param, fiber_param, accuracy_param, n_cores):
|
||||
""" gn_model can compute the gn model both analytically or through the smart brute force
|
||||
integral.
|
||||
|
||||
:param spectrum_param: Dictionary with spectrum parameters
|
||||
spectrum_param['num_ch']: Number of channels. Scalar
|
||||
spectrum_param['f_ch']: Baseband channels center frequencies in THz. Array of size 1xnum_ch
|
||||
spectrum_param['b_ch']: Channels' -3 dB band [THz]. Array of size 1xnum_ch
|
||||
spectrum_param['roll_off']: Channels' Roll-off factors [0,1). Array of size 1xnum_ch
|
||||
spectrum_param['power']: Channels' power values in W. Array of size 1xnum_ch
|
||||
:param fiber_param: Dictionary with the parameters of the fiber
|
||||
fiber_param['alpha']: Fiber loss coefficient in dB/km. Scalar
|
||||
fiber_param['span_length']: Fiber Span length in km. Scalar
|
||||
fiber_param['beta_2']: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
fiber_param['gamma']: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param accuracy_param: Dictionary with model parameters for accuracy tuning
|
||||
accuracy_param['is_analytic']: A boolean indicating if you want to compute the NLI through
|
||||
the analytic formula (is_analytic = True) or the smart brute force integration (is_analytic =
|
||||
False). Boolean
|
||||
accuracy_param['points_not_interp']: The number of NLI which will be calculated. Others are
|
||||
interpolated
|
||||
accuracy_param['kind_interp']: The kind of interpolation using the function
|
||||
scipy.interpolate.interp1d
|
||||
accuracy_param['th_fwm']: Minimum FWM efficiency value to be considered for high density
|
||||
integration in dB
|
||||
accuracy_param['n_points']: Maximum Number of integration points to be used in each frequency
|
||||
slot of the spectrum
|
||||
accuracy_param['n_points_min']: Minimum Number of integration points to be used in each
|
||||
frequency
|
||||
slot of the spectrum
|
||||
:return: g_nli_comp: the NLI power spectral density in W/THz computed through GN model
|
||||
:return: f_nli_comp: the frequencies at which g_nli_comp is evaluated
|
||||
:return: g_nli_interp: the NLI power spectral density in W/THz computed through interpolation of g_nli_comp
|
||||
:return: f_nli_interp: the frequencies at which g_nli_interp is estimated
|
||||
"""
|
||||
# Take signal parameters
|
||||
num_ch = spectrum_param['num_ch']
|
||||
f_ch = spectrum_param['f_ch']
|
||||
b_ch = spectrum_param['b_ch']
|
||||
roll_off = spectrum_param['roll_off']
|
||||
power = spectrum_param['power']
|
||||
|
||||
# Take fiber parameters
|
||||
a_db = fiber_param['alpha']
|
||||
l_span = fiber_param['span_length']
|
||||
beta2 = fiber_param['beta_2']
|
||||
gam = fiber_param['gamma']
|
||||
|
||||
# Take accuracy parameters
|
||||
is_analytic = accuracy_param['is_analytic']
|
||||
n_not_interp = accuracy_param['points_not_interp']
|
||||
kind_interp = accuracy_param['kind_interp']
|
||||
th_fwm = accuracy_param['th_fwm']
|
||||
n_points = accuracy_param['n_points']
|
||||
n_points_min = accuracy_param['n_points_min']
|
||||
|
||||
# Computing NLI
|
||||
if is_analytic: # Analytic solution
|
||||
g_nli_comp = gn_analytic(beta2, l_span, a_db, gam, f_ch, b_ch, power, num_ch)
|
||||
f_nli_comp = np.copy(f_ch)
|
||||
g_nli_interp = []
|
||||
f_nli_interp = []
|
||||
else: # Smart brute force integration
|
||||
f_nli_comp, f_nli_interp = get_f_computed_interp(f_ch, n_not_interp)
|
||||
|
||||
model_param = {'min_FWM_inv': th_fwm, 'n_grid': n_points, 'n_grid_min': n_points_min,
|
||||
'f_array': np.array(f_nli_comp, copy=True)}
|
||||
|
||||
g_nli_comp = GN_integral(beta2, l_span, a_db, gam, f_ch, b_ch, roll_off, power, num_ch, model_param)
|
||||
|
||||
# Interpolation
|
||||
g_nli_interp = interpolate_in_range(f_nli_comp, g_nli_comp, f_nli_interp, kind_interp)
|
||||
|
||||
a_zero = fiber_param['alpha'] * fiber_param['span_length']
|
||||
a_tilting = fiber_param['alpha_1st'] * fiber_param['span_length']
|
||||
|
||||
attenuation_db_comp = compute_attenuation_profile(a_zero, a_tilting, f_nli_comp)
|
||||
attenuation_lin_comp = 10 ** (-abs(attenuation_db_comp) / 10)
|
||||
|
||||
g_nli_comp *= attenuation_lin_comp
|
||||
|
||||
attenuation_db_interp = compute_attenuation_profile(a_zero, a_tilting, f_nli_interp)
|
||||
attenuation_lin_interp = 10 ** (-np.abs(attenuation_db_interp) / 10)
|
||||
|
||||
g_nli_interp *= attenuation_lin_interp
|
||||
|
||||
return g_nli_comp, f_nli_comp, g_nli_interp, f_nli_interp
|
||||
|
||||
|
||||
def compute_gain_profile(gain_zero, gain_tilting, freq):
|
||||
""" compute_gain_profile evaluates the gain at the frequencies freq.
|
||||
|
||||
:param gain_zero: the gain at f=0 in dB. Scalar
|
||||
:param gain_tilting: the gain tilt in dB/THz. Scalar
|
||||
:param freq: the baseband frequencies at which the gain profile is computed in THz. Array
|
||||
:return: gain: the gain profile in dB
|
||||
"""
|
||||
gain = gain_zero + gain_tilting * freq
|
||||
return gain
|
||||
|
||||
|
||||
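# Illustrative example (values are arbitrary):
# compute_gain_profile(20.0, 0.5, np.array([-1.0, 0.0, 1.0])) returns array([19.5, 20.0, 20.5]),
# i.e. a 20 dB flat gain with a 0.5 dB/THz tilt.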
def compute_ase_noise(noise_fig, gain, central_freq, freq):
|
||||
""" compute_ase_noise evaluates the ASE spectral density at the frequencies freq.
|
||||
|
||||
:param noise_fig: the amplifier noise figure in dB. Scalar
|
||||
:param gain: the gain profile in dB at the frequencies contained in freq array. Array
|
||||
:param central_freq: the central frequency of the WDM comb. Scalar
|
||||
:param freq: the baseband frequencies at which the ASE noise is computed in THz. Array
|
||||
:return: g_ase: the ase noise profile
|
||||
"""
|
||||
# the Planck constant in W/THz^2
|
||||
planck = (6.62607004 * 1e-34) * 1e24
|
||||
|
||||
# Conversion from dB to linear
|
||||
gain_lin = np.power(10, gain / 10.0)
|
||||
noise_fig_lin = np.power(10, noise_fig / 10.0)
|
||||
|
||||
g_ase = (gain_lin - 1) * noise_fig_lin * planck * (central_freq + freq)
|
||||
return g_ase
|
||||
|
||||
|
||||
def compute_edfa_profile(gain_zero, gain_tilting, noise_fig, central_freq, freq):
|
||||
""" compute_edfa_profile evaluates the gain profile and the ASE spectral density at the frequencies freq.
|
||||
|
||||
:param gain_zero: the gain at f=0 in dB. Scalar
|
||||
:param gain_tilting: the gain tilt in dB/THz. Scalar
|
||||
:param noise_fig: the amplifier noise figure in dB. Scalar
|
||||
:param central_freq: the central frequency of the WDM comb. Scalar
|
||||
:param freq: the baseband frequencies at which the ASE noise is computed in THz. Array
|
||||
:return: gain: the gain profile in dB
|
||||
:return: g_ase: the ase noise profile in W/THz
|
||||
"""
|
||||
gain = compute_gain_profile(gain_zero, gain_tilting, freq)
|
||||
g_ase = compute_ase_noise(noise_fig, gain, central_freq, freq)
|
||||
|
||||
return gain, g_ase
|
||||
|
||||
|
||||
def compute_attenuation_profile(a_zero, a_tilting, freq):
|
||||
"""compute_attenuation_profile returns the attenuation profile at the frequencies freq
|
||||
|
||||
:param a_zero: the attenuation [dB] @ the baseband central frequency. Scalar
|
||||
:param a_tilting: the attenuation tilt in dB/THz. Scalar
|
||||
:param freq: the baseband frequencies at which attenuation is computed [THz]. Array
|
||||
:return: attenuation: the attenuation profile in dB
|
||||
"""
|
||||
|
||||
if len(freq):
|
||||
attenuation = a_zero + a_tilting * freq
|
||||
|
||||
# abs in order to avoid ambiguity due to the sign convention
|
||||
attenuation = abs(attenuation)
|
||||
else:
|
||||
attenuation = []
|
||||
|
||||
return attenuation
|
||||
|
||||
|
||||
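# Illustrative example (arbitrary numbers):
# compute_attenuation_profile(10.0, 0.2, np.array([0.0, 2.0])) returns approximately
# array([10.0, 10.4]); an empty freq array yields an empty attenuation list.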
def passive_component(spectrum, a_zero, a_tilting, freq):
|
||||
"""passive_component updates the input spectrum with the attenuation described by a_zero and a_tilting
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param a_zero: attenuation at the central frequency [dB]. Scalar
|
||||
:param a_tilting: attenuation tilting [dB/THz]. Scalar
|
||||
:param freq: the baseband frequency of each WDM channel [THz]. Array
|
||||
:return: None
|
||||
"""
|
||||
attenuation_db = compute_attenuation_profile(a_zero, a_tilting, freq)
|
||||
attenuation_lin = 10 ** np.divide(-abs(attenuation_db), 10.0)
|
||||
|
||||
for index, s in enumerate(spectrum['signals']):
|
||||
spectrum['signals'][index]['p_ch'] *= attenuation_lin[index]
|
||||
spectrum['signals'][index]['p_nli'] *= attenuation_lin[index]
|
||||
spectrum['signals'][index]['p_ase'] *= attenuation_lin[index]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def optical_amplifier(spectrum, gain_zero, gain_tilting, noise_fig, central_freq, freq, b_eq):
|
||||
"""optical_amplifier updates the input spectrum with the gain described by gain_zero and gain_tilting plus ASE noise
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param gain_zero: gain at the central frequency [dB]. Scalar
|
||||
:param gain_tilting: gain tilting [dB/THz]. Scalar
|
||||
:param noise_fig: the noise figure of the amplifier [dB]. Scalar
|
||||
:param central_freq: the central frequency of the optical band [THz]. Scalar
|
||||
:param freq: the central frequency of each WDM channel [THz]. Array
|
||||
:param b_eq: the equivalent -3 dB bandwidth of each WDM channel [THZ]. Array
|
||||
:return: None
|
||||
"""
|
||||
|
||||
gain_db, g_ase = compute_edfa_profile(gain_zero, gain_tilting, noise_fig, central_freq, freq)
|
||||
|
||||
p_ase = np.multiply(g_ase, b_eq)
|
||||
|
||||
gain_lin = 10 ** np.divide(gain_db, 10.0)
|
||||
|
||||
for index, s in enumerate(spectrum['signals']):
|
||||
spectrum['signals'][index]['p_ch'] *= gain_lin[index]
|
||||
spectrum['signals'][index]['p_nli'] *= gain_lin[index]
|
||||
spectrum['signals'][index]['p_ase'] *= gain_lin[index]
|
||||
spectrum['signals'][index]['p_ase'] += p_ase[index]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def fiber(spectrum, fiber_param, fiber_length, f_ch, b_ch, roll_off, control_param):
|
||||
""" fiber updates spectrum with the effects of the fiber
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param fiber_param: Dictionary with the parameters of the fiber
|
||||
fiber_param['alpha']: Fiber loss coefficient in dB/km. Scalar
|
||||
fiber_param['beta_2']: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
fiber_param['n_2']: second-order nonlinear refractive index [m^2/W]. Scalar
|
||||
fiber_param['a_eff']: the effective area of the fiber [um^2]. Scalar
|
||||
:param fiber_length: the span length [km]. Scalar
|
||||
:param f_ch: the baseband frequencies of the WDM channels [THz]. Scalar
|
||||
:param b_ch: the -3 dB bandwidth of each WDM channel [THz]. Array
|
||||
:param roll_off: the roll off of each WDM channel. Array
|
||||
:param control_param: Dictionary with the control parameters
|
||||
control_param['save_each_comp']: a boolean flag. If true, it saves in output folder one spectrum file at
|
||||
the output of each component, otherwise it saves just the last spectrum. Boolean
|
||||
control_param['is_linear']: a bool flag. If true, it doesn't compute NLI; if false, OLE will consider
|
||||
NLI. Boolean
|
||||
control_param['is_analytic']: a boolean flag. If true, the NLI is computed through the analytic
|
||||
formula, otherwise it uses the double integral. Warning: the double integral is very slow. Boolean
|
||||
control_param['points_not_interp']: if the double integral is used, it indicates how much points are
|
||||
calculated, others will be interpolated. Scalar
|
||||
control_param['kind_interp']: the interpolation method when double integral is used. String
|
||||
control_param['th_fwm']: the threshold of the four wave mixing efficiency for the double integral. Scalar
|
||||
control_param['n_points']: number of points in the high FWM efficiency region in which the double
|
||||
integral is computed. Scalar
|
||||
control_param['n_points_min']: number of points in which the double integral is computed in the low FWM
|
||||
efficiency region. Scalar
|
||||
control_param['n_cores']: number of cores for parallel computation [not yet implemented]. Scalar
|
||||
:return: None
|
||||
"""
|
||||
|
||||
n_cores = control_param['n_cores']
|
||||
|
||||
# Evaluation of NLI
|
||||
if not control_param['is_linear']:
|
||||
num_ch = len(spectrum['signals'])
|
||||
spectrum_param = {
|
||||
'num_ch': num_ch,
|
||||
'f_ch': f_ch,
|
||||
'b_ch': b_ch,
|
||||
'roll_off': roll_off
|
||||
}
|
||||
|
||||
p_ch = np.zeros(num_ch)
|
||||
for index, signal in enumerate(spectrum['signals']):
|
||||
p_ch[index] = signal['p_ch']
|
||||
|
||||
spectrum_param['power'] = p_ch
|
||||
fiber_param['span_length'] = fiber_length
|
||||
|
||||
nli_cmp, f_nli_cmp, nli_int, f_nli_int = gn_model(spectrum_param, fiber_param, control_param, n_cores)
|
||||
f_nli = np.concatenate((f_nli_cmp, f_nli_int))
|
||||
order = np.argsort(f_nli)
|
||||
g_nli = np.concatenate((nli_cmp, nli_int))
|
||||
g_nli = np.array(g_nli)[order]
|
||||
|
||||
p_nli = np.multiply(g_nli, b_ch)
|
||||
|
||||
a_zero = fiber_param['alpha'] * fiber_length
|
||||
a_tilting = fiber_param['alpha_1st'] * fiber_length
|
||||
|
||||
# Apply attenuation
|
||||
passive_component(spectrum, a_zero, a_tilting, f_ch)
|
||||
|
||||
# Apply NLI
|
||||
if not control_param['is_linear']:
|
||||
for index, s in enumerate(spectrum['signals']):
|
||||
spectrum['signals'][index]['p_nli'] += p_nli[index]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_frequencies_wdm(spectrum, sys_param):
|
||||
""" the function computes the central frequency of the WDM comb and the frequency of each channel.
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param sys_param: a dictionary containing the system parameters:
|
||||
'f0': the starting frequency, i.e the frequency of the first spectral slot [THz]
|
||||
'ns': the number of spectral slots. The space between two slots is 6.25 GHz
|
||||
:return: f_cent: the central frequency of the WDM comb [THz]
|
||||
:return: f_ch: the baseband frequency of each WDM channel [THz]
|
||||
"""
|
||||
|
||||
delta_f = 6.25E-3
|
||||
# Evaluate the central frequency
|
||||
f0 = sys_param['f0']
|
||||
ns = sys_param['ns']
|
||||
|
||||
f_cent = f0 + ((ns // 2.0) * delta_f)
|
||||
|
||||
# Evaluate the baseband frequencies
|
||||
n_ch = spectrum['laser_position'].count(1)
|
||||
f_ch = np.zeros(n_ch)
|
||||
count = 0
|
||||
for index, bool_laser in enumerate(spectrum['laser_position']):
|
||||
if bool_laser:
|
||||
f_ch[count] = (f0 - f_cent) + delta_f * index
|
||||
count += 1
|
||||
|
||||
return f_cent, f_ch
|
||||
|
||||
|
||||
def get_spectrum_param(spectrum):
|
||||
""" the function returns the number of WDM channels and 3 arrays containing the power, the equivalent bandwidth
|
||||
and the roll off of each WDM channel.
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:return: power: the power of each WDM channel [W]
|
||||
:return: b_eq: the equivalent bandwidth of each WDM channel [THz]
|
||||
:return: roll_off: the roll off of each WDM channel
|
||||
:return: p_ase: the power of the ASE noise [W]
|
||||
:return: p_nli: the power of NLI [W]
|
||||
:return: n_ch: the number of WDM channels
|
||||
"""
|
||||
|
||||
n_ch = spectrum['laser_position'].count(1)
|
||||
roll_off = np.zeros(n_ch)
|
||||
b_eq = np.zeros(n_ch)
|
||||
power = np.zeros(n_ch)
|
||||
p_ase = np.zeros(n_ch)
|
||||
p_nli = np.zeros(n_ch)
|
||||
for index, signal in enumerate(spectrum['signals']):
|
||||
b_eq[index] = signal['b_ch']
|
||||
roll_off[index] = signal['roll_off']
|
||||
power[index] = signal['p_ch']
|
||||
p_ase[index] = signal['p_ase']
|
||||
p_nli[index] = signal['p_nli']
|
||||
|
||||
return power, b_eq, roll_off, p_ase, p_nli, n_ch
|
||||
|
||||
|
||||
def change_component_ref(f_ref, link, fibers):
|
||||
""" it updates the reference frequency of OA gain, PC attenuation and fiber attenuation coefficient
|
||||
|
||||
:param f_ref: the new reference frequency [THz]. Scalar
|
||||
:param link: the link structure. A list in which each element indicates one link component (PC, OA or fiber). List
|
||||
:param fibers: a dictionary containing the description of each fiber type. Dictionary
|
||||
:return: None
|
||||
"""
|
||||
|
||||
light_speed = 3e8 # [m/s]
|
||||
|
||||
# Change reference to the central frequency f_cent for OA and PC
|
||||
for index, component in enumerate(link):
|
||||
if component['comp_cat'] == 'PC':
|
||||
|
||||
old_loss = component['loss']
|
||||
delta_loss = component['loss_tlt']
|
||||
old_ref = component['ref_freq']
|
||||
new_loss = old_loss + delta_loss * (f_ref - old_ref)
|
||||
|
||||
link[index]['ref_freq'] = f_ref
|
||||
link[index]['loss'] = new_loss
|
||||
|
||||
elif component['comp_cat'] == 'OA':
|
||||
|
||||
old_gain = component['gain']
|
||||
delta_gain = component['gain_tlt']
|
||||
old_ref = component['ref_freq']
|
||||
new_gain = old_gain + delta_gain * (f_ref - old_ref)
|
||||
|
||||
link[index]['ref_freq'] = f_ref
|
||||
link[index]['gain'] = new_gain
|
||||
|
||||
elif component['comp_cat'] != 'fiber':
|
||||
|
||||
error_string = 'Error in link structure: the ' + str(index+1) + '-th component has an unknown category \n'\
|
||||
+ 'allowed values are (case sensitive): PC, OA and fiber'
|
||||
print(error_string)
|
||||
|
||||
# Change reference to the central frequency f_cent for fiber
|
||||
for fib_type in fibers:
|
||||
old_ref = fibers[fib_type]['reference_frequency']
|
||||
old_alpha = fibers[fib_type]['alpha']
|
||||
alpha_1st = fibers[fib_type]['alpha_1st']
|
||||
new_alpha = old_alpha + alpha_1st * (f_ref - old_ref)
|
||||
|
||||
fibers[fib_type]['reference_frequency'] = f_ref
|
||||
fibers[fib_type]['alpha'] = new_alpha
|
||||
|
||||
fibers[fib_type]['gamma'] = (2 * np.pi) * (f_ref / light_speed) * \
|
||||
(fibers[fib_type]['n_2'] / fibers[fib_type]['a_eff']) * 1e27
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def compute_and_save_osnr(spectrum, flag_save=False, file_name='00', output_path='./output/'):
|
||||
""" Given the spectrum structure, the function returns the linear and non linear OSNR. If the boolean variable
|
||||
flag_save is true, the function also saves the osnr values for the central channel, the osnr for each channel and
|
||||
spectrum in a file with the name file_name, in the folder indicated by output_path
|
||||
|
||||
:param spectrum: the spectrum dictionary containing the laser position (a list of boolean) and the list signals,
|
||||
which is a list of dictionaries (one for each channel) containing:
|
||||
'b_ch': the -3 dB bandwidth of the signal [THz]
|
||||
'roll_off': the roll off of the signal
|
||||
'p_ch': the signal power [W]
|
||||
'p_nli': the equivalent nli power [W]
|
||||
'p_ase': the ASE noise [W]
|
||||
:param flag_save: if True it saves all the data, otherwise it doesn't
|
||||
:param file_name: the name of the file in which the variables are saved
|
||||
:param output_path: the path in which you want to save the file
|
||||
:return: osnr_lin_db: the linear OSNR [dB]
|
||||
:return: osnr_nli_db: the non-linear equivalent OSNR [dB]
|
||||
"""
|
||||
|
||||
# Get the parameters from spectrum
|
||||
p_ch, b_eq, roll_off, p_ase, p_nli, n_ch = get_spectrum_param(spectrum)
|
||||
|
||||
# Compute the linear OSNR
|
||||
if (p_ase == 0).any():
|
||||
osnr_lin = np.zeros(n_ch)
|
||||
for index, p_noise in enumerate(p_ase):
|
||||
if p_noise == 0:
|
||||
osnr_lin[index] = float('inf')
|
||||
else:
|
||||
osnr_lin[index] = p_ch[index] / p_noise
|
||||
|
||||
else:
|
||||
osnr_lin = np.divide(p_ch, p_ase)
|
||||
|
||||
# Compute the non-linear OSNR
|
||||
if ((p_ase + p_nli) == 0).any():
|
||||
osnr_nli = np.zeros(n_ch)
|
||||
for index, p_noise in enumerate(p_ase + p_nli):
|
||||
|
||||
if p_noise == 0:
|
||||
osnr_nli[index] = float('inf')
|
||||
else:
|
||||
osnr_nli[index] = p_ch[index] / p_noise
|
||||
else:
|
||||
osnr_nli = np.divide(p_ch, p_ase + p_nli)
|
||||
|
||||
# Compute linear and non linear OSNR for the central channel
|
||||
ind_c = n_ch // 2
|
||||
osnr_lin_central_channel_db = 10 * np.log10(osnr_lin[ind_c])
|
||||
osnr_nl_central_channel_db = 10 * np.log10(osnr_nli[ind_c])
|
||||
|
||||
# Conversion in dB
|
||||
osnr_lin_db = 10 * np.log10(osnr_lin)
|
||||
osnr_nli_db = 10 * np.log10(osnr_nli)
|
||||
|
||||
# Save spectrum, the non linear OSNR and the linear OSNR
|
||||
out_fle_name = output_path + file_name
|
||||
|
||||
if flag_save:
|
||||
|
||||
f = open(out_fle_name, 'w')
|
||||
f.write(''.join(('# Output parameters. The values of OSNR are evaluated in the -3 dB channel band', '\n\n')))
|
||||
f.write(''.join(('osnr_lin_central_channel_db = ', str(osnr_lin_central_channel_db), '\n\n')))
|
||||
f.write(''.join(('osnr_nl_central_channel_db = ', str(osnr_nl_central_channel_db), '\n\n')))
|
||||
f.write(''.join(('osnr_lin_db = ', str(osnr_lin_db), '\n\n')))
|
||||
f.write(''.join(('osnr_nl_db = ', str(osnr_nli_db), '\n\n')))
|
||||
f.write(''.join(('spectrum = ', str(spectrum), '\n')))
|
||||
|
||||
f.close()
|
||||
|
||||
return osnr_nli_db, osnr_lin_db
|
||||
|
||||
|
||||
def ole(spectrum, link, fibers, sys_param, control_param, output_path='./output/'):
    """ The function takes the input spectrum, the link description, the fiber description, the system parameters,
    the control parameters and a string describing the destination folder of the output files. After the function is
    executed, the spectrum is updated with all the impairments of the link. The function also returns the linear and
    non-linear OSNR, computed in the equivalent bandwidth.

    :param spectrum: the spectrum dictionary containing the laser position (a list of booleans) and the list signals,
        which is a list of dictionaries (one for each channel) containing:

        'b_ch': the -3 dB bandwidth of the signal [THz]

        'roll_off': the roll-off of the signal

        'p_ch': the signal power [W]

        'p_nli': the equivalent NLI power [W]

        'p_ase': the ASE noise power [W]
    :param link: the link structure. A list in which each element is a dictionary describing one link component
        (PC, OA or fiber). List
    :param fibers: a dictionary containing one dictionary for each kind of fiber. Each of them has to report:

        reference_frequency: the frequency at which the parameters are evaluated [THz]

        alpha: the attenuation coefficient [dB/km]

        alpha_1st: the first derivative of alpha, i.e. the alpha slope [dB/km/THz];
            set it to zero to assume a flat attenuation with respect to frequency

        beta_2: the dispersion coefficient [ps^2/km]

        n_2: the second-order nonlinear refractive index [m^2/W]; a typical value is 2.5E-20 m^2/W

        a_eff: the effective area of the fiber [um^2]
    :param sys_param: a dictionary containing the general system parameters:

        f0: the starting frequency of the laser grid used to describe the WDM system

        ns: the number of 6.25 GHz slots in the grid
    :param control_param: a dictionary containing the following parameters:

        save_each_comp: a boolean flag. If True, one spectrum file is saved in the output folder at the output of
            each component; otherwise only the final spectrum is saved

        is_linear: a boolean flag. If True, the NLI is not computed; if False, OLE takes the NLI into account

        is_analytic: a boolean flag. If True, the NLI is computed through the analytic formula, otherwise through
            the double integral. Warning: the double integral is very slow.

        points_not_interp: if the double integral is used, it indicates how many points are actually calculated;
            the others are interpolated

        kind_interp: a string indicating the interpolation method for the double integral

        th_fwm: the threshold on the four-wave mixing efficiency for the double integral

        n_points: the number of points in which the double integral is computed in the high FWM efficiency region

        n_points_min: the number of points in which the double integral is computed in the low FWM efficiency region

        n_cores: the number of cores for parallel computation [not yet implemented]
    :param output_path: the path in which the output files are saved. String
    :return: osnr_nli_db: an array containing the non-linear OSNR [dB], one value for each WDM channel. Array
    :return: osnr_lin_db: an array containing the linear OSNR [dB], one value for each WDM channel. Array
    """

    # Take the control parameters
    flag_save_each_comp = control_param['save_each_comp']

    # Evaluate the frequency parameters
    f_cent, f_ch = get_frequencies_wdm(spectrum, sys_param)

    # Evaluate the spectrum parameters
    power, b_eq, roll_off, p_ase, p_nli, n_ch = get_spectrum_param(spectrum)

    # Change the reference to the central frequency f_cent for OA, PC and fibers
    change_component_ref(f_cent, link, fibers)

    # Emulate the link
    for component in link:
        if component['comp_cat'] == 'PC':
            a_zero = component['loss']
            a_tilting = component['loss_tlt']

            passive_component(spectrum, a_zero, a_tilting, f_ch)

        elif component['comp_cat'] == 'OA':
            gain_zero = component['gain']
            gain_tilting = component['gain_tlt']
            noise_fig = component['noise_figure']

            optical_amplifier(spectrum, gain_zero, gain_tilting, noise_fig, f_cent, f_ch, b_eq)

        elif component['comp_cat'] == 'fiber':
            fiber_type = component['fiber_type']
            fiber_param = fibers[fiber_type]
            fiber_length = component['length']

            fiber(spectrum, fiber_param, fiber_length, f_ch, b_eq, roll_off, control_param)

        else:
            error_string = 'Error in link structure: the ' + component['comp_cat'] + ' category is unknown.\n' \
                           + 'Allowed values are (case sensitive): PC, OA and fiber'
            print(error_string)

        if flag_save_each_comp:
            f_name = 'Output from component ID #' + component['comp_id']
            osnr_nli_db, osnr_lin_db = \
                compute_and_save_osnr(spectrum, flag_save=True, file_name=f_name, output_path=output_path)

    osnr_nli_db, osnr_lin_db = \
        compute_and_save_osnr(spectrum, flag_save=True, file_name='link_output', output_path=output_path)

    return osnr_nli_db, osnr_lin_db
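

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: all numerical values below
    # are illustrative placeholders, chosen only to show how the spectrum, link, fibers,
    # sys_param and control_param structures fit together. Gain/loss in dB and span
    # length in km are assumed; the original docstrings do not state these units.
    import os

    n_ch_demo = 3
    demo_spectrum = {
        'laser_position': [0, 0, 0, 1, 0, 0, 0, 0] * n_ch_demo,
        'signals': [{'b_ch': 0.032, 'roll_off': 0.15,
                     'p_ch': 1E-3, 'p_nli': 0, 'p_ase': 0} for _ in range(n_ch_demo)]
    }

    demo_fibers = {
        'SMF': {
            'reference_frequency': 193.5,   # [THz]
            'alpha': 0.2,                   # [dB/km]
            'alpha_1st': 0,                 # flat attenuation vs. frequency
            'beta_2': -21.0,                # [ps^2/km]
            'n_2': 2.5E-20,                 # [m^2/W]
            'a_eff': 80.0                   # [um^2]
        }
    }

    demo_link = [
        {'comp_cat': 'fiber', 'comp_id': '1', 'fiber_type': 'SMF', 'length': 100.0},
        {'comp_cat': 'OA', 'comp_id': '2', 'gain': 20.0, 'gain_tlt': 0.0, 'noise_figure': 5.0},
        {'comp_cat': 'PC', 'comp_id': '3', 'loss': 3.0, 'loss_tlt': 0.0}
    ]

    demo_sys_param = {'f0': 193.0, 'ns': len(demo_spectrum['laser_position'])}

    demo_control_param = {
        'save_each_comp': False, 'is_linear': False, 'is_analytic': True,
        'points_not_interp': 2, 'kind_interp': 'linear', 'th_fwm': 50,
        'n_points': 500, 'n_points_min': 4, 'n_cores': 1
    }

    os.makedirs('./output/', exist_ok=True)
    demo_osnr_nli_db, demo_osnr_lin_db = ole(
        demo_spectrum, demo_link, demo_fibers, demo_sys_param, demo_control_param,
        output_path='./output/')
    print('non-linear OSNR [dB]:', demo_osnr_nli_db)
    print('linear OSNR [dB]:', demo_osnr_lin_db)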
@@ -1,29 +0,0 @@
# coding=utf-8
""" spectrum_in.py describes the input spectrum of OLE, i.e. the spectrum dictionary.

spectrum is a dictionary containing two fields:

    laser_position: a list of booleans indicating whether each laser is turned on or not

    signals: a list of dictionaries, each of them describing one channel in the WDM comb

The laser_position is defined with respect to a frequency grid with 6.25 GHz spacing, whose first slot is at the
frequency described by the variable f0 in the dictionary sys_param in the file "general_parameters.py".

Each dictionary element of the list 'signals' describes the profile of a WDM channel:

    b_ch: the -3 dB channel bandwidth (for a root raised cosine, it is equal to the symbol rate)

    roll_off: the roll-off parameter of the root raised cosine shape

    p_ch: the channel power [W]

    p_nli: the power of the accumulated NLI in b_ch [W]

    p_ase: the power of the accumulated ASE noise in b_ch [W]
"""

n_ch = 41

spectrum = {
    'laser_position': [0, 0, 0, 1, 0, 0, 0, 0] * n_ch,
    'signals': [{
        'b_ch': 0.032,
        'roll_off': 0.15,
        'p_ch': 1E-3,
        'p_nli': 0,
        'p_ase': 0
    } for _ in range(n_ch)]
}
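
# A minimal sketch (not part of the original file) of how laser_position maps onto
# absolute frequencies, assuming the 6.25 GHz grid convention described above; inside
# OLE this mapping is performed by get_frequencies_wdm(). The value of f0 below is
# purely illustrative.
#
#     f0 = 193.0          # [THz], starting frequency of the grid (illustrative)
#     delta_f = 6.25E-3   # [THz], grid spacing
#     f_ch = [f0 + i * delta_f
#             for i, laser_on in enumerate(spectrum['laser_position']) if laser_on]
#
# With the pattern [0, 0, 0, 1, 0, 0, 0, 0] repeated n_ch times, this places one
# carrier every 8 slots, i.e. on a standard 50 GHz WDM grid.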