Description
orchagent process gets killed when setting ecnconfig, which causes the ECN_WRED test to fail
Steps to reproduce the issue:
-
Check orchagent process information
root@DUT:~# pgrep orchagent -a
5588 /usr/bin/orchagent -d /var/log/swss -b 8192 -m a4:8c:db:b9:b3:00
-
Try to set min threshold for packets marked "red"
root@DUT:~# ecnconfig -p AZURE_LOSSY -rmin 491520
-
Check orchagent process information again
root@DUT:~# pgrep orchagent -a
root@DUT:~# ===========> The orchagent was killed somehow
-
Show ECN config
root@str-ne10032-01:~# ecnconfig -l
Profile: AZURE_LOSSY
red_min_threshold 491520
Describe the results you received:
Describe the results you expected:
Can someone tell me why orchagent stops in this case?
Thanks,
Additional information you deem important:
cat /var/log/swss/swss.rec
....
2018-05-02.21:14:16.682091|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:19.690266|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:22.694087|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:25.702088|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:28.710219|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:30.625925|NEIGH_TABLE:PortChannel0004:fc00::7e|SET|neigh:52:54:00:33:76:bf|family:IPv6
2018-05-02.21:14:31.714792|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:34.587346|NEIGH_TABLE:PortChannel0001:fc00::72|SET|neigh:52:54:00:e0:98:f2|family:IPv6
2018-05-02.21:14:34.718782|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:35.098055|NEIGH_TABLE:PortChannel0003:fc00::7a|SET|neigh:52:54:00:db:a6:dc|family:IPv6
2018-05-02.21:14:37.726192|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:40.345758|NEIGH_TABLE:PortChannel0002:fc00::76|SET|neigh:52:54:00:f8:f7:48|family:IPv6
2018-05-02.21:14:40.730085|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:43.733959|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:46.741973|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:49.745980|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:52.750078|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:54.681520|NEIGH_TABLE:eth0:10.240.157.36|SET|neigh:00:1a:64:8a:9f:a4|family:IPv4
2018-05-02.21:14:55.758192|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:58.770126|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:59.770155|NEIGH_TABLE:eth0:10.240.157.36|SET|neigh:00:1a:64:8a:9f:a4|family:IPv4
2018-05-02.21:15:01.773986|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:15:03.129307|WRED_PROFILE:AZURE_LOSSY|SET|red_min_threshold:491520
cat /var/log/swss/sairedis.rec
2018-05-02.21:15:03.129903|#|logrotate on: /var/log/swss/sairedis.rec
2018-05-02.21:15:03.133307|n|switch_shutdown_request||
Description
orchagent process gets killed when setting ecnconfig, which causes the ECN_WRED test to fail
Steps to reproduce the issue:
Check orchagent process information
root@DUT:~# pgrep orchagent -a
5588 /usr/bin/orchagent -d /var/log/swss -b 8192 -m a4:8c:db:b9:b3:00
Try to set min threshold for packets marked "red"
root@DUT:~# ecnconfig -p AZURE_LOSSY -rmin 491520
Check orchagent process information again
root@DUT:~# pgrep orchagent -a
root@DUT:~# ===========> The orchagent was killed somehow
Show ECN config
root@str-ne10032-01:~# ecnconfig -l
Profile: AZURE_LOSSY
red_min_threshold 491520
Describe the results you received:
Describe the results you expected:
Can someone tell me why orchagent stops in this case?
Thanks,
Additional information you deem important:
cat /var/log/swss/swss.rec
....
2018-05-02.21:14:16.682091|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:19.690266|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:22.694087|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:25.702088|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:28.710219|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:30.625925|NEIGH_TABLE:PortChannel0004:fc00::7e|SET|neigh:52:54:00:33:76:bf|family:IPv6
2018-05-02.21:14:31.714792|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:34.587346|NEIGH_TABLE:PortChannel0001:fc00::72|SET|neigh:52:54:00:e0:98:f2|family:IPv6
2018-05-02.21:14:34.718782|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:35.098055|NEIGH_TABLE:PortChannel0003:fc00::7a|SET|neigh:52:54:00:db:a6:dc|family:IPv6
2018-05-02.21:14:37.726192|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:40.345758|NEIGH_TABLE:PortChannel0002:fc00::76|SET|neigh:52:54:00:f8:f7:48|family:IPv6
2018-05-02.21:14:40.730085|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:43.733959|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:46.741973|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:49.745980|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:52.750078|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:54.681520|NEIGH_TABLE:eth0:10.240.157.36|SET|neigh:00:1a:64:8a:9f:a4|family:IPv4
2018-05-02.21:14:55.758192|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:58.770126|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:14:59.770155|NEIGH_TABLE:eth0:10.240.157.36|SET|neigh:00:1a:64:8a:9f:a4|family:IPv4
2018-05-02.21:15:01.773986|NEIGH_TABLE:eth0:10.240.157.37|DEL
2018-05-02.21:15:03.129307|WRED_PROFILE:AZURE_LOSSY|SET|red_min_threshold:491520
cat /var/log/swss/sairedis.rec
2018-05-02.21:15:03.129903|#|logrotate on: /var/log/swss/sairedis.rec
2018-05-02.21:15:03.133307|n|switch_shutdown_request||