Mirror of https://github.com/ARM-software/devlib.git (synced 2025-09-22 11:51:53 +01:00)

Compare commits
605 Commits
Commit list: 605 commits, from 56f3b1c317 through c7fc01c6b5 (the author, date, and commit-message columns of the source table were empty).
.gitignore (vendored, 4 changes)
@@ -3,3 +3,7 @@
*.orig
.ropeproject
*.egg-info
devlib/bin/scripts/shutils
doc/_build/
build/
dist/
README.rst
@@ -17,7 +17,7 @@ Installation
Usage
-----

Please refer to the "Overview" section of the documentation.
Please refer to the "Overview" section of the `documentation <http://devlib.readthedocs.io/en/latest/>`_.


License
devlib/__init__.py
@@ -1,4 +1,19 @@
from devlib.target import Target, LinuxTarget, AndroidTarget, LocalLinuxTarget
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from devlib.target import Target, LinuxTarget, AndroidTarget, LocalLinuxTarget, ChromeOsTarget
from devlib.host import PACKAGE_BIN_DIRECTORY
from devlib.exception import DevlibError, TargetError, HostError, TargetNotRespondingError

@@ -7,12 +22,37 @@ from devlib.module import get_module, register_module

from devlib.platform import Platform
from devlib.platform.arm import TC2, Juno, JunoEnergyInstrument
from devlib.platform.gem5 import Gem5SimulationPlatform

from devlib.instrument import Instrument, InstrumentChannel, Measurement, MeasurementsCsv
from devlib.instrument import MEASUREMENT_TYPES, INSTANTANEOUS, CONTINUOUS
from devlib.instrument.daq import DaqInstrument
from devlib.instrument.energy_probe import EnergyProbeInstrument
from devlib.instrument.arm_energy_probe import ArmEnergyProbeInstrument
from devlib.instrument.frames import GfxInfoFramesInstrument, SurfaceFlingerFramesInstrument
from devlib.instrument.hwmon import HwmonInstrument
from devlib.instrument.monsoon import MonsoonInstrument
from devlib.instrument.netstats import NetstatsInstrument
from devlib.instrument.gem5power import Gem5PowerInstrument

from devlib.derived import DerivedMeasurements, DerivedMetric
from devlib.derived.energy import DerivedEnergyMeasurements
from devlib.derived.fps import DerivedGfxInfoStats, DerivedSurfaceFlingerStats

from devlib.trace.ftrace import FtraceCollector
from devlib.trace.serial_trace import SerialTraceCollector

from devlib.host import LocalConnection
from devlib.utils.android import AdbConnection
from devlib.utils.ssh import SshConnection, TelnetConnection, Gem5Connection

from devlib.utils.version import get_commit as __get_commit


__version__ = '1.0.0'

__commit = __get_commit()
if __commit:
    __full_version__ = '{}-{}'.format(__version__, __commit)
else:
    __full_version__ = __version__
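For orientation, the version plumbing added above composes a full version string from the declared version and, when available, the current git commit. A minimal illustrative sketch (not part of the diff above; the commit suffix shown is hypothetical):

import devlib

print(devlib.__version__)       # '1.0.0'
# __full_version__ is '<version>-<commit>' when a commit hash could be
# determined, and just __version__ otherwise, e.g. '1.0.0-8e1dc13'.
print(devlib.__full_version__)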
BIN devlib/bin/arm64/m5 (executable file; binary file not shown)
BIN devlib/bin/armeabi/m5 (executable file; binary file not shown)
devlib/bin/scripts/shutils.in (new executable file, 331 lines)
@@ -0,0 +1,331 @@
#!__DEVLIB_SHELL__

CMD=$1
shift

BUSYBOX=${BUSYBOX:-__DEVLIB_BUSYBOX__}
FIND=${FIND:-$BUSYBOX find}
GREP=${GREP:-$BUSYBOX grep}
SED=${SED:-$BUSYBOX sed}
CAT=${CAT:-$BUSYBOX cat}
AWK=${AWK:-$BUSYBOX awk}
PS=${PS:-$BUSYBOX ps}

################################################################################
# CPUFrequency Utility Functions
################################################################################

cpufreq_set_all_frequencies() {
    FREQ=$1
    for CPU in /sys/devices/system/cpu/cpu[0-9]*; do
        echo $FREQ > $CPU/cpufreq/scaling_cur_freq
    done
}

cpufreq_get_all_frequencies() {
    $GREP '' /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq | \
        $SED -e 's|/sys/devices/system/cpu/cpu||' -e 's|/cpufreq/scaling_cur_freq:| |'
}

cpufreq_set_all_governors() {
    GOV=$1
    for CPU in /sys/devices/system/cpu/cpu[0-9]*; do
        echo $GOV > $CPU/cpufreq/scaling_governor
    done
}

cpufreq_get_all_governors() {
    $GREP '' /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor | \
        $SED -e 's|/sys/devices/system/cpu/cpu||' -e 's|/cpufreq/scaling_governor:| |'
}

cpufreq_trace_all_frequencies() {
    FREQS=$($CAT /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq)
    CPU=0; for F in $FREQS; do
        echo "cpu_frequency_devlib: state=$F cpu_id=$CPU" > /sys/kernel/debug/tracing/trace_marker
        CPU=$((CPU + 1))
    done
}

################################################################################
# DevFrequency Utility Functions
################################################################################

devfreq_set_all_frequencies() {
    FREQ=$1
    for DEV in /sys/class/devfreq/*; do
        echo $FREQ > $DEV/min_freq
        echo $FREQ > $DEV/max_freq
    done
}

devfreq_get_all_frequencies() {
    for DEV in /sys/class/devfreq/*; do
        echo "`basename $DEV` `cat $DEV/cur_freq`"
    done
}

devfreq_set_all_governors() {
    GOV=$1
    for DEV in /sys/class/devfreq/*; do
        echo $GOV > $DEV/governor
    done
}

devfreq_get_all_governors() {
    for DEV in /sys/class/devfreq/*; do
        echo "`basename $DEV` `cat $DEV/governor`"
    done
}

################################################################################
# CPUIdle Utility Functions
################################################################################

cpuidle_wake_all_cpus() {
    CPU_PATHS=/sys/devices/system/cpu/cpu[0-9]*
    MASK=0x1; for F in $CPU_PATHS; do
        $BUSYBOX taskset $MASK true &
        MASK=$($BUSYBOX printf '0x%x' $((MASK * 2)))
    done
}

################################################################################
# FTrace Utility Functions
################################################################################

ftrace_get_function_stats() {
    for CPU in $(ls /sys/kernel/debug/tracing/trace_stat | sed 's/function//'); do
        REPLACE_STRING="s/ Function/\n Function (CPU$CPU)/"
        $CAT /sys/kernel/debug/tracing/trace_stat/function$CPU \
            | sed "$REPLACE_STRING"
    done
}


################################################################################
# CGroups Utility Functions
################################################################################

cgroups_get_attributes() {
    test $# -eq 2 || exit -1
    CGROUP="$1"
    CONTROLLER="$2"
    # Check if controller is mounted with "noprefix" option, which is quite
    # common on Android for backward compatibility
    ls $CGROUP/$CONTROLLER\.* 2>&1 >/dev/null
    if [ $? -eq 0 ]; then
        # no "noprefix" option, attributes format is:
        #   mnt_point/controller.attribute_name
        $GREP '' $CGROUP/* | \
            $GREP "$CONTROLLER\." | \
            $SED -e "s|$CONTROLLER\.||" -e "s|$CGROUP/||"
    else
        # "noprefix" option, attribute format is:
        #   mnt_point/attribute_name
        $GREP '' $(\
            $FIND $CGROUP -type f -maxdepth 1 |
            $GREP -v -e ".*tasks" -e ".*cgroup\..*") | \
            $SED "s|$CGROUP/||"
    fi
}

cgroups_run_into() {

    # Control groups mount point
    CGMOUNT=${CGMOUNT:-/sys/fs/cgroup}
    # The control group we want to run into
    CGP=${1}
    shift 1
    # The command to run
    CMD="${@}"

    # Execution under root CGgroup
    if [ "x/" == "x$CGP" ]; then

        $FIND $CGMOUNT -type d -maxdepth 0 | \
        while read CGPATH; do
            # Move this shell into that control group
            echo $$ > $CGPATH/cgroup.procs
            echo "Moving task into root CGroup ($CGPATH)"
        done

    # Execution under specified CGroup
    else

        # Check if the required CGroup exists
        $FIND $CGMOUNT -type d -mindepth 1 | \
        $GREP -E "^$CGMOUNT/devlib_cgh[0-9]{1,2}$CGP" &>/dev/null
        if [ $? -ne 0 ]; then
            echo "ERROR: could not find any $CGP cgroup under $CGMOUNT"
            exit 1
        fi

        $FIND $CGMOUNT -type d -mindepth 1 | \
        $GREP -E "^$CGMOUNT/devlib_cgh[0-9]{1,2}$CGP$" | \
        while read CGPATH; do
            # Move this shell into that control group
            echo $$ > $CGPATH/cgroup.procs
            echo "Moving task into $CGPATH"
        done

    fi

    # Execute the command
    exec $CMD

}

cgroups_tasks_move() {
    SRC_GRP=${1}
    DST_GRP=${2}
    shift 2
    FILTERS=$*

    $CAT $SRC_GRP/tasks | while read TID; do
        echo $TID > $DST_GRP/cgroup.procs
    done

    [ "x$FILTERS" = "x" ] && exit 0

    PIDS=`$PS -o comm,pid | $GREP $FILTERS | $AWK '{print $2}'`
    PIDS=`echo $PIDS`
    echo "PIDs to save: [$PIDS]"
    for TID in $PIDS; do
        COMM=`$CAT /proc/$TID/comm`
        echo "$TID : $COMM"
        echo $TID > $SRC_GRP/cgroup.procs || true
    done
}

cgroups_tasks_in() {
    GRP=${1}
    for TID in $($CAT $GRP/tasks); do
        COMM=`$CAT /proc/$TID/comm 2>/dev/null`
        [ "$COMM" != "" ] && CMDL=`$CAT /proc/$TID/cmdline 2>/dev/null`
        [ "$COMM" != "" ] && echo "$TID,$COMM,$CMDL"
    done
    exit 0
}

cgroups_freezer_set_state() {
    STATE=${1}
    SYSFS_ENTRY=${2}/freezer.state

    # Set the state of the freezer
    echo $STATE > $SYSFS_ENTRY

    # And check it applied cleanly
    for i in `seq 1 10`; do
        [ $($CAT $SYSFS_ENTRY) = $STATE ] && exit 0
        sleep 1
    done

    # We have an issue
    echo "ERROR: Freezer stalled while changing state to \"$STATE\"." >&2
    exit 1
}

################################################################################
# Hotplug
################################################################################

hotplug_online_all() {
    for path in /sys/devices/system/cpu/cpu[0-9]*; do
        if [ $(cat $path/online) -eq 0 ]; then
            echo 1 > $path/online
        fi
    done
}

################################################################################
# Misc
################################################################################

read_tree_values() {
    BASEPATH=$1
    MAXDEPTH=$2

    if [ ! -e $BASEPATH ]; then
        echo "ERROR: $BASEPATH does not exist"
        exit 1
    fi

    PATHS=$($BUSYBOX find $BASEPATH -follow -maxdepth $MAXDEPTH)
    i=0
    for path in $PATHS; do
        i=$(expr $i + 1)
        if [ $i -gt 1 ]; then
            break;
        fi
    done
    if [ $i -gt 1 ]; then
        $BUSYBOX grep -s '' $PATHS
    fi
}

################################################################################
# Main Function Dispatcher
################################################################################

case $CMD in
cpufreq_set_all_frequencies)
    cpufreq_set_all_frequencies $*
    ;;
cpufreq_get_all_frequencies)
    cpufreq_get_all_frequencies
    ;;
cpufreq_set_all_governors)
    cpufreq_set_all_governors $*
    ;;
cpufreq_get_all_governors)
    cpufreq_get_all_governors
    ;;
cpufreq_trace_all_frequencies)
    cpufreq_trace_all_frequencies $*
    ;;
devfreq_set_all_frequencies)
    devfreq_set_all_frequencies $*
    ;;
devfreq_get_all_frequencies)
    devfreq_get_all_frequencies
    ;;
devfreq_set_all_governors)
    devfreq_set_all_governors $*
    ;;
devfreq_get_all_governors)
    devfreq_get_all_governors
    ;;
cpuidle_wake_all_cpus)
    cpuidle_wake_all_cpus $*
    ;;
cgroups_get_attributes)
    cgroups_get_attributes $*
    ;;
cgroups_run_into)
    cgroups_run_into $*
    ;;
cgroups_tasks_move)
    cgroups_tasks_move $*
    ;;
cgroups_tasks_in)
    cgroups_tasks_in $*
    ;;
cgroups_freezer_set_state)
    cgroups_freezer_set_state $*
    ;;
ftrace_get_function_stats)
    ftrace_get_function_stats
    ;;
hotplug_online_all)
    hotplug_online_all
    ;;
read_tree_values)
    read_tree_values $*
    ;;
*)
    echo "Command [$CMD] not supported"
    exit -1
esac

# vim: tabstop=4 shiftwidth=4
BIN devlib/bin/x86_64/busybox (executable file; binary file not shown)
BIN devlib/bin/x86_64/trace-cmd (executable file; binary file not shown)
devlib/derived/__init__.py (new file, 60 lines)
@@ -0,0 +1,60 @@
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from devlib.instrument import MeasurementType, MEASUREMENT_TYPES


class DerivedMetric(object):

    __slots__ = ['name', 'value', 'measurement_type']

    @property
    def units(self):
        return self.measurement_type.units

    def __init__(self, name, value, measurement_type):
        self.name = name
        self.value = value
        if isinstance(measurement_type, MeasurementType):
            self.measurement_type = measurement_type
        else:
            try:
                self.measurement_type = MEASUREMENT_TYPES[measurement_type]
            except KeyError:
                msg = 'Unknown measurement type: {}'
                raise ValueError(msg.format(measurement_type))

    def __cmp__(self, other):
        if hasattr(other, 'value'):
            return cmp(self.value, other.value)
        else:
            return cmp(self.value, other)

    def __str__(self):
        if self.units:
            return '{}: {} {}'.format(self.name, self.value, self.units)
        else:
            return '{}: {}'.format(self.name, self.value)

    __repr__ = __str__


class DerivedMeasurements(object):

    def process(self, measurements_csv):
        return []

    def process_raw(self, *args):
        return []
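DerivedMeasurements and DerivedMetric above form the interface that concrete processors implement. An illustrative sketch of a custom processor built only on that interface (not part of the changes above; the class name and metric are invented):

from devlib.derived import DerivedMeasurements, DerivedMetric

class DerivedSampleCount(DerivedMeasurements):
    """Toy processor: report how many rows a measurements CSV contains."""

    def process(self, measurements_csv):
        # iter_measurements() yields one list of Measurement objects per CSV row.
        count = sum(1 for _ in measurements_csv.iter_measurements())
        return [DerivedMetric('sample_count', count, 'count')]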
devlib/derived/energy.py (new file, 97 lines)
@@ -0,0 +1,97 @@
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
from collections import defaultdict

from devlib import DerivedMeasurements, DerivedMetric
from devlib.instrument import MEASUREMENT_TYPES, InstrumentChannel


class DerivedEnergyMeasurements(DerivedMeasurements):

    @staticmethod
    def process(measurements_csv):

        should_calculate_energy = []
        use_timestamp = False

        # Determine sites to calculate energy for
        channel_map = defaultdict(list)
        for channel in measurements_csv.channels:
            channel_map[channel.site].append(channel.kind)
            if channel.site == 'timestamp':
                use_timestamp = True
                time_measurment = channel.measurement_type
        for site, kinds in channel_map.items():
            if 'power' in kinds and not 'energy' in kinds:
                should_calculate_energy.append(site)

        if measurements_csv.sample_rate_hz is None and not use_timestamp:
            msg = 'Timestamp data is unavailable, please provide a sample rate'
            raise ValueError(msg)

        if use_timestamp:
            # Find index of timestamp column
            ts_index = [i for i, chan in enumerate(measurements_csv.channels)
                        if chan.site == 'timestamp']
            if len(ts_index) > 1:
                raise ValueError('Multiple timestamps detected')
            ts_index = ts_index[0]

        row_ts = 0
        last_ts = 0
        energy_results = defaultdict(dict)
        power_results = defaultdict(float)

        # Process data
        for count, row in enumerate(measurements_csv.iter_measurements()):
            if use_timestamp:
                last_ts = row_ts
                row_ts = time_measurment.convert(float(row[ts_index].value), 'time')
            for entry in row:
                channel = entry.channel
                site = channel.site
                if channel.kind == 'energy':
                    if count == 0:
                        energy_results[site]['start'] = entry.value
                    else:
                        energy_results[site]['end'] = entry.value

                if channel.kind == 'power':
                    power_results[site] += entry.value

                    if site in should_calculate_energy:
                        if count == 0:
                            energy_results[site]['start'] = 0
                            energy_results[site]['end'] = 0
                        elif use_timestamp:
                            energy_results[site]['end'] += entry.value * (row_ts - last_ts)
                        else:
                            energy_results[site]['end'] += entry.value * (1 /
                                                           measurements_csv.sample_rate_hz)

        # Calculate final measurements
        derived_measurements = []
        for site in energy_results:
            total_energy = energy_results[site]['end'] - energy_results[site]['start']
            name = '{}_total_energy'.format(site)
            derived_measurements.append(DerivedMetric(name, total_energy, MEASUREMENT_TYPES['energy']))

        for site in power_results:
            power = power_results[site] / (count + 1)  #pylint: disable=undefined-loop-variable
            name = '{}_average_power'.format(site)
            derived_measurements.append(DerivedMetric(name, power, MEASUREMENT_TYPES['power']))

        return derived_measurements
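The accumulation rule above, for sites that report power but not energy, is energy += power * delta_t between consecutive timestamped samples (or power / sample_rate_hz when no timestamps are present), with the first sample contributing nothing. A small worked sketch with made-up numbers (not part of the changes above):

samples = [(0.0, 2.0), (0.5, 2.4), (1.0, 2.2)]  # (timestamp in s, power in W)

energy = 0.0
last_ts = samples[0][0]
for ts, power in samples[1:]:
    energy += power * (ts - last_ts)    # joules += watts * seconds
    last_ts = ts

average_power = sum(p for _, p in samples) / len(samples)

print(energy)         # ~2.3 -> reported as '<site>_total_energy' (joules)
print(average_power)  # 2.2  -> reported as '<site>_average_power' (watts)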
devlib/derived/fps.py (new file, 232 lines)
@@ -0,0 +1,232 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import division
import os
import re

try:
    import pandas as pd
except ImportError:
    pd = None

from past.builtins import basestring

from devlib import DerivedMeasurements, DerivedMetric, MeasurementsCsv, InstrumentChannel
from devlib.exception import HostError
from devlib.utils.csvutil import csvwriter
from devlib.utils.rendering import gfxinfo_get_last_dump, VSYNC_INTERVAL
from devlib.utils.types import numeric


class DerivedFpsStats(DerivedMeasurements):

    def __init__(self, drop_threshold=5, suffix=None, filename=None, outdir=None):
        self.drop_threshold = drop_threshold
        self.suffix = suffix
        self.filename = filename
        self.outdir = outdir
        if (filename is None) and (suffix is None):
            self.suffix = '-fps'
        elif (filename is not None) and (suffix is not None):
            raise ValueError('suffix and filename cannot be specified at the same time.')
        if filename is not None and os.sep in filename:
            raise ValueError('filename cannot be a path (cannot countain "{}"'.format(os.sep))

    def process(self, measurements_csv):
        if isinstance(measurements_csv, basestring):
            measurements_csv = MeasurementsCsv(measurements_csv)
        if pd is not None:
            return self._process_with_pandas(measurements_csv)
        return self._process_without_pandas(measurements_csv)

    def _get_csv_file_name(self, frames_file):
        outdir = self.outdir or os.path.dirname(frames_file)
        if self.filename:
            return os.path.join(outdir, self.filename)

        frames_basename = os.path.basename(frames_file)
        rest, ext = os.path.splitext(frames_basename)
        csv_basename = rest + self.suffix + ext
        return os.path.join(outdir, csv_basename)


class DerivedGfxInfoStats(DerivedFpsStats):

    @staticmethod
    def process_raw(filepath, *args):
        metrics = []
        dump = gfxinfo_get_last_dump(filepath)
        seen_stats = False
        for line in dump.split('\n'):
            if seen_stats and not line.strip():
                break
            elif line.startswith('Janky frames:'):
                text = line.split(': ')[-1]
                val_text, pc_text = text.split('(')
                metrics.append(DerivedMetric('janks', numeric(val_text.strip()), 'count'))
                metrics.append(DerivedMetric('janks_pc', numeric(pc_text[:-3]), 'percent'))
            elif ' percentile: ' in line:
                ptile, val_text = line.split(' percentile: ')
                name = 'render_time_{}_ptile'.format(ptile)
                value = numeric(val_text.strip()[:-2])
                metrics.append(DerivedMetric(name, value, 'time_ms'))
            elif line.startswith('Number '):
                name_text, val_text = line.strip().split(': ')
                name = name_text[7:].lower().replace(' ', '_')
                value = numeric(val_text)
                metrics.append(DerivedMetric(name, value, 'count'))
            else:
                continue
            seen_stats = True
        return metrics

    def _process_without_pandas(self, measurements_csv):
        per_frame_fps = []
        start_vsync, end_vsync = None, None
        frame_count = 0

        for frame_data in measurements_csv.iter_values():
            if frame_data.Flags_flags != 0:
                continue
            frame_count += 1

            if start_vsync is None:
                start_vsync = frame_data.Vsync_time_us
            end_vsync = frame_data.Vsync_time_us

            frame_time = frame_data.FrameCompleted_time_us - frame_data.IntendedVsync_time_us
            pff = 1e9 / frame_time
            if pff > self.drop_threshold:
                per_frame_fps.append([pff])

        if frame_count:
            duration = end_vsync - start_vsync
            fps = (1e6 * frame_count) / float(duration)
        else:
            duration = 0
            fps = 0

        csv_file = self._get_csv_file_name(measurements_csv.path)
        with csvwriter(csv_file) as writer:
            writer.writerow(['fps'])
            writer.writerows(per_frame_fps)

        return [DerivedMetric('fps', fps, 'fps'),
                DerivedMetric('total_frames', frame_count, 'frames'),
                MeasurementsCsv(csv_file)]

    def _process_with_pandas(self, measurements_csv):
        data = pd.read_csv(measurements_csv.path)
        data = data[data.Flags_flags == 0]
        frame_time = data.FrameCompleted_time_us - data.IntendedVsync_time_us
        per_frame_fps = (1e6 / frame_time)
        keep_filter = per_frame_fps > self.drop_threshold
        per_frame_fps = per_frame_fps[keep_filter]
        per_frame_fps.name = 'fps'

        frame_count = data.index.size
        if frame_count > 1:
            duration = data.Vsync_time_us.iloc[-1] - data.Vsync_time_us.iloc[0]
            fps = (1e9 * frame_count) / float(duration)
        else:
            duration = 0
            fps = 0

        csv_file = self._get_csv_file_name(measurements_csv.path)
        per_frame_fps.to_csv(csv_file, index=False, header=True)

        return [DerivedMetric('fps', fps, 'fps'),
                DerivedMetric('total_frames', frame_count, 'frames'),
                MeasurementsCsv(csv_file)]


class DerivedSurfaceFlingerStats(DerivedFpsStats):

    def _process_with_pandas(self, measurements_csv):
        data = pd.read_csv(measurements_csv.path)

        # fiter out bogus frames.
        bogus_frames_filter = data.actual_present_time_us != 0x7fffffffffffffff
        actual_present_times = data.actual_present_time_us[bogus_frames_filter]
        actual_present_time_deltas = actual_present_times.diff().dropna()

        vsyncs_to_compose = actual_present_time_deltas.div(VSYNC_INTERVAL)
        vsyncs_to_compose.apply(lambda x: int(round(x, 0)))

        # drop values lower than drop_threshold FPS as real in-game frame
        # rate is unlikely to drop below that (except on loading screens
        # etc, which should not be factored in frame rate calculation).
        per_frame_fps = (1.0 / (vsyncs_to_compose.multiply(VSYNC_INTERVAL / 1e9)))
        keep_filter = per_frame_fps > self.drop_threshold
        filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter]
        per_frame_fps.name = 'fps'

        csv_file = self._get_csv_file_name(measurements_csv.path)
        per_frame_fps.to_csv(csv_file, index=False, header=True)

        if not filtered_vsyncs_to_compose.empty:
            fps = 0
            total_vsyncs = filtered_vsyncs_to_compose.sum()
            frame_count = filtered_vsyncs_to_compose.size

            if total_vsyncs:
                fps = 1e9 * frame_count / (VSYNC_INTERVAL * total_vsyncs)

            janks = self._calc_janks(filtered_vsyncs_to_compose)
            not_at_vsync = self._calc_not_at_vsync(vsyncs_to_compose)
        else:
            fps = 0
            frame_count = 0
            janks = 0
            not_at_vsync = 0

        janks_pc = 0 if frame_count == 0 else janks * 100 / frame_count

        return [DerivedMetric('fps', fps, 'fps'),
                DerivedMetric('total_frames', frame_count, 'frames'),
                MeasurementsCsv(csv_file),
                DerivedMetric('janks', janks, 'count'),
                DerivedMetric('janks_pc', janks_pc, 'percent'),
                DerivedMetric('missed_vsync', not_at_vsync, 'count')]

    def _process_without_pandas(self, measurements_csv):
        # Given that SurfaceFlinger has been deprecated in favor of GfxInfo,
        # it does not seem worth it implementing this.
        raise HostError('Please install "pandas" Python package to process SurfaceFlinger frames')

    @staticmethod
    def _calc_janks(filtered_vsyncs_to_compose):
        """
        Internal method for calculating jank frames.
        """
        pause_latency = 20
        vtc_deltas = filtered_vsyncs_to_compose.diff().dropna()
        vtc_deltas = vtc_deltas.abs()
        janks = vtc_deltas.apply(lambda x: (pause_latency > x > 1.5) and 1 or 0).sum()

        return janks

    @staticmethod
    def _calc_not_at_vsync(vsyncs_to_compose):
        """
        Internal method for calculating the number of frames that did not
        render in a single vsync cycle.
        """
        epsilon = 0.0001
        func = lambda x: (abs(x - 1.0) > epsilon) and 1 or 0
        not_at_vsync = vsyncs_to_compose.apply(func).sum()

        return not_at_vsync
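Driving one of these processors is a one-liner once a frames CSV exists. An illustrative sketch (not part of the changes above; the CSV path and printed values are hypothetical, using the gfxinfo column names referenced in the class):

from devlib.derived.fps import DerivedGfxInfoStats

# 'frames.csv' is a hypothetical gfxinfo frames dump with Flags_flags,
# IntendedVsync_time_us, FrameCompleted_time_us and Vsync_time_us columns.
processor = DerivedGfxInfoStats(drop_threshold=5)
for result in processor.process('frames.csv'):
    # DerivedMetric entries print as e.g. 'fps: 57.3 fps'; the list also
    # contains a MeasurementsCsv with the per-frame FPS values.
    print(result)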
devlib/exception.py
@@ -1,4 +1,4 @@
# Copyright 2013-2015 ARM Limited
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,13 +13,13 @@
# limitations under the License.
#


from devlib.utils.misc import TimeoutError  # NOQA pylint: disable=W0611


class DevlibError(Exception):
    """Base class for all Workload Automation exceptions."""
    pass
    """Base class for all Devlib exceptions."""
    @property
    def message(self):
        if self.args:
            return self.args[0]
        return str(self)


class TargetError(DevlibError):
@@ -29,12 +29,63 @@ class TargetError(DevlibError):

class TargetNotRespondingError(DevlibError):
    """The target is unresponsive."""

    def __init__(self, target):
        super(TargetNotRespondingError, self).__init__('Target {} is not responding.'.format(target))
    pass


class HostError(DevlibError):
    """An error has occured on the host"""
    pass


class TimeoutError(DevlibError):
    """Raised when a subprocess command times out. This is basically a ``DevlibError``-derived version
    of ``subprocess.CalledProcessError``, the thinking being that while a timeout could be due to
    programming error (e.g. not setting long enough timers), it is often due to some failure in the
    environment, and there fore should be classed as a "user error"."""

    def __init__(self, command, output):
        super(TimeoutError, self).__init__('Timed out: {}'.format(command))
        self.command = command
        self.output = output

    def __str__(self):
        return '\n'.join([self.message, 'OUTPUT:', self.output or ''])


class WorkerThreadError(DevlibError):
    """
    This should get raised in the main thread if a non-WAError-derived
    exception occurs on a worker/background thread. If a WAError-derived
    exception is raised in the worker, then it that exception should be
    re-raised on the main thread directly -- the main point of this is to
    preserve the backtrace in the output, and backtrace doesn't get output for
    WAErrors.

    """

    def __init__(self, thread, exc_info):
        self.thread = thread
        self.exc_info = exc_info
        orig = self.exc_info[1]
        orig_name = type(orig).__name__
        message = 'Exception of type {} occured on thread {}:\n'.format(orig_name, thread)
        message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
        super(WorkerThreadError, self).__init__(message)


def get_traceback(exc=None):
    """
    Returns the string with the traceback for the specifiec exc
    object, or for the current exception exc is not specified.

    """
    import io, traceback, sys
    if exc is None:
        exc = sys.exc_info()
    if not exc:
        return None
    tb = exc[2]
    sio = io.BytesIO()
    traceback.print_tb(tb, file=sio)
    del tb  # needs to be done explicitly see: http://docs.python.org/2/library/sys.html#sys.exc_info
    return sio.getvalue()
devlib/host.py
@@ -1,4 +1,4 @@
# Copyright 2015 ARM Limited
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,7 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from glob import iglob
import os
import signal
import shutil
import subprocess
import logging
@@ -23,17 +25,22 @@ from devlib.utils.misc import check_output

PACKAGE_BIN_DIRECTORY = os.path.join(os.path.dirname(__file__), 'bin')

def kill_children(pid, signal=signal.SIGKILL):
    with open('/proc/{0}/task/{0}/children'.format(pid), 'r') as fd:
        for cpid in map(int, fd.read().strip().split()):
            kill_children(cpid, signal)
            os.kill(cpid, signal)

class LocalConnection(object):

    name = 'local'

    def __init__(self, timeout=10, keep_password=True, unrooted=False):
    def __init__(self, platform=None, keep_password=True, unrooted=False,
                 password=None, timeout=None):
        self.logger = logging.getLogger('local_connection')
        self.timeout = timeout
        self.keep_password = keep_password
        self.unrooted = unrooted
        self.password = None
        self.password = password

    def push(self, source, dest, timeout=None, as_root=False):  # pylint: disable=unused-argument
        self.logger.debug('cp {} {}'.format(source, dest))
@@ -41,9 +48,15 @@ class LocalConnection(object):

    def pull(self, source, dest, timeout=None, as_root=False):  # pylint: disable=unused-argument
        self.logger.debug('cp {} {}'.format(source, dest))
        shutil.copy(source, dest)
        if ('*' in source or '?' in source) and os.path.isdir(dest):
            # Pull all files matching a wildcard expression
            for each_source in iglob(source):
                shutil.copy(each_source, dest)
        else:
            shutil.copy(source, dest)

    def execute(self, command, timeout=None, check_exit_code=True, as_root=False):
    def execute(self, command, timeout=None, check_exit_code=True,
                as_root=False, strip_colors=True):
        self.logger.debug(command)
        if as_root:
            if self.unrooted:
@@ -54,7 +67,9 @@ class LocalConnection(object):
        try:
            return check_output(command, shell=True, timeout=timeout, ignore=ignore)[0]
        except subprocess.CalledProcessError as e:
            raise TargetError(e)
            message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'.format(
                e.returncode, command, e.output)
            raise TargetError(message)

    def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
        if as_root:
@@ -77,4 +92,3 @@ class LocalConnection(object):
        if self.keep_password:
            self.password = password
        return password

@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,10 +12,15 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import csv
|
||||
from __future__ import division
|
||||
import logging
|
||||
import collections
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
from devlib.utils.csvutil import csvreader
|
||||
from devlib.utils.types import numeric
|
||||
from devlib.utils.types import identifier
|
||||
|
||||
|
||||
# Channel modes describe what sort of measurement the instrument supports.
|
||||
@@ -23,28 +28,35 @@ from devlib.utils.types import numeric
|
||||
INSTANTANEOUS = 1
|
||||
CONTINUOUS = 2
|
||||
|
||||
MEASUREMENT_TYPES = {} # populated further down
|
||||
|
||||
class MeasurementType(tuple):
|
||||
|
||||
__slots__ = []
|
||||
class MeasurementType(object):
|
||||
|
||||
def __new__(cls, name, units, category=None):
|
||||
return tuple.__new__(cls, (name, units, category))
|
||||
def __init__(self, name, units, category=None, conversions=None):
|
||||
self.name = name
|
||||
self.units = units
|
||||
self.category = category
|
||||
self.conversions = {}
|
||||
if conversions is not None:
|
||||
for key, value in conversions.items():
|
||||
if not callable(value):
|
||||
msg = 'Converter must be callable; got {} "{}"'
|
||||
raise ValueError(msg.format(type(value), value))
|
||||
self.conversions[key] = value
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return tuple.__getitem__(self, 0)
|
||||
|
||||
@property
|
||||
def units(self):
|
||||
return tuple.__getitem__(self, 1)
|
||||
|
||||
@property
|
||||
def category(self):
|
||||
return tuple.__getitem__(self, 2)
|
||||
|
||||
def __getitem__(self, item):
|
||||
raise TypeError()
|
||||
def convert(self, value, to):
|
||||
if isinstance(to, basestring) and to in MEASUREMENT_TYPES:
|
||||
to = MEASUREMENT_TYPES[to]
|
||||
if not isinstance(to, MeasurementType):
|
||||
msg = 'Unexpected conversion target: "{}"'
|
||||
raise ValueError(msg.format(to))
|
||||
if to.name == self.name:
|
||||
return value
|
||||
if not to.name in self.conversions:
|
||||
msg = 'No conversion from {} to {} available'
|
||||
raise ValueError(msg.format(self.name, to.name))
|
||||
return self.conversions[to.name](value)
|
||||
|
||||
def __cmp__(self, other):
|
||||
if isinstance(other, MeasurementType):
|
||||
@@ -54,24 +66,73 @@ class MeasurementType(tuple):
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
__repr__ = __str__
|
||||
def __repr__(self):
|
||||
if self.category:
|
||||
text = 'MeasurementType({}, {}, {})'
|
||||
return text.format(self.name, self.units, self.category)
|
||||
else:
|
||||
text = 'MeasurementType({}, {})'
|
||||
return text.format(self.name, self.units)
|
||||
|
||||
|
||||
# Standard measures
|
||||
# Standard measures. In order to make sure that downstream data processing is not tied
|
||||
# to particular insturments (e.g. a particular method of mearuing power), instruments
|
||||
# must, where possible, resport their measurments formatted as on of the standard types
|
||||
# defined here.
|
||||
_measurement_types = [
|
||||
MeasurementType('time', 'seconds'),
|
||||
MeasurementType('temperature', 'degrees'),
|
||||
# For whatever reason, the type of measurement could not be established.
|
||||
MeasurementType('unknown', None),
|
||||
|
||||
# Generic measurements
|
||||
MeasurementType('count', 'count'),
|
||||
MeasurementType('percent', 'percent'),
|
||||
|
||||
# Time measurement. While there is typically a single "canonical" unit
|
||||
# used for each type of measurmenent, time may be measured to a wide variety
|
||||
# of events occuring at a wide range of scales. Forcing everying into a
|
||||
# single scale will lead to inefficient and awkward to work with result tables.
|
||||
# Coversion functions between the formats are specified, so that downstream
|
||||
# processors that expect all times time be at a particular scale can automatically
|
||||
# covert without being familar with individual instruments.
|
||||
MeasurementType('time', 'seconds', 'time',
|
||||
conversions={
|
||||
'time_us': lambda x: x * 1000000,
|
||||
'time_ms': lambda x: x * 1000,
|
||||
}
|
||||
),
|
||||
MeasurementType('time_us', 'microseconds', 'time',
|
||||
conversions={
|
||||
'time': lambda x: x / 1000000,
|
||||
'time_ms': lambda x: x / 1000,
|
||||
}
|
||||
),
|
||||
MeasurementType('time_ms', 'milliseconds', 'time',
|
||||
conversions={
|
||||
'time': lambda x: x / 1000,
|
||||
'time_us': lambda x: x * 1000,
|
||||
}
|
||||
),
|
||||
|
||||
# Measurements related to thermals.
|
||||
MeasurementType('temperature', 'degrees', 'thermal'),
|
||||
|
||||
# Measurements related to power end energy consumption.
|
||||
MeasurementType('power', 'watts', 'power/energy'),
|
||||
MeasurementType('voltage', 'volts', 'power/energy'),
|
||||
MeasurementType('current', 'amps', 'power/energy'),
|
||||
MeasurementType('energy', 'joules', 'power/energy'),
|
||||
|
||||
# Measurments realted to data transfer, e.g. neworking,
|
||||
# memory, or backing storage.
|
||||
MeasurementType('tx', 'bytes', 'data transfer'),
|
||||
MeasurementType('rx', 'bytes', 'data transfer'),
|
||||
MeasurementType('tx/rx', 'bytes', 'data transfer'),
|
||||
|
||||
MeasurementType('fps', 'fps', 'ui render'),
|
||||
MeasurementType('frames', 'frames', 'ui render'),
|
||||
]
|
||||
MEASUREMENT_TYPES = {m.name: m for m in _measurement_types}
|
||||
for m in _measurement_types:
|
||||
MEASUREMENT_TYPES[m.name] = m
|
||||
|
||||
|
||||
class Measurement(object):
|
||||
@@ -91,7 +152,7 @@ class Measurement(object):
|
||||
self.channel = channel
|
||||
|
||||
def __cmp__(self, other):
|
||||
if isinstance(other, Measurement):
|
||||
if hasattr(other, 'value'):
|
||||
return cmp(self.value, other.value)
|
||||
else:
|
||||
return cmp(self.value, other)
|
||||
@@ -107,28 +168,72 @@ class Measurement(object):
|
||||
|
||||
class MeasurementsCsv(object):
|
||||
|
||||
def __init__(self, path, channels):
|
||||
def __init__(self, path, channels=None, sample_rate_hz=None):
|
||||
self.path = path
|
||||
self.channels = channels
|
||||
self._fh = open(path, 'rb')
|
||||
self.sample_rate_hz = sample_rate_hz
|
||||
if self.channels is None:
|
||||
self._load_channels()
|
||||
headings = [chan.label for chan in self.channels]
|
||||
self.data_tuple = collections.namedtuple('csv_entry',
|
||||
map(identifier, headings))
|
||||
|
||||
def measurements(self):
|
||||
return list(self.itermeasurements())
|
||||
return list(self.iter_measurements())
|
||||
|
||||
def itermeasurements(self):
|
||||
self._fh.seek(0)
|
||||
reader = csv.reader(self._fh)
|
||||
reader.next() # headings
|
||||
for row in reader:
|
||||
def iter_measurements(self):
|
||||
for row in self._iter_rows():
|
||||
values = map(numeric, row)
|
||||
yield [Measurement(v, c) for (v, c) in zip(values, self.channels)]
|
||||
|
||||
def values(self):
|
||||
return list(self.iter_values())
|
||||
|
||||
def iter_values(self):
|
||||
for row in self._iter_rows():
|
||||
values = list(map(numeric, row))
|
||||
yield self.data_tuple(*values)
|
||||
|
||||
def _load_channels(self):
|
||||
header = []
|
||||
with csvreader(self.path) as reader:
|
||||
header = next(reader)
|
||||
|
||||
self.channels = []
|
||||
for entry in header:
|
||||
for mt in MEASUREMENT_TYPES:
|
||||
suffix = '_{}'.format(mt)
|
||||
if entry.endswith(suffix):
|
||||
site = entry[:-len(suffix)]
|
||||
measure = mt
|
||||
break
|
||||
else:
|
||||
if entry in MEASUREMENT_TYPES:
|
||||
site = None
|
||||
measure = entry
|
||||
else:
|
||||
site = entry
|
||||
measure = 'unknown'
|
||||
|
||||
chan = InstrumentChannel(site, measure)
|
||||
self.channels.append(chan)
|
||||
|
||||
def _iter_rows(self):
|
||||
with csvreader(self.path) as reader:
|
||||
next(reader) # headings
|
||||
for row in reader:
|
||||
yield row
|
||||
|
||||
|
||||
class InstrumentChannel(object):
|
||||
|
||||
@property
|
||||
def label(self):
|
||||
return '{}_{}'.format(self.site, self.kind)
|
||||
if self.site is not None:
|
||||
return '{}_{}'.format(self.site, self.kind)
|
||||
return self.kind
|
||||
|
||||
name = label
|
||||
|
||||
@property
|
||||
def kind(self):
|
||||
@@ -138,8 +243,7 @@ class InstrumentChannel(object):
|
||||
def units(self):
|
||||
return self.measurement_type.units
|
||||
|
||||
def __init__(self, name, site, measurement_type, **attrs):
|
||||
self.name = name
|
||||
def __init__(self, site, measurement_type, **attrs):
|
||||
self.site = site
|
||||
if isinstance(measurement_type, MeasurementType):
|
||||
self.measurement_type = measurement_type
|
||||
@@ -148,7 +252,7 @@ class InstrumentChannel(object):
|
||||
self.measurement_type = MEASUREMENT_TYPES[measurement_type]
|
||||
except KeyError:
|
||||
raise ValueError('Unknown measurement type: {}'.format(measurement_type))
|
||||
for atname, atvalue in attrs.iteritems():
|
||||
for atname, atvalue in attrs.items():
|
||||
setattr(self, atname, atvalue)
|
||||
|
||||
    def __str__(self):
@@ -167,23 +271,22 @@ class Instrument(object):
    def __init__(self, target):
        self.target = target
        self.logger = logging.getLogger(self.__class__.__name__)
-       self.channels = {}
+       self.channels = collections.OrderedDict()
        self.active_channels = []
        self.sample_rate_hz = None

    # channel management

    def list_channels(self):
-       return self.channels.values()
+       return list(self.channels.values())

    def get_channels(self, measure):
        if hasattr(measure, 'name'):
            measure = measure.name
-       return [c for c in self.channels if c.measure.name == measure]
+       return [c for c in self.list_channels() if c.kind == measure]

-   def add_channel(self, site, measure, name=None, **attrs):
-       if name is None:
-           name = '{}_{}'.format(site, measure)
-       chan = InstrumentChannel(name, site, measure, **attrs)
+   def add_channel(self, site, measure, **attrs):
+       chan = InstrumentChannel(site, measure, **attrs)
        self.channels[chan.label] = chan

    # initialization and teardown

@@ -194,19 +297,27 @@ class Instrument(object):
    def teardown(self):
        pass

-   def reset(self, sites=None, kinds=None):
-       if kinds is None and sites is None:
-           self.active_channels = sorted(self.channels.values(), key=lambda x: x.label)
+   def reset(self, sites=None, kinds=None, channels=None):
+       if channels is not None:
+           if sites is not None or kinds is not None:
+               raise ValueError('sites and kinds should not be set if channels is set')
+
+           try:
+               self.active_channels = [self.channels[ch] for ch in channels]
+           except KeyError as e:
+               msg = 'Unexpected channel "{}"; must be in {}'
+               raise ValueError(msg.format(e, self.channels.keys()))
+       elif sites is None and kinds is None:
+           self.active_channels = sorted(self.channels.itervalues(), key=lambda x: x.label)
        else:
            if isinstance(sites, basestring):
                sites = [sites]
            if isinstance(kinds, basestring):
                kinds = [kinds]
-           self.active_channels = []
-           for chan in self.channels.values():
-               if (kinds is None or chan.kind in kinds) and \
-                   (sites is None or chan.site in sites):
-                   self.active_channels.append(chan)
+           wanted = lambda ch : ((kinds is None or ch.kind in kinds) and
+                                 (sites is None or ch.site in sites))
+           self.active_channels = filter(wanted, self.channels.itervalues())

    # instantaneous

@@ -223,3 +334,6 @@ class Instrument(object):
    def get_data(self, outfile):
        pass

+   def get_raw(self):
+       return []

|
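A minimal usage sketch of the reworked channel selection (not part of the diff; `instrument` stands for any already-constructed Instrument subclass, and the channel labels are assumed examples):

# Hypothetical sketch: select channels explicitly by label, collect, then read back.
instrument.reset(channels=['device_power', 'timestamp_time_ms'])
instrument.start()
# ... run the workload being measured ...
instrument.stop()
csv = instrument.get_data('/tmp/measurements.csv')   # returns a MeasurementsCsv
for sample in csv.iter_measurements():
    print(sample)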
157
devlib/instrument/acmecape.py
Normal file
@@ -0,0 +1,157 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
#pylint: disable=attribute-defined-outside-init
|
||||
from __future__ import division
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import tempfile
|
||||
from fcntl import fcntl, F_GETFL, F_SETFL
|
||||
from string import Template
|
||||
from subprocess import Popen, PIPE, STDOUT
|
||||
|
||||
from devlib import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import HostError
|
||||
from devlib.utils.csvutil import csvreader, csvwriter
|
||||
from devlib.utils.misc import which
|
||||
|
||||
OUTPUT_CAPTURE_FILE = 'acme-cape.csv'
|
||||
IIOCAP_CMD_TEMPLATE = Template("""
|
||||
${iio_capture} -n ${host} -b ${buffer_size} -c -f ${outfile} ${iio_device}
|
||||
""")
|
||||
|
||||
def _read_nonblock(pipe, size=1024):
|
||||
fd = pipe.fileno()
|
||||
flags = fcntl(fd, F_GETFL)
|
||||
flags |= os.O_NONBLOCK
|
||||
fcntl(fd, F_SETFL, flags)
|
||||
|
||||
output = ''
|
||||
try:
|
||||
while True:
|
||||
output += pipe.read(size)
|
||||
except IOError:
|
||||
pass
|
||||
return output
|
||||
|
||||
|
||||
class AcmeCapeInstrument(Instrument):
|
||||
|
||||
mode = CONTINUOUS
|
||||
|
||||
def __init__(self, target,
|
||||
iio_capture=which('iio-capture'),
|
||||
host='baylibre-acme.local',
|
||||
iio_device='iio:device0',
|
||||
buffer_size=256):
|
||||
super(AcmeCapeInstrument, self).__init__(target)
|
||||
self.iio_capture = iio_capture
|
||||
self.host = host
|
||||
self.iio_device = iio_device
|
||||
self.buffer_size = buffer_size
|
||||
self.sample_rate_hz = 100
|
||||
if self.iio_capture is None:
|
||||
raise HostError('Missing iio-capture binary')
|
||||
self.command = None
|
||||
self.process = None
|
||||
|
||||
self.add_channel('shunt', 'voltage')
|
||||
self.add_channel('bus', 'voltage')
|
||||
self.add_channel('device', 'power')
|
||||
self.add_channel('device', 'current')
|
||||
self.add_channel('timestamp', 'time_ms')
|
||||
|
||||
def __del__(self):
|
||||
if self.process and self.process.pid:
|
||||
self.logger.warning('killing iio-capture process [{}]...'.format(self.process.pid))
|
||||
self.process.kill()
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(AcmeCapeInstrument, self).reset(sites, kinds, channels)
|
||||
self.raw_data_file = tempfile.mkstemp('.csv')[1]
|
||||
params = dict(
|
||||
iio_capture=self.iio_capture,
|
||||
host=self.host,
|
||||
buffer_size=self.buffer_size,
|
||||
iio_device=self.iio_device,
|
||||
outfile=self.raw_data_file
|
||||
)
|
||||
self.command = IIOCAP_CMD_TEMPLATE.substitute(**params)
|
||||
self.logger.debug('ACME cape command: {}'.format(self.command))
|
||||
|
||||
def start(self):
|
||||
self.process = Popen(self.command.split(), stdout=PIPE, stderr=STDOUT)
|
||||
|
||||
def stop(self):
|
||||
self.process.terminate()
|
||||
timeout_secs = 10
|
||||
output = ''
|
||||
for _ in range(timeout_secs):
|
||||
if self.process.poll() is not None:
|
||||
break
|
||||
time.sleep(1)
|
||||
else:
|
||||
output += _read_nonblock(self.process.stdout)
|
||||
self.process.kill()
|
||||
self.logger.error('iio-capture did not terminate gracefully')
|
||||
if self.process.poll() is None:
|
||||
msg = 'Could not terminate iio-capture:\n{}'
|
||||
raise HostError(msg.format(output))
|
||||
if self.process.returncode != 15: # iio-capture exits with 15 when killed
|
||||
if sys.version_info[0] == 3:
|
||||
output += self.process.stdout.read().decode(sys.stdout.encoding, 'replace')
|
||||
else:
|
||||
output += self.process.stdout.read()
|
||||
self.logger.info('ACME instrument encountered an error, '
|
||||
'you may want to try rebooting the ACME device:\n'
|
||||
' ssh root@{} reboot'.format(self.host))
|
||||
raise HostError('iio-capture exited with an error ({}), output:\n{}'
|
||||
.format(self.process.returncode, output))
|
||||
if not os.path.isfile(self.raw_data_file):
|
||||
raise HostError('Output CSV not generated.')
|
||||
self.process = None
|
||||
|
||||
def get_data(self, outfile):
|
||||
if os.stat(self.raw_data_file).st_size == 0:
|
||||
self.logger.warning('"{}" appears to be empty'.format(self.raw_data_file))
|
||||
return
|
||||
|
||||
all_channels = [c.label for c in self.list_channels()]
|
||||
active_channels = [c.label for c in self.active_channels]
|
||||
active_indexes = [all_channels.index(ac) for ac in active_channels]
|
||||
|
||||
with csvreader(self.raw_data_file, skipinitialspace=True) as reader:
|
||||
with csvwriter(outfile) as writer:
|
||||
writer.writerow(active_channels)
|
||||
|
||||
header = next(reader)
|
||||
ts_index = header.index('timestamp ms')
|
||||
|
||||
|
||||
for row in reader:
|
||||
output_row = []
|
||||
for i in active_indexes:
|
||||
if i == ts_index:
|
||||
# Leave time in ms
|
||||
output_row.append(float(row[i]))
|
||||
else:
|
||||
# Convert rest into standard units.
|
||||
output_row.append(float(row[i])/1000)
|
||||
writer.writerow(output_row)
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
|
||||
def get_raw(self):
|
||||
return [self.raw_data_file]
|
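A hedged usage sketch for AcmeCapeInstrument (assumes iio-capture is installed on the host, an ACME board reachable at the default hostname, and an existing devlib `target`; the output filename is illustrative):

from devlib.instrument.acmecape import AcmeCapeInstrument

acme = AcmeCapeInstrument(target, host='baylibre-acme.local', iio_device='iio:device0')
acme.reset(kinds=['power'])      # capture only the power channel
acme.start()
# ... exercise the device under test ...
acme.stop()
csv = acme.get_data('acme_power.csv')
print([chan.label for chan in csv.channels])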
145
devlib/instrument/arm_energy_probe.py
Normal file
@@ -0,0 +1,145 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Copyright 2018 Linaro Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
# pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init
|
||||
from __future__ import division
|
||||
import os
|
||||
import subprocess
|
||||
import signal
|
||||
import struct
|
||||
import sys
|
||||
|
||||
import tempfile
|
||||
import shutil
|
||||
|
||||
from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import HostError
|
||||
from devlib.utils.csvutil import csvreader, csvwriter
|
||||
from devlib.utils.misc import which
|
||||
|
||||
from devlib.utils.parse_aep import AepParser
|
||||
|
||||
class ArmEnergyProbeInstrument(Instrument):
|
||||
"""
|
||||
Collects power traces using the ARM Energy Probe.
|
||||
|
||||
This instrument requires ``arm-probe`` utility to be installed on the host and be in the PATH.
|
||||
arm-probe is available here:
|
||||
``https://git.linaro.org/tools/arm-probe.git``.
|
||||
|
||||
Details about how to build and use it is available here:
|
||||
``https://git.linaro.org/tools/arm-probe.git/tree/README``
|
||||
|
||||
ARM energy probe (AEP) device can simultaneously collect power from up to 3 power rails and
|
||||
arm-probe utility can record data from several AEP devices simultaneously.
|
||||
|
||||
To connect the energy probe on a rail, connect the white wire to the pin that is closer to the
|
||||
Voltage source and the black wire to the pin that is closer to the load (the SoC or the device
|
||||
you are probing). Between the pins there should be a shunt resistor of known resistance in the
|
||||
range of 5 to 500 mOhm but the voltage on the shunt resistor must stay smaller than 165mV.
|
||||
The resistance of the shunt resistors is a mandatory parameter to be set in the ``config`` file.
|
||||
"""
|
||||
|
||||
mode = CONTINUOUS
|
||||
|
||||
MAX_CHANNELS = 12 # 4 Arm Energy Probes
|
||||
|
||||
def __init__(self, target, config_file='./config-aep', ):
|
||||
super(ArmEnergyProbeInstrument, self).__init__(target)
|
||||
self.arm_probe = which('arm-probe')
|
||||
if self.arm_probe is None:
|
||||
raise HostError('arm-probe must be installed on the host')
|
||||
#todo detect is config file exist
|
||||
self.attributes = ['power', 'voltage', 'current']
|
||||
self.sample_rate_hz = 10000
|
||||
self.config_file = config_file
|
||||
|
||||
self.parser = AepParser()
|
||||
#TODO make it generic
|
||||
topo = self.parser.topology_from_config(self.config_file)
|
||||
for item in topo:
|
||||
if item == 'time':
|
||||
self.add_channel('timestamp', 'time')
|
||||
else:
|
||||
self.add_channel(item, 'power')
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(ArmEnergyProbeInstrument, self).reset(sites, kinds, channels)
|
||||
self.output_directory = tempfile.mkdtemp(prefix='energy_probe')
|
||||
self.output_file_raw = os.path.join(self.output_directory, 'data_raw')
|
||||
self.output_file = os.path.join(self.output_directory, 'data')
|
||||
self.output_file_figure = os.path.join(self.output_directory, 'summary.txt')
|
||||
self.output_file_error = os.path.join(self.output_directory, 'error.log')
|
||||
self.output_fd_error = open(self.output_file_error, 'w')
|
||||
self.command = 'arm-probe --config {} > {}'.format(self.config_file, self.output_file_raw)
|
||||
|
||||
def start(self):
|
||||
self.logger.debug(self.command)
|
||||
self.armprobe = subprocess.Popen(self.command,
|
||||
stderr=self.output_fd_error,
|
||||
preexec_fn=os.setpgrp,
|
||||
shell=True)
|
||||
|
||||
def stop(self):
|
||||
self.logger.debug("kill running arm-probe")
|
||||
os.killpg(self.armprobe.pid, signal.SIGTERM)
|
||||
|
||||
def get_data(self, outfile): # pylint: disable=R0914
|
||||
self.logger.debug("Parse data and compute consumed energy")
|
||||
self.parser.prepare(self.output_file_raw, self.output_file, self.output_file_figure)
|
||||
self.parser.parse_aep()
|
||||
self.parser.unprepare()
|
||||
skip_header = 1
|
||||
|
||||
all_channels = [c.label for c in self.list_channels()]
|
||||
active_channels = [c.label for c in self.active_channels]
|
||||
active_indexes = [all_channels.index(ac) for ac in active_channels]
|
||||
|
||||
with csvreader(self.output_file, delimiter=' ') as reader:
|
||||
with csvwriter(outfile) as writer:
|
||||
for row in reader:
|
||||
if skip_header == 1:
|
||||
writer.writerow(active_channels)
|
||||
skip_header = 0
|
||||
continue
|
||||
if len(row) < len(active_channels):
|
||||
continue
|
||||
# all data are in micro (seconds/watt)
|
||||
new = [ float(row[i])/1000000 for i in active_indexes ]
|
||||
writer.writerow(new)
|
||||
|
||||
self.output_fd_error.close()
|
||||
shutil.rmtree(self.output_directory)
|
||||
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
|
||||
def get_raw(self):
|
||||
return [self.output_file_raw]
|
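A usage sketch under the assumptions stated in the docstring above (arm-probe on the PATH and a config file describing the shunt resistors); names other than the class itself are illustrative:

from devlib.instrument.arm_energy_probe import ArmEnergyProbeInstrument

aep = ArmEnergyProbeInstrument(target, config_file='./config-aep')
aep.reset()                      # activate all channels read from the config topology
aep.start()
# ... run the measured workload ...
aep.stop()
csv = aep.get_data('aep.csv')    # values converted from micro-units by get_data()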
@@ -1,19 +1,34 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import csv
|
||||
import tempfile
|
||||
from itertools import chain
|
||||
|
||||
from devlib.instrument import Instrument, MeasurementsCsv, CONTINUOUS
|
||||
from devlib.exception import HostError
|
||||
from devlib.utils.csvutil import csvwriter, create_reader
|
||||
from devlib.utils.misc import unique
|
||||
|
||||
try:
    from daqpower.client import execute_command, Status
    from daqpower.config import DeviceConfiguration, ServerConfiguration
-except ImportError, e:
+except ImportError as e:
    execute_command, Status = None, None
    DeviceConfiguration, ServerConfiguration, ConfigurationError = None, None, None
-   import_error_mesg = e.message
+   import_error_mesg = e.args[0] if e.args else str(e)
|
||||
|
||||
|
||||
class DaqInstrument(Instrument):
|
||||
@@ -27,16 +42,17 @@ class DaqInstrument(Instrument):
|
||||
device_id='Dev1',
|
||||
v_range=2.5,
|
||||
dv_range=0.2,
|
||||
-                sampling_rate=10000,
+                sample_rate_hz=10000,
|
||||
channel_map=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23),
|
||||
):
|
||||
# pylint: disable=no-member
|
||||
super(DaqInstrument, self).__init__(target)
|
||||
self._need_reset = True
|
||||
self._raw_files = []
|
||||
if execute_command is None:
|
||||
raise HostError('Could not import "daqpower": {}'.format(import_error_mesg))
|
||||
if labels is None:
|
||||
-           labels = ['PORT_{}'.format(i) for i in xrange(len(resistor_values))]
+           labels = ['PORT_{}'.format(i) for i in range(len(resistor_values))]
|
||||
if len(labels) != len(resistor_values):
|
||||
raise ValueError('"labels" and "resistor_values" must be of the same length')
|
||||
self.server_config = ServerConfiguration(host=host,
|
||||
@@ -51,22 +67,24 @@ class DaqInstrument(Instrument):
|
||||
self.device_config = DeviceConfiguration(device_id=device_id,
|
||||
v_range=v_range,
|
||||
dv_range=dv_range,
|
||||
-                                                sampling_rate=sampling_rate,
+                                                sampling_rate=sample_rate_hz,
|
||||
resistor_values=resistor_values,
|
||||
channel_map=channel_map,
|
||||
labels=labels)
|
||||
self.sample_rate_hz = sample_rate_hz
|
||||
|
||||
for label in labels:
|
||||
for kind in ['power', 'voltage']:
|
||||
self.add_channel(label, kind)
|
||||
|
||||
-   def reset(self, sites=None, kinds=None):
-       super(DaqInstrument, self).reset(sites, kinds)
+   def reset(self, sites=None, kinds=None, channels=None):
+       super(DaqInstrument, self).reset(sites, kinds, channels)
|
||||
self.execute('close')
|
||||
result = self.execute('configure', config=self.device_config)
|
||||
if not result.status == Status.OK: # pylint: disable=no-member
|
||||
raise HostError(result.message)
|
||||
self._need_reset = False
|
||||
self._raw_files = []
|
||||
|
||||
def start(self):
|
||||
if self._need_reset:
|
||||
@@ -85,6 +103,7 @@ class DaqInstrument(Instrument):
|
||||
site = os.path.splitext(entry)[0]
|
||||
path = os.path.join(tempdir, entry)
|
||||
raw_file_map[site] = path
|
||||
self._raw_files.append(path)
|
||||
|
||||
active_sites = unique([c.site for c in self.active_channels])
|
||||
file_handles = []
|
||||
@@ -93,8 +112,8 @@ class DaqInstrument(Instrument):
|
||||
for site in active_sites:
|
||||
try:
|
||||
site_file = raw_file_map[site]
|
||||
-               fh = open(site_file, 'rb')
-               site_readers[site] = csv.reader(fh)
+               reader, fh = create_reader(site_file)
+               site_readers[site] = reader
|
||||
file_handles.append(fh)
|
||||
except KeyError:
|
||||
message = 'Could not get DAQ trace for {}; Obtained traces are in {}'
|
||||
@@ -102,22 +121,21 @@ class DaqInstrument(Instrument):
|
||||
|
||||
# The first row is the headers
|
||||
        channel_order = []
-       for site, reader in site_readers.iteritems():
+       for site, reader in site_readers.items():
            channel_order.extend(['{}_{}'.format(site, kind)
-                                 for kind in reader.next()])
+                                 for kind in next(reader)])

        def _read_next_rows():
            parts = []
-           for reader in site_readers.itervalues():
+           for reader in site_readers.values():
                try:
-                   parts.extend(reader.next())
+                   parts.extend(next(reader))
                except StopIteration:
                    parts.extend([None, None])
            return list(chain(parts))

-       with open(outfile, 'wb') as wfh:
+       with csvwriter(outfile) as writer:
            field_names = [c.label for c in self.active_channels]
-           writer = csv.writer(wfh)
            writer.writerow(field_names)
            raw_row = _read_next_rows()
            while any(raw_row):
@@ -125,11 +143,14 @@ class DaqInstrument(Instrument):
                writer.writerow(row)
                raw_row = _read_next_rows()

-           return MeasurementsCsv(outfile, self.active_channels)
+           return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
finally:
|
||||
for fh in file_handles:
|
||||
fh.close()
|
||||
|
||||
def get_raw(self):
|
||||
return self._raw_files
|
||||
|
||||
def teardown(self):
|
||||
self.execute('close')
|
||||
|
||||
|
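A sketch of how the updated DaqInstrument might be driven (assumes a running daqpower server; the resistor values, labels and host are placeholders, not taken from the diff):

from devlib.instrument.daq import DaqInstrument

daq = DaqInstrument(target,
                    resistor_values=[0.010, 0.010],   # example shunt values
                    labels=['A57', 'A53'],            # hypothetical rail names
                    sample_rate_hz=10000)
daq.reset(sites=['A57'])
daq.start()
# ... workload ...
daq.stop()
csv = daq.get_data('daq.csv')
daq.teardown()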
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,19 +14,15 @@
|
||||
#
|
||||
from __future__ import division
import os
-import csv
import signal
import tempfile
import struct
import subprocess
-
-try:
-    import pandas
-except ImportError:
-    pandas = None
+import sys

from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
from devlib.exception import HostError
+from devlib.utils.csvutil import csvwriter
from devlib.utils.misc import which
|
||||
|
||||
|
||||
@@ -44,33 +40,33 @@ class EnergyProbeInstrument(Instrument):
|
||||
self.labels = labels
|
||||
else:
|
||||
self.labels = ['PORT_{}'.format(i)
|
||||
-                          for i in xrange(len(resistor_values))]
+                          for i in range(len(resistor_values))]
|
||||
self.device_entry = device_entry
|
||||
self.caiman = which('caiman')
|
||||
if self.caiman is None:
|
||||
raise HostError('caiman must be installed on the host '
|
||||
'(see https://github.com/ARM-software/caiman)')
|
||||
-       if pandas is None:
-           self.logger.info("pandas package will significantly speed up this instrument")
-           self.logger.info("to install it try: pip install pandas")
|
||||
self.attributes_per_sample = 3
|
||||
self.bytes_per_sample = self.attributes_per_sample * 4
|
||||
self.attributes = ['power', 'voltage', 'current']
|
||||
self.command = None
|
||||
self.raw_output_directory = None
|
||||
self.process = None
|
||||
self.sample_rate_hz = 10000 # Determined empirically
|
||||
self.raw_data_file = None
|
||||
|
||||
for label in self.labels:
|
||||
for kind in self.attributes:
|
||||
self.add_channel(label, kind)
|
||||
|
||||
-   def reset(self, sites=None, kinds=None):
-       super(EnergyProbeInstrument, self).reset(sites, kinds)
+   def reset(self, sites=None, kinds=None, channels=None):
+       super(EnergyProbeInstrument, self).reset(sites, kinds, channels)
|
||||
self.raw_output_directory = tempfile.mkdtemp(prefix='eprobe-caiman-')
|
||||
parts = ['-r {}:{} '.format(i, int(1000 * rval))
|
||||
for i, rval in enumerate(self.resistor_values)]
|
||||
rstring = ''.join(parts)
|
||||
self.command = '{} -d {} -l {} {}'.format(self.caiman, self.device_entry, rstring, self.raw_output_directory)
|
||||
self.raw_data_file = None
|
||||
|
||||
def start(self):
|
||||
self.logger.debug(self.command)
|
||||
@@ -82,7 +78,17 @@ class EnergyProbeInstrument(Instrument):
|
||||
shell=True)
|
||||
|
||||
    def stop(self):
-       os.killpg(self.process.pid, signal.SIGTERM)
+       self.process.poll()
+       if self.process.returncode is not None:
+           stdout, stderr = self.process.communicate()
+           if sys.version_info[0] == 3:
+               stdout = stdout.decode(sys.stdout.encoding, 'replace')
+               stderr = stderr.decode(sys.stdout.encoding, 'replace')
+           raise HostError(
+               'Energy Probe: Caiman exited unexpectedly with exit code {}.\n'
+               'stdout:\n{}\nstderr:\n{}'.format(self.process.returncode,
+                                                 stdout, stderr))
+       os.killpg(self.process.pid, signal.SIGINT)
|
||||
|
||||
def get_data(self, outfile): # pylint: disable=R0914
|
||||
all_channels = [c.label for c in self.list_channels()]
|
||||
@@ -92,12 +98,11 @@ class EnergyProbeInstrument(Instrument):
|
||||
num_of_ports = len(self.resistor_values)
|
||||
struct_format = '{}I'.format(num_of_ports * self.attributes_per_sample)
|
||||
not_a_full_row_seen = False
|
||||
-       raw_data_file = os.path.join(self.raw_output_directory, '0000000000')
+       self.raw_data_file = os.path.join(self.raw_output_directory, '0000000000')

-       self.logger.debug('Parsing raw data file: {}'.format(raw_data_file))
-       with open(raw_data_file, 'rb') as bfile:
-           with open(outfile, 'wb') as wfh:
-               writer = csv.writer(wfh)
+       self.logger.debug('Parsing raw data file: {}'.format(self.raw_data_file))
+       with open(self.raw_data_file, 'rb') as bfile:
+           with csvwriter(outfile) as writer:
|
||||
writer.writerow(active_channels)
|
||||
while True:
|
||||
data = bfile.read(num_of_ports * self.bytes_per_sample)
|
||||
@@ -113,4 +118,7 @@ class EnergyProbeInstrument(Instrument):
|
||||
continue
|
||||
else:
|
||||
not_a_full_row_seen = True
|
||||
-       return MeasurementsCsv(outfile, self.active_channels)
+       return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
|
||||
def get_raw(self):
|
||||
return [self.raw_data_file]
|
||||
|
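Similarly, a hypothetical EnergyProbeInstrument session (requires the caiman binary noted above; the resistor value and device entry are placeholders):

from devlib.instrument.energy_probe import EnergyProbeInstrument

probe = EnergyProbeInstrument(target, resistor_values=[0.020], device_entry='/dev/ttyACM0')
probe.reset()
probe.start()
# ... workload ...
probe.stop()
csv = probe.get_data('eprobe.csv')
print(probe.get_raw())           # path of the raw caiman capture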
97
devlib/instrument/frames.py
Normal file
@@ -0,0 +1,97 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from __future__ import division
|
||||
from devlib.instrument import (Instrument, CONTINUOUS,
|
||||
MeasurementsCsv, MeasurementType)
|
||||
from devlib.utils.rendering import (GfxinfoFrameCollector,
|
||||
SurfaceFlingerFrameCollector,
|
||||
SurfaceFlingerFrame,
|
||||
read_gfxinfo_columns)
|
||||
|
||||
|
||||
class FramesInstrument(Instrument):
|
||||
|
||||
mode = CONTINUOUS
|
||||
collector_cls = None
|
||||
|
||||
def __init__(self, target, collector_target, period=2, keep_raw=True):
|
||||
super(FramesInstrument, self).__init__(target)
|
||||
self.collector_target = collector_target
|
||||
self.period = period
|
||||
self.keep_raw = keep_raw
|
||||
self.sample_rate_hz = 1 / self.period
|
||||
self.collector = None
|
||||
self.header = None
|
||||
self._need_reset = True
|
||||
self._raw_file = None
|
||||
self._init_channels()
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(FramesInstrument, self).reset(sites, kinds, channels)
|
||||
self.collector = self.collector_cls(self.target, self.period,
|
||||
self.collector_target, self.header)
|
||||
self._need_reset = False
|
||||
self._raw_file = None
|
||||
|
||||
def start(self):
|
||||
if self._need_reset:
|
||||
self.reset()
|
||||
self.collector.start()
|
||||
|
||||
def stop(self):
|
||||
self.collector.stop()
|
||||
self._need_reset = True
|
||||
|
||||
def get_data(self, outfile):
|
||||
if self.keep_raw:
|
||||
self._raw_file = outfile + '.raw'
|
||||
self.collector.process_frames(self._raw_file)
|
||||
active_sites = [chan.label for chan in self.active_channels]
|
||||
self.collector.write_frames(outfile, columns=active_sites)
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
|
||||
def get_raw(self):
|
||||
return [self._raw_file] if self._raw_file else []
|
||||
|
||||
def _init_channels(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class GfxInfoFramesInstrument(FramesInstrument):
|
||||
|
||||
mode = CONTINUOUS
|
||||
collector_cls = GfxinfoFrameCollector
|
||||
|
||||
def _init_channels(self):
|
||||
columns = read_gfxinfo_columns(self.target)
|
||||
for entry in columns:
|
||||
if entry == 'Flags':
|
||||
self.add_channel('Flags', MeasurementType('flags', 'flags'))
|
||||
else:
|
||||
self.add_channel(entry, 'time_us')
|
||||
self.header = [chan.label for chan in self.channels.values()]
|
||||
|
||||
|
||||
class SurfaceFlingerFramesInstrument(FramesInstrument):
|
||||
|
||||
mode = CONTINUOUS
|
||||
collector_cls = SurfaceFlingerFrameCollector
|
||||
|
||||
def _init_channels(self):
|
||||
for field in SurfaceFlingerFrame._fields:
|
||||
# remove the "_time" from filed names to avoid duplication
|
||||
self.add_channel(field[:-5], 'time_us')
|
||||
self.header = [chan.label for chan in self.channels.values()]
|
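A short sketch of collecting frame timings with the gfxinfo-based collector (the package name is a placeholder; assumes an Android target):

from devlib.instrument.frames import GfxInfoFramesInstrument

frames = GfxInfoFramesInstrument(target, collector_target='com.example.app', period=2)
frames.reset()
frames.start()
# ... interact with the app ...
frames.stop()
csv = frames.get_data('frames.csv')
print(frames.get_raw())          # raw dump kept because keep_raw defaults to True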
80
devlib/instrument/gem5power.py
Normal file
@@ -0,0 +1,80 @@
|
||||
# Copyright 2017-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import division
|
||||
import re
|
||||
|
||||
from devlib.platform.gem5 import Gem5SimulationPlatform
|
||||
from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
|
||||
|
||||
class Gem5PowerInstrument(Instrument):
|
||||
'''
|
||||
Instrument enabling power monitoring in gem5
|
||||
'''
|
||||
|
||||
mode = CONTINUOUS
|
||||
roi_label = 'power_instrument'
|
||||
site_mapping = {'timestamp': 'sim_seconds'}
|
||||
|
||||
def __init__(self, target, power_sites):
|
||||
'''
|
||||
Parameter power_sites is a list of gem5 identifiers for power values.
|
||||
One example of such a field:
|
||||
system.cluster0.cores0.power_model.static_power
|
||||
'''
|
||||
if not isinstance(target.platform, Gem5SimulationPlatform):
|
||||
raise TargetError('Gem5PowerInstrument requires a gem5 platform')
|
||||
if not target.has('gem5stats'):
|
||||
raise TargetError('Gem5StatsModule is not loaded')
|
||||
super(Gem5PowerInstrument, self).__init__(target)
|
||||
|
||||
# power_sites is assumed to be a list later
|
||||
if isinstance(power_sites, list):
|
||||
self.power_sites = power_sites
|
||||
else:
|
||||
self.power_sites = [power_sites]
|
||||
self.add_channel('timestamp', 'time')
|
||||
for field in self.power_sites:
|
||||
self.add_channel(field, 'power')
|
||||
self.target.gem5stats.book_roi(self.roi_label)
|
||||
self.sample_period_ns = 10000000
|
||||
# Sample rate must remain unset as gem5 does not provide samples
|
||||
# at regular intervals therefore the reported timestamp should be used.
|
||||
self.sample_rate_hz = None
|
||||
self.target.gem5stats.start_periodic_dump(0, self.sample_period_ns)
|
||||
self._base_stats_dump = 0
|
||||
|
||||
def start(self):
|
||||
self.target.gem5stats.roi_start(self.roi_label)
|
||||
|
||||
def stop(self):
|
||||
self.target.gem5stats.roi_end(self.roi_label)
|
||||
|
||||
def get_data(self, outfile):
|
||||
active_sites = [c.site for c in self.active_channels]
|
||||
with csvwriter(outfile) as writer:
|
||||
writer.writerow([c.label for c in self.active_channels]) # headers
|
||||
sites_to_match = [self.site_mapping.get(s, s) for s in active_sites]
|
||||
for rec, rois in self.target.gem5stats.match_iter(sites_to_match,
|
||||
[self.roi_label], self._base_stats_dump):
|
||||
writer.writerow([rec[s] for s in sites_to_match])
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(Gem5PowerInstrument, self).reset(sites, kinds, channels)
|
||||
self._base_stats_dump = self.target.gem5stats.next_dump_no()
|
||||
|
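A sketch for Gem5PowerInstrument, assuming a gem5 platform target with the gem5stats module loaded; the power site string is the example from the docstring above:

from devlib.instrument.gem5power import Gem5PowerInstrument

power = Gem5PowerInstrument(target,
                            power_sites=['system.cluster0.cores0.power_model.static_power'])
power.reset()
power.start()
# ... simulated workload ...
power.stop()
csv = power.get_data('gem5_power.csv')   # sample_rate_hz stays None; rely on the timestamp column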
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2017 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -45,7 +45,7 @@ class HwmonInstrument(Instrument):
|
||||
measure = self.measure_map.get(ts.kind)[0]
|
||||
if measure:
|
||||
self.logger.debug('\tAdding sensor {}'.format(ts.name))
|
||||
-                   self.add_channel(_guess_site(ts), measure, name=ts.name, sensor=ts)
+                   self.add_channel(_guess_site(ts), measure, sensor=ts)
|
||||
else:
|
||||
self.logger.debug('\tSkipping sensor {} (unknown kind "{}")'.format(ts.name, ts.kind))
|
||||
except ValueError:
|
||||
|
153
devlib/instrument/monsoon.py
Normal file
@@ -0,0 +1,153 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
from subprocess import Popen, PIPE
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import HostError
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
from devlib.utils.misc import which
|
||||
|
||||
|
||||
INSTALL_INSTRUCTIONS="""
|
||||
MonsoonInstrument requires the monsoon.py tool, available from AOSP:
|
||||
|
||||
https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py
|
||||
|
||||
Download this script and put it in your $PATH (or pass it as the monsoon_bin
|
||||
parameter to MonsoonInstrument). `pip install python-gflags pyserial` to install
|
||||
the dependencies.
|
||||
"""
|
||||
|
||||
|
||||
class MonsoonInstrument(Instrument):
|
||||
"""Instrument for Monsoon Solutions power monitor
|
||||
|
||||
To use this instrument, you need to install the monsoon.py script available
|
||||
from the Android Open Source Project. As of May 2017 this is under the CTS
|
||||
repository:
|
||||
|
||||
https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py
|
||||
|
||||
Collects power measurements only, from a selection of two channels, the USB
|
||||
passthrough channel and the main output channel.
|
||||
|
||||
:param target: Ignored
|
||||
:param monsoon_bin: Path to monsoon.py executable. If not provided,
|
||||
``$PATH`` is searched.
|
||||
:param tty_device: TTY device to use to communicate with the Power
|
||||
Monitor. If not provided, a sane default is used.
|
||||
"""
|
||||
|
||||
mode = CONTINUOUS
|
||||
|
||||
def __init__(self, target, monsoon_bin=None, tty_device=None):
|
||||
super(MonsoonInstrument, self).__init__(target)
|
||||
self.monsoon_bin = monsoon_bin or which('monsoon.py')
|
||||
if not self.monsoon_bin:
|
||||
raise HostError(INSTALL_INSTRUCTIONS)
|
||||
|
||||
self.tty_device = tty_device
|
||||
|
||||
self.process = None
|
||||
self.output = None
|
||||
|
||||
self.sample_rate_hz = 500
|
||||
self.add_channel('output', 'power')
|
||||
self.add_channel('USB', 'power')
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(MonsoonInstrument, self).reset(sites, kinds)
|
||||
|
||||
def start(self):
|
||||
if self.process:
|
||||
self.process.kill()
|
||||
|
||||
cmd = [self.monsoon_bin,
|
||||
'--hz', str(self.sample_rate_hz),
|
||||
'--samples', '-1', # -1 means sample indefinitely
|
||||
'--includeusb']
|
||||
if self.tty_device:
|
||||
cmd += ['--device', self.tty_device]
|
||||
|
||||
self.logger.debug(' '.join(cmd))
|
||||
self.buffer_file = NamedTemporaryFile(prefix='monsoon', delete=False)
|
||||
self.process = Popen(cmd, stdout=self.buffer_file, stderr=PIPE)
|
||||
|
||||
def stop(self):
|
||||
process = self.process
|
||||
self.process = None
|
||||
if not process:
|
||||
raise RuntimeError('Monsoon script not started')
|
||||
|
||||
process.poll()
|
||||
if process.returncode is not None:
|
||||
stdout, stderr = process.communicate()
|
||||
if sys.version_info[0] == 3:
|
||||
stdout = stdout.encode(sys.stdout.encoding)
|
||||
stderr = stderr.encode(sys.stdout.encoding)
|
||||
raise HostError(
|
||||
'Monsoon script exited unexpectedly with exit code {}.\n'
|
||||
'stdout:\n{}\nstderr:\n{}'.format(process.returncode,
|
||||
stdout, stderr))
|
||||
|
||||
process.send_signal(signal.SIGINT)
|
||||
|
||||
stderr = process.stderr.read()
|
||||
|
||||
self.buffer_file.close()
|
||||
with open(self.buffer_file.name) as f:
|
||||
stdout = f.read()
|
||||
os.remove(self.buffer_file.name)
|
||||
self.buffer_file = None
|
||||
|
||||
self.output = (stdout, stderr)
|
||||
|
||||
def get_data(self, outfile):
|
||||
if self.process:
|
||||
raise RuntimeError('`get_data` called before `stop`')
|
||||
|
||||
stdout, stderr = self.output
|
||||
|
||||
with csvwriter(outfile) as writer:
|
||||
active_sites = [c.site for c in self.active_channels]
|
||||
|
||||
# Write column headers
|
||||
row = []
|
||||
if 'output' in active_sites:
|
||||
row.append('output_power')
|
||||
if 'USB' in active_sites:
|
||||
row.append('USB_power')
|
||||
writer.writerow(row)
|
||||
|
||||
# Write data
|
||||
for line in stdout.splitlines():
|
||||
# Each output line is a main_output, usb_output measurement pair.
|
||||
# (If our user only requested one channel we still collect both,
|
||||
# and just ignore one of them)
|
||||
output, usb = line.split()
|
||||
row = []
|
||||
if 'output' in active_sites:
|
||||
row.append(output)
|
||||
if 'USB' in active_sites:
|
||||
row.append(usb)
|
||||
writer.writerow(row)
|
||||
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
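A hypothetical MonsoonInstrument run (assumes monsoon.py and its dependencies are installed as described in INSTALL_INSTRUCTIONS; the tty path is an example):

from devlib.instrument.monsoon import MonsoonInstrument

monsoon = MonsoonInstrument(target, tty_device='/dev/ttyACM0')
monsoon.reset(sites=['output'])       # main output channel only
monsoon.start()
# ... workload ...
monsoon.stop()
csv = monsoon.get_data('monsoon.csv')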
@@ -1,14 +1,30 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import re
|
||||
import csv
|
||||
import tempfile
|
||||
from datetime import datetime
|
||||
from collections import defaultdict
|
||||
-from itertools import izip_longest
+from future.moves.itertools import zip_longest
|
||||
|
||||
from devlib.instrument import Instrument, MeasurementsCsv, CONTINUOUS
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.utils.android import ApkInfo
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
|
||||
|
||||
THIS_DIR = os.path.dirname(__file__)
|
||||
@@ -46,10 +62,9 @@ def netstats_to_measurements(netstats):
|
||||
def write_measurements_csv(measurements, filepath):
|
||||
headers = sorted(measurements.keys())
|
||||
columns = [measurements[h] for h in headers]
|
||||
-   with open(filepath, 'wb') as wfh:
-       writer = csv.writer(wfh)
+   with csvwriter(filepath) as writer:
        writer.writerow(headers)
-       writer.writerows(izip_longest(*columns))
+       writer.writerows(zip_longest(*columns))
|
||||
|
||||
|
||||
class NetstatsInstrument(Instrument):
|
||||
@@ -98,8 +113,8 @@ class NetstatsInstrument(Instrument):
|
||||
self.logger.debug('Deploying {} to target'.format(self.package))
|
||||
self.target.install(self.apk)
|
||||
|
||||
-   def reset(self, sites=None, kinds=None, period=None):  # pylint: disable=arguments-differ
-       super(NetstatsInstrument, self).reset(sites, kinds)
+   def reset(self, sites=None, kinds=None, channels=None, period=None):  # pylint: disable=arguments-differ
+       super(NetstatsInstrument, self).reset(sites, kinds, channels)
|
||||
period_arg, packages_arg = '', ''
|
||||
self.tag = 'netstats-{}'.format(datetime.now().strftime('%Y%m%d%H%M%s'))
|
||||
tag_arg = ' --es tag {}'.format(self.tag)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
# Copyright 2014-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -15,6 +15,8 @@
|
||||
import logging
|
||||
from inspect import isclass
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
from devlib.utils.misc import walk_modules
|
||||
from devlib.utils.types import identifier
|
||||
|
||||
@@ -56,7 +58,7 @@ class Module(object):
|
||||
|
||||
def __init__(self, target):
|
||||
self.target = target
|
||||
-       self.logger = logging.getLogger(self.__class__.__name__)
+       self.logger = logging.getLogger(self.name)
|
||||
|
||||
|
||||
class HardRestModule(Module): # pylint: disable=R0921
|
||||
@@ -75,7 +77,7 @@ class BootModule(Module): # pylint: disable=R0921
|
||||
raise NotImplementedError()
|
||||
|
||||
def update(self, **kwargs):
|
||||
-       for name, value in kwargs.iteritems():
+       for name, value in kwargs.items():
|
||||
if not hasattr(self, name):
|
||||
raise ValueError('Unknown parameter "{}" for {}'.format(name, self.name))
|
||||
self.logger.debug('Updating "{}" to "{}"'.format(name, value))
|
||||
@@ -117,6 +119,6 @@ def register_module(mod):
|
||||
|
||||
def __load_cache():
|
||||
for module in walk_modules('devlib.module'):
|
||||
-       for obj in vars(module).itervalues():
+       for obj in vars(module).values():
|
||||
if isclass(obj) and issubclass(obj, Module) and obj.name:
|
||||
register_module(obj)
|
||||
|
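A minimal sketch of how a custom module plugs into this registration mechanism (class and attribute names are illustrative, not from the diff):

from devlib.module import Module, register_module

class ExampleModule(Module):
    name = 'example'            # key under which the module attaches to a target

    @staticmethod
    def probe(target):
        return True             # always claim support in this sketch

    def __init__(self, target):
        super(ExampleModule, self).__init__(target)
        self.logger.debug('example module initialised')

register_module(ExampleModule)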
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
# Copyright 2014-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -63,7 +63,7 @@ class FastbootFlashModule(FlashModule):
|
||||
image_bundle = expand_path(image_bundle)
|
||||
to_flash = self._bundle_to_images(image_bundle)
|
||||
to_flash = merge_dicts(to_flash, images or {}, should_normalize=False)
|
||||
-       for partition, image_path in to_flash.iteritems():
+       for partition, image_path in to_flash.items():
|
||||
self.logger.debug('flashing {}'.format(partition))
|
||||
self._flash_image(self.target, partition, expand_path(image_path))
|
||||
fastboot_command('reboot')
|
||||
|
@@ -1,3 +1,18 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from devlib.module import Module
|
||||
|
||||
|
||||
@@ -44,79 +59,151 @@ class BigLittleModule(Module):
|
||||
# cpufreq
|
||||
|
||||
def list_bigs_frequencies(self):
|
||||
return self.target.cpufreq.list_frequencies(self.bigs_online[0])
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
return self.target.cpufreq.list_frequencies(bigs_online[0])
|
||||
|
||||
def list_bigs_governors(self):
|
||||
return self.target.cpufreq.list_governors(self.bigs_online[0])
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
return self.target.cpufreq.list_governors(bigs_online[0])
|
||||
|
||||
def list_bigs_governor_tunables(self):
|
||||
return self.target.cpufreq.list_governor_tunables(self.bigs_online[0])
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
return self.target.cpufreq.list_governor_tunables(bigs_online[0])
|
||||
|
||||
def list_littles_frequencies(self):
|
||||
return self.target.cpufreq.list_frequencies(self.littles_online[0])
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
return self.target.cpufreq.list_frequencies(littles_online[0])
|
||||
|
||||
def list_littles_governors(self):
|
||||
return self.target.cpufreq.list_governors(self.littles_online[0])
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
return self.target.cpufreq.list_governors(littles_online[0])
|
||||
|
||||
def list_littles_governor_tunables(self):
|
||||
return self.target.cpufreq.list_governor_tunables(self.littles_online[0])
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
return self.target.cpufreq.list_governor_tunables(littles_online[0])
|
||||
|
||||
def get_bigs_governor(self):
|
||||
return self.target.cpufreq.get_governor(self.bigs_online[0])
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
return self.target.cpufreq.get_governor(bigs_online[0])
|
||||
|
||||
def get_bigs_governor_tunables(self):
|
||||
return self.target.cpufreq.get_governor_tunables(self.bigs_online[0])
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
return self.target.cpufreq.get_governor_tunables(bigs_online[0])
|
||||
|
||||
def get_bigs_frequency(self):
|
||||
return self.target.cpufreq.get_frequency(self.bigs_online[0])
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
return self.target.cpufreq.get_frequency(bigs_online[0])
|
||||
|
||||
def get_bigs_min_frequency(self):
|
||||
return self.target.cpufreq.get_min_frequency(self.bigs_online[0])
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
return self.target.cpufreq.get_min_frequency(bigs_online[0])
|
||||
|
||||
def get_bigs_max_frequency(self):
|
||||
return self.target.cpufreq.get_max_frequency(self.bigs_online[0])
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
return self.target.cpufreq.get_max_frequency(bigs_online[0])
|
||||
|
||||
def get_littles_governor(self):
|
||||
return self.target.cpufreq.get_governor(self.littles_online[0])
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
return self.target.cpufreq.get_governor(littles_online[0])
|
||||
|
||||
def get_littles_governor_tunables(self):
|
||||
return self.target.cpufreq.get_governor_tunables(self.littles_online[0])
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
return self.target.cpufreq.get_governor_tunables(littles_online[0])
|
||||
|
||||
def get_littles_frequency(self):
|
||||
return self.target.cpufreq.get_frequency(self.littles_online[0])
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
return self.target.cpufreq.get_frequency(littles_online[0])
|
||||
|
||||
def get_littles_min_frequency(self):
|
||||
return self.target.cpufreq.get_min_frequency(self.littles_online[0])
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
return self.target.cpufreq.get_min_frequency(littles_online[0])
|
||||
|
||||
def get_littles_max_frequency(self):
|
||||
return self.target.cpufreq.get_max_frequency(self.littles_online[0])
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
return self.target.cpufreq.get_max_frequency(littles_online[0])
|
||||
|
||||
def set_bigs_governor(self, governor, **kwargs):
|
||||
self.target.cpufreq.set_governor(self.bigs_online[0], governor, **kwargs)
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
self.target.cpufreq.set_governor(bigs_online[0], governor, **kwargs)
|
||||
else:
|
||||
raise ValueError("All bigs appear to be offline")
|
||||
|
||||
def set_bigs_governor_tunables(self, governor, **kwargs):
|
||||
self.target.cpufreq.set_governor_tunables(self.bigs_online[0], governor, **kwargs)
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
self.target.cpufreq.set_governor_tunables(bigs_online[0], governor, **kwargs)
|
||||
else:
|
||||
raise ValueError("All bigs appear to be offline")
|
||||
|
||||
def set_bigs_frequency(self, frequency, exact=True):
|
||||
self.target.cpufreq.set_frequency(self.bigs_online[0], frequency, exact)
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
self.target.cpufreq.set_frequency(bigs_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All bigs appear to be offline")
|
||||
|
||||
def set_bigs_min_frequency(self, frequency, exact=True):
|
||||
self.target.cpufreq.set_min_frequency(self.bigs_online[0], frequency, exact)
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
self.target.cpufreq.set_min_frequency(bigs_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All bigs appear to be offline")
|
||||
|
||||
def set_bigs_max_frequency(self, frequency, exact=True):
|
||||
self.target.cpufreq.set_max_frequency(self.bigs_online[0], frequency, exact)
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
self.target.cpufreq.set_max_frequency(bigs_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All bigs appear to be offline")
|
||||
|
||||
def set_littles_governor(self, governor, **kwargs):
|
||||
self.target.cpufreq.set_governor(self.littles_online[0], governor, **kwargs)
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
self.target.cpufreq.set_governor(littles_online[0], governor, **kwargs)
|
||||
else:
|
||||
raise ValueError("All littles appear to be offline")
|
||||
|
||||
def set_littles_governor_tunables(self, governor, **kwargs):
|
||||
self.target.cpufreq.set_governor_tunables(self.littles_online[0], governor, **kwargs)
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
self.target.cpufreq.set_governor_tunables(littles_online[0], governor, **kwargs)
|
||||
else:
|
||||
raise ValueError("All littles appear to be offline")
|
||||
|
||||
def set_littles_frequency(self, frequency, exact=True):
|
||||
self.target.cpufreq.set_frequency(self.littles_online[0], frequency, exact)
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
self.target.cpufreq.set_frequency(littles_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All littles appear to be offline")
|
||||
|
||||
def set_littles_min_frequency(self, frequency, exact=True):
|
||||
self.target.cpufreq.set_min_frequency(self.littles_online[0], frequency, exact)
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
self.target.cpufreq.set_min_frequency(littles_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All littles appear to be offline")
|
||||
|
||||
def set_littles_max_frequency(self, frequency, exact=True):
|
||||
self.target.cpufreq.set_max_frequency(self.littles_online[0], frequency, exact)
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
self.target.cpufreq.set_max_frequency(littles_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All littles appear to be offline")
|
||||
|
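Given the new guards above, callers should expect None when every big (or LITTLE) CPU is offline; a sketch assuming the module is exposed on the target as `target.bl` (the attribute name is not shown in this diff):

freq = target.bl.get_bigs_frequency()
if freq is None:
    print('all big CPUs are offline, no frequency to report')
else:
    target.bl.set_bigs_governor('performance')   # raises ValueError if the bigs go offline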
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
# Copyright 2014-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,6 +14,7 @@
|
||||
#
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
import logging
|
||||
import re
|
||||
from collections import namedtuple
|
||||
|
||||
from devlib.module import Module
|
||||
@@ -24,29 +25,33 @@ from devlib.utils.types import boolean
|
||||
|
||||
class Controller(object):

-   def __new__(cls, arg):
-       if isinstance(arg, cls):
-           return arg
-       else:
-           return object.__new__(cls, arg)
-
-   def __init__(self, kind):
-       self.mount_name = 'devlib_'+kind
+   def __init__(self, kind, hid, clist):
+       """
+       Initialize a controller given the hierarchy it belongs to.
+
+       :param kind: the name of the controller
+       :type kind: str
+
+       :param hid: the Hierarchy ID this controller is mounted on
+       :type hid: int
+
+       :param clist: the list of controller mounted in the same hierarchy
+       :type clist: list(str)
+       """
+       self.mount_name = 'devlib_cgh{}'.format(hid)
        self.kind = kind
+       self.hid = hid
+       self.clist = clist
        self.target = None
+       self._noprefix = False

-       self.logger = logging.getLogger('cgroups.'+self.kind)
+       self.logger = logging.getLogger('CGroup.'+self.kind)
+       self.logger.debug('Initialized [%s, %d, %s]',
+                         self.kind, self.hid, self.clist)
+
        self.mount_point = None
        self._cgroups = {}
|
||||
|
||||
def probe(self, target):
|
||||
try:
|
||||
exists = target.execute('{} grep {} /proc/cgroups'\
|
||||
.format(target.busybox, self.kind))
|
||||
except TargetError:
|
||||
return False
|
||||
return True
|
||||
|
||||
def mount(self, target, mount_root):
|
||||
|
||||
mounted = target.list_file_systems()
|
||||
@@ -63,13 +68,20 @@ class Controller(object):
|
||||
target.execute('mkdir -p {} 2>/dev/null'\
|
||||
.format(self.mount_point), as_root=True)
|
||||
target.execute('mount -t cgroup -o {} {} {}'\
|
||||
-                       .format(self.kind,
+                       .format(','.join(self.clist),
                                self.mount_name,
                                self.mount_point),
                        as_root=True)

-       self.logger.info('Controller %s mounted under: %s',
-                        self.kind, self.mount_point)
+       # Check if this controller uses "noprefix" option
+       output = target.execute('mount | grep "{} "'.format(self.mount_name))
+       if 'noprefix' in output:
+           self._noprefix = True
+           # self.logger.debug('Controller %s using "noprefix" option',
+           #                   self.kind)
+
+       self.logger.debug('Controller %s mounted under: %s (noprefix=%s)',
+                         self.kind, self.mount_point, self._noprefix)
|
||||
|
||||
# Mark this contoller as available
|
||||
self.target = target
|
||||
@@ -91,14 +103,15 @@ class Controller(object):
|
||||
.format(self.kind))
|
||||
if name not in self._cgroups:
|
||||
self._cgroups[name] = CGroup(self, name, create=False)
|
||||
-       return self._cgroups[name].existe()
+       return self._cgroups[name].exists()
|
||||
|
||||
def list_all(self):
|
||||
self.logger.debug('Listing groups for %s controller', self.kind)
|
||||
output = self.target.execute('{} find {} -type d'\
|
||||
.format(self.target.busybox, self.mount_point))
|
||||
.format(self.target.busybox, self.mount_point),
|
||||
as_root=True)
|
||||
cgroups = []
|
||||
for cg in output.split('\n'):
|
||||
for cg in output.splitlines():
|
||||
cg = cg.replace(self.mount_point + '/', '/')
|
||||
cg = cg.replace(self.mount_point, '/')
|
||||
cg = cg.strip()
|
||||
@@ -108,24 +121,131 @@ class Controller(object):
|
||||
cgroups.append(cg)
|
||||
return cgroups
|
||||
|
||||
def move_tasks(self, source, dest):
|
||||
def move_tasks(self, source, dest, exclude=[]):
|
||||
try:
|
||||
srcg = self._cgroups[source]
|
||||
dstg = self._cgroups[dest]
|
||||
command = 'for task in $(cat {}); do echo $task>{}; done'
|
||||
self.target.execute(command.format(srcg.tasks_file, dstg.tasks_file),
|
||||
# this will always fail as some of the tasks
|
||||
# are kthreads that cannot be migrated, but we
|
||||
# don't care about those, so don't check exit
|
||||
# code.
|
||||
check_exit_code=False, as_root=True)
|
||||
except KeyError as e:
|
||||
raise ValueError('Unkown group: {}'.format(e))
|
||||
output = self.target._execute_util(
|
||||
'cgroups_tasks_move {} {} \'{}\''.format(
|
||||
srcg.directory, dstg.directory, exclude),
|
||||
as_root=True)
|
||||
|
||||
def move_all_tasks_to(self, dest, exclude=[]):
|
||||
"""
|
||||
Move all the tasks to the specified CGroup
|
||||
|
||||
Tasks are moved from all their original CGroup the the specified on.
|
||||
The tasks which name matches one of the string in exclude are moved
|
||||
instead in the root CGroup for the controller.
|
||||
The name of a tasks to exclude must be a substring of the task named as
|
||||
reported by the "ps" command. Indeed, this list will be translated into
|
||||
a: "ps | grep -e name1 -e name2..." in order to obtain the PID of these
|
||||
tasks.
|
||||
|
||||
:param exclude: list of commands to keep in the root CGroup
|
||||
:type exlude: list(str)
|
||||
"""
|
||||
|
||||
if isinstance(exclude, str):
|
||||
exclude = [exclude]
|
||||
if not isinstance(exclude, list):
|
||||
raise ValueError('wrong type for "exclude" parameter, '
|
||||
'it must be a str or a list')
|
||||
|
||||
logging.debug('Moving all tasks into %s', dest)
|
||||
|
||||
# Build list of tasks to exclude
|
||||
grep_filters = ''
|
||||
for comm in exclude:
|
||||
grep_filters += '-e {} '.format(comm)
|
||||
logging.debug(' using grep filter: %s', grep_filters)
|
||||
if grep_filters != '':
|
||||
logging.debug(' excluding tasks which name matches:')
|
||||
logging.debug(' %s', ', '.join(exclude))
|
||||
|
||||
def move_all_tasks_to(self, dest):
|
||||
for cgroup in self._cgroups:
|
||||
if cgroup != dest:
|
||||
self.move_tasks(cgroup, dest)
|
||||
self.move_tasks(cgroup, dest, grep_filters)
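A minimal usage sketch (assuming a reachable, rooted Linux target; the connection settings and group name below are placeholders, not part of devlib):

    from devlib import LinuxTarget

    # Placeholder connection settings for an example board.
    target = LinuxTarget(connection_settings={'host': '192.168.1.42',
                                              'username': 'root',
                                              'password': 'root'},
                         modules=['cgroups'])

    cpuset = target.cgroups.controller('cpuset')
    cpuset.cgroup('/EXAMPLE')      # create the destination group if needed
    # Move everything into /EXAMPLE, keeping adbd and sshd in the root group.
    cpuset.move_all_tasks_to('/EXAMPLE', exclude=['adbd', 'sshd'])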
|
||||
|
||||
def tasks(self, cgroup,
|
||||
filter_tid='',
|
||||
filter_tname='',
|
||||
filter_tcmdline=''):
|
||||
"""
|
||||
Report the tasks that are included in a cgroup. The tasks can be
|
||||
filtered by their tid, tname or tcmdline if filter_tid, filter_tname or
|
||||
filter_tcmdline are defined respectively. In this case, the reported
|
||||
tasks are the ones in the cgroup that match these patterns.
|
||||
|
||||
Example of tasks format:
|
||||
TID,tname,tcmdline
|
||||
903,cameraserver,/system/bin/cameraserver
|
||||
|
||||
:param filter_tid: regexp pattern to filter by TID
|
||||
:type filter_tid: str
|
||||
|
||||
:param filter_tname: regexp pattern to filter by tname
|
||||
:type filter_tname: str
|
||||
|
||||
:param filter_tcmdline: regexp pattern to filter by tcmdline
|
||||
:type filter_tcmdline: str
|
||||
|
||||
:returns: a dictionary in the form: {tid:(tname, tcmdline)}
|
||||
"""
|
||||
if not isinstance(filter_tid, str):
|
||||
raise TypeError('filter_tid should be a str')
|
||||
if not isinstance(filter_tname, str):
|
||||
raise TypeError('filter_tname should be a str')
|
||||
if not isinstance(filter_tcmdline, str):
|
||||
raise TypeError('filter_tcmdline should be a str')
|
||||
try:
|
||||
cg = self._cgroups[cgroup]
|
||||
except KeyError as e:
|
||||
raise ValueError('Unknown group: {}'.format(e))
|
||||
output = self.target._execute_util(
|
||||
'cgroups_tasks_in {}'.format(cg.directory),
|
||||
as_root=True)
|
||||
entries = output.splitlines()
|
||||
tasks = {}
|
||||
for task in entries:
|
||||
fields = task.split(',', 2)
|
||||
nr_fields = len(fields)
|
||||
if nr_fields < 2:
|
||||
continue
|
||||
elif nr_fields == 2:
|
||||
tid_str, tname = fields
|
||||
tcmdline = ''
|
||||
else:
|
||||
tid_str, tname, tcmdline = fields
|
||||
|
||||
if not re.search(filter_tid, tid_str):
|
||||
continue
|
||||
if not re.search(filter_tname, tname):
|
||||
continue
|
||||
if not re.search(filter_tcmdline, tcmdline):
|
||||
continue
|
||||
|
||||
tasks[int(tid_str)] = (tname, tcmdline)
|
||||
return tasks
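For instance, with a controller obtained as in the earlier sketch, the filters can be combined to inspect a group (the task name used here is only an example):

    # `cpuset` is a Controller instance from the cgroups module, as above.
    running = cpuset.tasks('/', filter_tname='kworker')
    for tid, (tname, tcmdline) in running.items():
        print('{}: {} ({})'.format(tid, tname, tcmdline))

    # Per-group task counts use the helpers defined just below.
    print(cpuset.tasks_per_group())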
|
||||
|
||||
def tasks_count(self, cgroup):
|
||||
try:
|
||||
cg = self._cgroups[cgroup]
|
||||
except KeyError as e:
|
||||
raise ValueError('Unknown group: {}'.format(e))
|
||||
output = self.target.execute(
|
||||
'{} wc -l {}/tasks'.format(
|
||||
self.target.busybox, cg.directory),
|
||||
as_root=True)
|
||||
return int(output.split()[0])
|
||||
|
||||
def tasks_per_group(self):
|
||||
tasks = {}
|
||||
for cg in self.list_all():
|
||||
tasks[cg] = self.tasks_count(cg)
|
||||
return tasks
|
||||
|
||||
class CGroup(object):
|
||||
|
||||
@@ -147,14 +267,14 @@ class CGroup(object):
|
||||
if not create:
|
||||
return
|
||||
|
||||
self.logger.info('Creating cgroup %s', self.directory)
|
||||
self.logger.debug('Creating cgroup %s', self.directory)
|
||||
self.target.execute('[ -d {0} ] || mkdir -p {0}'\
|
||||
.format(self.directory), as_root=True)
|
||||
|
||||
def exists(self):
|
||||
try:
|
||||
self.target.execute('[ -d {0} ]'\
|
||||
.format(self.directory))
|
||||
.format(self.directory), as_root=True)
|
||||
return True
|
||||
except TargetError:
|
||||
return False
|
||||
@@ -166,14 +286,11 @@ class CGroup(object):
|
||||
self.controller.kind)
|
||||
logging.debug(' %s',
|
||||
self.directory)
|
||||
output = self.target.execute('{} grep \'\' {}/{}.*'.format(
|
||||
self.target.busybox,
|
||||
self.directory,
|
||||
self.controller.kind))
|
||||
for res in output.split('\n'):
|
||||
if res.find(self.controller.kind) < 0:
|
||||
continue
|
||||
res = res.split('.')[1]
|
||||
output = self.target._execute_util(
|
||||
'cgroups_get_attributes {} {}'.format(
|
||||
self.directory, self.controller.kind),
|
||||
as_root=True)
|
||||
for res in output.splitlines():
|
||||
attr = res.split(':')[0]
|
||||
value = res.split(':')[1]
|
||||
conf[attr] = value
|
||||
@@ -185,19 +302,30 @@ class CGroup(object):
|
||||
if isiterable(attrs[idx]):
|
||||
attrs[idx] = list_to_ranges(attrs[idx])
|
||||
# Build attribute path
|
||||
path = '{}.{}'.format(self.controller.kind, idx)
|
||||
path = self.target.path.join(self.directory, path)
|
||||
if self.controller._noprefix:
|
||||
attr_name = '{}'.format(idx)
|
||||
else:
|
||||
attr_name = '{}.{}'.format(self.controller.kind, idx)
|
||||
path = self.target.path.join(self.directory, attr_name)
|
||||
|
||||
self.logger.debug('Set attribute [%s] to: %s',
|
||||
path, attrs[idx])
|
||||
|
||||
# Set the attribute value
|
||||
self.target.write_value(path, attrs[idx])
|
||||
try:
|
||||
self.target.write_value(path, attrs[idx])
|
||||
except TargetError:
|
||||
# Check if the error is due to a non-existing attribute
|
||||
attrs = self.get()
|
||||
if idx not in attrs:
|
||||
raise ValueError('Controller [{}] does not provide attribute [{}]'\
|
||||
.format(self.controller.kind, attr_name))
|
||||
raise
|
||||
|
||||
def get_tasks(self):
|
||||
task_ids = self.target.read_value(self.tasks_file).split()
|
||||
logging.debug('Tasks: %s', task_ids)
|
||||
return map(int, task_ids)
|
||||
return list(map(int, task_ids))
|
||||
|
||||
def add_task(self, tid):
|
||||
self.target.write_value(self.tasks_file, tid, verify=False)
|
||||
@@ -214,54 +342,59 @@ CgroupSubsystemEntry = namedtuple('CgroupSubsystemEntry', 'name hierarchy num_cg
|
||||
class CgroupsModule(Module):
|
||||
|
||||
name = 'cgroups'
|
||||
cgroup_root = '/sys/fs/cgroup'
|
||||
stage = 'setup'
|
||||
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
return target.config.has('cgroups') and target.is_rooted
|
||||
if not target.is_rooted:
|
||||
return False
|
||||
if target.file_exists('/proc/cgroups'):
|
||||
return True
|
||||
return target.config.has('cgroups')
|
||||
|
||||
def __init__(self, target):
|
||||
super(CgroupsModule, self).__init__(target)
|
||||
|
||||
self.logger = logging.getLogger('CGroups')
|
||||
|
||||
# Initialize controllers mount point
|
||||
mounted = self.target.list_file_systems()
|
||||
if self.cgroup_root not in [e.mount_point for e in mounted]:
|
||||
self.target.execute('mount -t tmpfs {} {}'\
|
||||
.format('cgroup_root',
|
||||
self.cgroup_root),
|
||||
as_root=True)
|
||||
else:
|
||||
self.logger.debug('cgroup_root already mounted at %s',
|
||||
self.cgroup_root)
|
||||
# Set Devlib's CGroups mount point
|
||||
self.cgroup_root = target.path.join(
|
||||
target.working_directory, 'cgroups')
|
||||
|
||||
# Load list of available controllers
|
||||
controllers = []
|
||||
# Get the list of the available controllers
|
||||
subsys = self.list_subsystems()
|
||||
for (n, h, c, e) in subsys:
|
||||
controllers.append(n)
|
||||
self.logger.info('Available controllers: %s', controllers)
|
||||
if len(subsys) == 0:
|
||||
self.logger.warning('No CGroups controller available')
|
||||
return
|
||||
|
||||
# Map hierarchy IDs into a list of controllers
|
||||
hierarchy = {}
|
||||
for ss in subsys:
|
||||
try:
|
||||
hierarchy[ss.hierarchy].append(ss.name)
|
||||
except KeyError:
|
||||
hierarchy[ss.hierarchy] = [ss.name]
|
||||
self.logger.debug('Available hierarchies: %s', hierarchy)
|
||||
|
||||
# Initialize controllers
|
||||
self.logger.info('Available controllers:')
|
||||
self.controllers = {}
|
||||
for idx in controllers:
|
||||
controller = Controller(idx)
|
||||
self.logger.debug('Init %s controller...', controller.kind)
|
||||
if not controller.probe(self.target):
|
||||
continue
|
||||
for ss in subsys:
|
||||
hid = ss.hierarchy
|
||||
controller = Controller(ss.name, hid, hierarchy[hid])
|
||||
try:
|
||||
controller.mount(self.target, self.cgroup_root)
|
||||
except TargetError:
|
||||
message = 'cgroups {} controller is not supported by the target'
|
||||
message = 'Failed to mount "{}" controller'
|
||||
raise TargetError(message.format(controller.kind))
|
||||
self.logger.debug('Controller %s enabled', controller.kind)
|
||||
self.controllers[idx] = controller
|
||||
self.logger.info(' %-12s : %s', controller.kind,
|
||||
controller.mount_point)
|
||||
self.controllers[ss.name] = controller
|
||||
|
||||
def list_subsystems(self):
|
||||
subsystems = []
|
||||
for line in self.target.execute('{} cat /proc/cgroups'\
|
||||
.format(self.target.busybox)).split('\n')[1:]:
|
||||
.format(self.target.busybox), as_root=self.target.is_rooted).splitlines()[1:]:
|
||||
line = line.strip()
|
||||
if not line or line.startswith('#'):
|
||||
continue
|
||||
@@ -279,3 +412,117 @@ class CgroupsModule(Module):
|
||||
return None
|
||||
return self.controllers[kind]
|
||||
|
||||
def run_into_cmd(self, cgroup, cmdline):
|
||||
"""
|
||||
Get the command to run a command into a given cgroup
|
||||
|
||||
:param cmdline: Command to be run into cgroup
|
||||
:param cgroup: Name of cgroup to run command into
|
||||
:returns: A command to run `cmdline` into `cgroup`
|
||||
"""
|
||||
return 'CGMOUNT={} {} cgroups_run_into {} {}'\
|
||||
.format(self.cgroup_root, self.target.shutils,
|
||||
cgroup, cmdline)
|
||||
|
||||
def run_into(self, cgroup, cmdline):
|
||||
"""
|
||||
Run the specified command into the specified CGroup
|
||||
|
||||
:param cmdline: Command to be run into cgroup
|
||||
:param cgroup: Name of cgroup to run command into
|
||||
:returns: Output of command.
|
||||
"""
|
||||
cmd = self.run_into_cmd(cgroup, cmdline)
|
||||
raw_output = self.target.execute(cmd)
|
||||
|
||||
# First line of output comes from shutils; strip it out.
|
||||
return raw_output.split('\n', 1)[1]
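A short sketch of both entry points (assuming `target` is a connected, rooted devlib target with this module loaded; the group name is one created earlier):

    mod = target.cgroups

    # Run synchronously and capture the confined command's output.
    print(mod.run_into('/EXAMPLE', 'cat /proc/self/cgroup'))

    # Or only build the command string, e.g. to hand it to target.background().
    cmd = mod.run_into_cmd('/EXAMPLE', 'dd if=/dev/zero of=/dev/null bs=1M count=64')
    target.execute(cmd, as_root=True)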
|
||||
|
||||
def cgroups_tasks_move(self, srcg, dstg, exclude=''):
|
||||
"""
|
||||
Move all the tasks from the srcg CGroup to the dstg one.
|
||||
A regexp of task names can be used to define tasks which should not
be moved.
|
||||
"""
|
||||
return self.target._execute_util(
|
||||
'cgroups_tasks_move {} {} {}'.format(srcg, dstg, exclude),
|
||||
as_root=True)
|
||||
|
||||
def isolate(self, cpus, exclude=[]):
|
||||
"""
|
||||
Remove all userspace tasks from specified CPUs.
|
||||
|
||||
A list of CPUs can be specified where we do not want userspace tasks
running. This function creates a sandbox cpuset CGroup into which all
user-space tasks and non-pinned kernel-space tasks are moved.
This isolates the specified CPUs, which will not get tasks running on
them unless those tasks are explicitly moved into the isolated group.
|
||||
|
||||
:param cpus: the list of CPUs to isolate
|
||||
:type cpus: list(int)
|
||||
|
||||
:return: the (sandbox, isolated) tuple, where:
|
||||
sandbox is the CGroup of sandboxed CPUs
|
||||
isolated is the CGroup of isolated CPUs
|
||||
"""
|
||||
all_cpus = set(range(self.target.number_of_cpus))
|
||||
sbox_cpus = list(all_cpus - set(cpus))
|
||||
isol_cpus = list(all_cpus - set(sbox_cpus))
|
||||
|
||||
# Create Sandbox and Isolated cpuset CGroups
|
||||
cpuset = self.controller('cpuset')
|
||||
sbox_cg = cpuset.cgroup('/DEVLIB_SBOX')
|
||||
isol_cg = cpuset.cgroup('/DEVLIB_ISOL')
|
||||
|
||||
# Set CPUs for Sandbox and Isolated CGroups
|
||||
sbox_cg.set(cpus=sbox_cpus, mems=0)
|
||||
isol_cg.set(cpus=isol_cpus, mems=0)
|
||||
|
||||
# Move all currently running tasks to the Sandbox CGroup
|
||||
cpuset.move_all_tasks_to('/DEVLIB_SBOX', exclude)
|
||||
|
||||
return sbox_cg, isol_cg
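A usage sketch (assuming a rooted target with a cpuset controller; the CPU numbers and excluded task names are illustrative):

    # Keep CPUs 2 and 3 free of user-space tasks; leave adbd in the root group.
    sandbox, isolated = target.cgroups.isolate(cpus=[2, 3], exclude=['adbd'])

    # Anything that must run on the isolated CPUs is started there explicitly.
    print(target.cgroups.run_into('/DEVLIB_ISOL', 'cat /proc/self/status'))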
|
||||
|
||||
def freeze(self, exclude=[], thaw=False):
|
||||
"""
|
||||
Freeze all user-space tasks but the specified ones
|
||||
|
||||
A freezer cgroup is used to stop all the tasks in the target system but
the ones whose name matches one of the strings in the exclude
parameter. The name of a task to exclude must be a substring of the
task name as reported by the "ps" command. Indeed, this list will be
translated into a: "ps | grep -e name1 -e name2..." in order to obtain
the PIDs of these tasks.
|
||||
|
||||
:param exclude: list of commands paths to exclude from freezer
|
||||
:type exclude: list(str)
|
||||
|
||||
:param thaw: if true thaw tasks instead
|
||||
:type thaw: bool
|
||||
"""
|
||||
|
||||
# Create Freezer CGroup
|
||||
freezer = self.controller('freezer')
|
||||
if freezer is None:
|
||||
raise RuntimeError('freezer cgroup controller not present')
|
||||
freezer_cg = freezer.cgroup('/DEVLIB_FREEZER')
|
||||
cmd = 'cgroups_freezer_set_state {{}} {}'.format(freezer_cg.directory)
|
||||
|
||||
if thaw:
|
||||
# Restart frozen tasks
|
||||
freezer.target._execute_util(cmd.format('THAWED'), as_root=True)
|
||||
# Remove all tasks from freezer
|
||||
freezer.move_all_tasks_to('/')
|
||||
return
|
||||
|
||||
# Move all tasks into the freezer group
|
||||
freezer.move_all_tasks_to('/DEVLIB_FREEZER', exclude)
|
||||
|
||||
# Get list of not frozen tasks, which is reported as output
|
||||
tasks = freezer.tasks('/')
|
||||
|
||||
# Freeze all tasks
|
||||
freezer.target._execute_util(cmd.format('FROZEN'), as_root=True)
|
||||
|
||||
return tasks
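A usage sketch (assuming a rooted target with a freezer controller; the excluded names keep the control connection alive and are only examples):

    # Freeze user-space, keeping the shell and connection daemons running.
    not_frozen = target.cgroups.freeze(exclude=['init', 'sh', 'sshd', 'adbd'])
    print('Still running:', not_frozen)

    # ... take measurements on the quiesced system ...

    # Thaw everything and move tasks back to the root group.
    target.cgroups.freeze(thaw=True)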
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
# Copyright 2014-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -37,7 +37,7 @@ class CpufreqModule(Module):
|
||||
return True
|
||||
|
||||
# Generic CPUFreq support (single policy)
|
||||
path = '/sys/devices/system/cpu/cpufreq'
|
||||
path = '/sys/devices/system/cpu/cpufreq/policy0'
|
||||
if target.file_exists(path):
|
||||
return True
|
||||
|
||||
@@ -133,7 +133,7 @@ class CpufreqModule(Module):
|
||||
keyword arguments. Which tunables and values are valid depends on the
|
||||
governor.
|
||||
|
||||
:param cpu: The cpu for which the governor will be set. This must be the
|
||||
:param cpu: The cpu for which the governor will be set. ``int`` or
|
||||
full cpu name as it appears in sysfs, e.g. ``cpu0``.
|
||||
:param governor: The name of the governor. Must be all lower case.
|
||||
|
||||
@@ -150,12 +150,16 @@ class CpufreqModule(Module):
|
||||
if governor is None:
|
||||
governor = self.get_governor(cpu)
|
||||
valid_tunables = self.list_governor_tunables(cpu)
|
||||
for tunable, value in kwargs.iteritems():
|
||||
for tunable, value in kwargs.items():
|
||||
if tunable in valid_tunables:
|
||||
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
|
||||
try:
|
||||
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
|
||||
self.target.write_value(path, value)
|
||||
except TargetError: # May be an older kernel
|
||||
except TargetError:
|
||||
if self.target.file_exists(path):
|
||||
# File exists but we did something wrong
|
||||
raise
|
||||
# Expected file doesn't exist, try older sysfs layout.
|
||||
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
|
||||
self.target.write_value(path, value)
|
||||
else:
|
||||
@@ -172,16 +176,41 @@ class CpufreqModule(Module):
|
||||
try:
|
||||
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu)
|
||||
output = self.target.execute(cmd)
|
||||
available_frequencies = map(int, output.strip().split()) # pylint: disable=E1103
|
||||
available_frequencies = list(map(int, output.strip().split())) # pylint: disable=E1103
|
||||
except TargetError:
|
||||
# On some devices scaling_frequencies is not generated.
|
||||
# http://adrynalyne-teachtofish.blogspot.co.uk/2011/11/how-to-enable-scalingavailablefrequenci.html
|
||||
# Fall back to parsing stats/time_in_state
|
||||
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/stats/time_in_state'.format(cpu)
|
||||
out_iter = iter(self.target.execute(cmd).strip().split())
|
||||
available_frequencies = map(int, reversed([f for f, _ in zip(out_iter, out_iter)]))
|
||||
path = '/sys/devices/system/cpu/{}/cpufreq/stats/time_in_state'.format(cpu)
|
||||
try:
|
||||
out_iter = iter(self.target.read_value(path).split())
|
||||
except TargetError:
|
||||
if not self.target.file_exists(path):
|
||||
# Probably intel_pstate. Can't get available freqs.
|
||||
return []
|
||||
raise
|
||||
|
||||
available_frequencies = list(map(int, reversed([f for f, _ in zip(out_iter, out_iter)])))
|
||||
return available_frequencies
|
||||
|
||||
@memoized
|
||||
def get_max_available_frequency(self, cpu):
|
||||
"""
|
||||
Returns the maximum available frequency for a given core or None if
|
||||
could not be found.
|
||||
"""
|
||||
freqs = self.list_frequencies(cpu)
|
||||
return freqs and max(freqs) or None
|
||||
|
||||
@memoized
|
||||
def get_min_available_frequency(self, cpu):
|
||||
"""
|
||||
Returns the minimum available frequency for a given core or None if
|
||||
could not be found.
|
||||
"""
|
||||
freqs = self.list_frequencies(cpu)
|
||||
return freqs and min(freqs) or None
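A sketch of how these helpers combine (assuming `target` is a connected devlib target with this module loaded):

    freqs = target.cpufreq.list_frequencies(0)            # kHz values, may be []
    fmax = target.cpufreq.get_max_available_frequency(0)  # None if freqs is []
    fmin = target.cpufreq.get_min_available_frequency(0)

    # Pin CPU0 to its highest OPP when the frequency table is known.
    if fmax is not None:
        target.cpufreq.set_frequency(0, fmax)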
|
||||
|
||||
def get_min_frequency(self, cpu):
|
||||
"""
|
||||
Returns the min frequency currently set for the specified CPU.
|
||||
@@ -334,9 +363,8 @@ class CpufreqModule(Module):
|
||||
|
||||
:param cpus: The list of CPU for which the governor is to be set.
|
||||
"""
|
||||
online_cpus = self.target.list_online_cpus()
|
||||
for cpu in online_cpus:
|
||||
self.set_governor(cpu, governor, kwargs)
|
||||
for cpu in cpus:
|
||||
self.set_governor(cpu, governor, **kwargs)
|
||||
|
||||
def set_frequency_for_cpus(self, cpus, freq, exact=False):
|
||||
"""
|
||||
@@ -345,34 +373,112 @@ class CpufreqModule(Module):
|
||||
|
||||
:param cpus: The list of CPU for which the frequency has to be set.
|
||||
"""
|
||||
online_cpus = self.target.list_online_cpus()
|
||||
for cpu in online_cpus:
|
||||
for cpu in cpus:
|
||||
self.set_frequency(cpu, freq, exact)
|
||||
|
||||
def set_all_frequencies(self, freq, exact=False):
|
||||
self.target.execute(
|
||||
"for CPU in /sys/devices/system/cpu/cpu[0-9]*; do "\
|
||||
"echo {} > $CPU/cpufreq/scaling_cur_freq; "\
|
||||
"done"\
|
||||
.format(freq), as_root=True)
|
||||
def set_all_frequencies(self, freq):
|
||||
"""
|
||||
Set the specified (minimum) frequency for all the (online) CPUs
|
||||
"""
|
||||
return self.target._execute_util(
|
||||
'cpufreq_set_all_frequencies {}'.format(freq),
|
||||
as_root=True)
|
||||
|
||||
def get_all_frequencies(self):
|
||||
"""
|
||||
Get the current frequency for all the (online) CPUs
|
||||
"""
|
||||
output = self.target._execute_util(
|
||||
'cpufreq_get_all_frequencies', as_root=True)
|
||||
frequencies = {}
|
||||
for x in output.splitlines():
|
||||
kv = x.split(' ')
|
||||
if kv[0] == '':
|
||||
break
|
||||
frequencies[kv[0]] = kv[1]
|
||||
return frequencies
|
||||
|
||||
def set_all_governors(self, governor):
|
||||
self.target.execute(
|
||||
"for CPU in /sys/devices/system/cpu/cpu[0-9]*; do "\
|
||||
"echo {} > $CPU/cpufreq/scaling_governor; "\
|
||||
"done"\
|
||||
.format(governor), as_root=True)
|
||||
"""
|
||||
Set the specified governor for all the (online) CPUs
|
||||
"""
|
||||
try:
|
||||
return self.target._execute_util(
|
||||
'cpufreq_set_all_governors {}'.format(governor),
|
||||
as_root=True)
|
||||
except TargetError as e:
|
||||
if ("echo: I/O error" in str(e) or
|
||||
"write error: Invalid argument" in str(e)):
|
||||
|
||||
cpus_unsupported = [c for c in self.target.list_online_cpus()
|
||||
if governor not in self.list_governors(c)]
|
||||
raise TargetError("Governor {} unsupported for CPUs {}".format(
|
||||
governor, cpus_unsupported))
|
||||
else:
|
||||
raise
|
||||
|
||||
def get_all_governors(self):
|
||||
"""
|
||||
Get the current governor for all the (online) CPUs
|
||||
"""
|
||||
output = self.target._execute_util(
|
||||
'cpufreq_get_all_governors', as_root=True)
|
||||
governors = {}
|
||||
for x in output.splitlines():
|
||||
kv = x.split(' ')
|
||||
if kv[0] == '':
|
||||
break
|
||||
governors[kv[0]] = kv[1]
|
||||
return governors
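A sketch of the governor helpers (the governor names are examples and must be supported by the target kernel):

    from devlib.exception import TargetError

    try:
        target.cpufreq.set_all_governors('schedutil')
    except TargetError as e:
        # The error message lists the CPUs that do not support the governor.
        print('schedutil unavailable, falling back:', e)
        target.cpufreq.set_all_governors('ondemand')

    print(target.cpufreq.get_all_governors())   # e.g. {'cpu0': 'schedutil', ...}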
|
||||
|
||||
def trace_frequencies(self):
|
||||
"""
|
||||
Report current frequencies on trace file
|
||||
"""
|
||||
self.target.execute(
|
||||
'FREQS=$(cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq); '
|
||||
'CPU=0; for F in $FREQS; do '
|
||||
' echo "cpu_frequency: state=$F cpu_id=$CPU" > /sys/kernel/debug/tracing/trace_marker; '
|
||||
' let CPU++; '
|
||||
'done',
|
||||
as_root=True
|
||||
)
|
||||
return self.target._execute_util('cpufreq_trace_all_frequencies', as_root=True)
|
||||
|
||||
def get_affected_cpus(self, cpu):
|
||||
"""
|
||||
Get the online CPUs that share a frequency domain with the given CPU
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/affected_cpus'.format(cpu)
|
||||
|
||||
return [int(c) for c in self.target.read_value(sysfile).split()]
|
||||
|
||||
@memoized
|
||||
def get_related_cpus(self, cpu):
|
||||
"""
|
||||
Get the CPUs that share a frequency domain with the given CPU
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/related_cpus'.format(cpu)
|
||||
|
||||
return [int(c) for c in self.target.read_value(sysfile).split()]
|
||||
|
||||
@memoized
|
||||
def get_driver(self, cpu):
|
||||
"""
|
||||
Get the name of the driver used by this cpufreq policy.
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_driver'.format(cpu)
|
||||
|
||||
return self.target.read_value(sysfile).strip()
|
||||
|
||||
def iter_domains(self):
|
||||
"""
|
||||
Iterate over the frequency domains in the system
|
||||
"""
|
||||
cpus = set(range(self.target.number_of_cpus))
|
||||
while cpus:
|
||||
cpu = next(iter(cpus))
|
||||
domain = self.target.cpufreq.get_related_cpus(cpu)
|
||||
yield domain
|
||||
cpus = cpus.difference(domain)
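Since related CPUs share one policy, a single request per domain is enough; a sketch, assuming the module is loaded:

    for domain in target.cpufreq.iter_domains():
        first = sorted(domain)[0]
        print('domain {}: driver={}, governor={}'.format(
            sorted(domain),
            target.cpufreq.get_driver(first),
            target.cpufreq.get_governor(first)))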
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
# Copyright 2014-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -13,6 +13,8 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
from past.builtins import basestring
|
||||
|
||||
from devlib.module import Module
|
||||
from devlib.utils.misc import memoized
|
||||
from devlib.utils.types import integer, boolean
|
||||
@@ -41,16 +43,16 @@ class CpuidleState(object):
|
||||
raise ValueError('invalid idle state name: "{}"'.format(self.id))
|
||||
return int(self.id[i:])
|
||||
|
||||
def __init__(self, target, index, path):
|
||||
def __init__(self, target, index, path, name, desc, power, latency, residency):
|
||||
self.target = target
|
||||
self.index = index
|
||||
self.path = path
|
||||
self.name = name
|
||||
self.desc = desc
|
||||
self.power = power
|
||||
self.latency = latency
|
||||
self.id = self.target.path.basename(self.path)
|
||||
self.cpu = self.target.path.basename(self.target.path.dirname(path))
|
||||
self.desc = self.get('desc')
|
||||
self.name = self.get('name')
|
||||
self.latency = self.get('latency')
|
||||
self.power = self.get('power')
|
||||
|
||||
def enable(self):
|
||||
self.set('disable', 0)
|
||||
@@ -92,28 +94,52 @@ class Cpuidle(Module):
|
||||
def probe(target):
|
||||
return target.file_exists(Cpuidle.root_path)
|
||||
|
||||
def get_driver(self):
|
||||
return self.target.read_value(self.target.path.join(self.root_path, 'current_driver'))
|
||||
def __init__(self, target):
|
||||
super(Cpuidle, self).__init__(target)
|
||||
self._states = {}
|
||||
|
||||
def get_governor(self):
|
||||
return self.target.read_value(self.target.path.join(self.root_path, 'current_governor_ro'))
|
||||
basepath = '/sys/devices/system/cpu/'
|
||||
values_tree = self.target.read_tree_values(basepath, depth=4, check_exit_code=False)
|
||||
i = 0
|
||||
cpu_id = 'cpu{}'.format(i)
|
||||
while cpu_id in values_tree:
|
||||
cpu_node = values_tree[cpu_id]
|
||||
|
||||
if 'cpuidle' in cpu_node:
|
||||
idle_node = cpu_node['cpuidle']
|
||||
self._states[cpu_id] = []
|
||||
j = 0
|
||||
state_id = 'state{}'.format(j)
|
||||
while state_id in idle_node:
|
||||
state_node = idle_node[state_id]
|
||||
state = CpuidleState(
|
||||
self.target,
|
||||
index=j,
|
||||
path=self.target.path.join(basepath, cpu_id, 'cpuidle', state_id),
|
||||
name=state_node['name'],
|
||||
desc=state_node['desc'],
|
||||
power=int(state_node['power']),
|
||||
latency=int(state_node['latency']),
|
||||
residency=int(state_node['residency']) if 'residency' in state_node else None,
|
||||
)
|
||||
msg = 'Adding {} state {}: {} {}'
|
||||
self.logger.debug(msg.format(cpu_id, j, state.name, state.desc))
|
||||
self._states[cpu_id].append(state)
|
||||
j += 1
|
||||
state_id = 'state{}'.format(j)
|
||||
|
||||
i += 1
|
||||
cpu_id = 'cpu{}'.format(i)
|
||||
|
||||
@memoized
|
||||
def get_states(self, cpu=0):
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
states_dir = self.target.path.join(self.target.path.dirname(self.root_path), cpu, 'cpuidle')
|
||||
idle_states = []
|
||||
for state in self.target.list_directory(states_dir):
|
||||
if state.startswith('state'):
|
||||
index = int(state[5:])
|
||||
idle_states.append(CpuidleState(self.target, index, self.target.path.join(states_dir, state)))
|
||||
return idle_states
|
||||
return self._states.get(cpu, [])
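With the states pre-parsed in __init__, inspecting or disabling them is cheap (a sketch; the CPU and state choice are illustrative):

    for state in target.cpuidle.get_states(cpu=0):
        print('{:12s} latency={:5d}us power={}'.format(
            state.name, state.latency, state.power))

    # Disable the deepest idle state on CPU0 for the duration of an experiment.
    states = target.cpuidle.get_states(0)
    if states:
        states[-1].disable()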
|
||||
|
||||
def get_state(self, state, cpu=0):
|
||||
if isinstance(state, int):
|
||||
try:
|
||||
self.get_states(cpu)[state].enable()
|
||||
return self.get_states(cpu)[state]
|
||||
except IndexError:
|
||||
raise ValueError('Cpuidle state {} does not exist'.format(state))
|
||||
else: # assume string-like
|
||||
@@ -136,3 +162,14 @@ class Cpuidle(Module):
|
||||
for state in self.get_states(cpu):
|
||||
state.disable()
|
||||
|
||||
def perturb_cpus(self):
|
||||
"""
|
||||
Momentarily wake each CPU. Ensures cpu_idle events in trace file.
|
||||
"""
|
||||
output = self.target._execute_util('cpuidle_wake_all_cpus')
|
||||
|
||||
def get_driver(self):
|
||||
return self.target.read_value(self.target.path.join(self.root_path, 'current_driver'))
|
||||
|
||||
def get_governor(self):
|
||||
return self.target.read_value(self.target.path.join(self.root_path, 'current_governor_ro'))
|
||||
|
devlib/module/devfreq.py (new file, 261 lines)
@@ -0,0 +1,261 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from devlib.module import Module
|
||||
from devlib.exception import TargetError
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
class DevfreqModule(Module):
|
||||
|
||||
name = 'devfreq'
|
||||
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
path = '/sys/class/devfreq/'
|
||||
if not target.file_exists(path):
|
||||
return False
|
||||
|
||||
# Check that at least one policy is implemented
|
||||
if not target.list_directory(path):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
@memoized
|
||||
def list_devices(self):
|
||||
"""Returns a list of devfreq devices supported by the target platform."""
|
||||
sysfile = '/sys/class/devfreq/'
|
||||
return self.target.list_directory(sysfile)
|
||||
|
||||
@memoized
|
||||
def list_governors(self, device):
|
||||
"""Returns a list of governors supported by the device."""
|
||||
sysfile = '/sys/class/devfreq/{}/available_governors'.format(device)
|
||||
output = self.target.read_value(sysfile)
|
||||
return output.strip().split()
|
||||
|
||||
def get_governor(self, device):
|
||||
"""Returns the governor currently set for the specified device."""
|
||||
if isinstance(device, int):
|
||||
device = 'device{}'.format(device)
|
||||
sysfile = '/sys/class/devfreq/{}/governor'.format(device)
|
||||
return self.target.read_value(sysfile)
|
||||
|
||||
def set_governor(self, device, governor):
|
||||
"""
|
||||
Set the governor for the specified device.
|
||||
|
||||
:param device: The device for which the governor is to be set. This must be
|
||||
the full name as it appears in sysfs, e.g. "e82c0000.mali".
|
||||
:param governor: The name of the governor to be used. This must be
|
||||
supported by the specific device.
|
||||
|
||||
Additional keyword arguments can be used to specify governor tunables for
|
||||
governors that support them.
|
||||
|
||||
:raises: TargetError if governor is not supported by the device, or if,
|
||||
for some reason, the governor could not be set.
|
||||
|
||||
"""
|
||||
supported = self.list_governors(device)
|
||||
if governor not in supported:
|
||||
raise TargetError('Governor {} not supported for device {}'.format(governor, device))
|
||||
sysfile = '/sys/class/devfreq/{}/governor'.format(device)
|
||||
self.target.write_value(sysfile, governor)
|
||||
|
||||
@memoized
|
||||
def list_frequencies(self, device):
|
||||
"""
|
||||
Returns a list of frequencies supported by the device or an empty list
|
||||
if could not be found.
|
||||
"""
|
||||
cmd = 'cat /sys/class/devfreq/{}/available_frequencies'.format(device)
|
||||
output = self.target.execute(cmd)
|
||||
available_frequencies = [int(freq) for freq in output.strip().split()]
|
||||
|
||||
return available_frequencies
|
||||
|
||||
def get_min_frequency(self, device):
|
||||
"""
|
||||
Returns the min frequency currently set for the specified device.
|
||||
|
||||
Warning: this method does not check whether the device is present. It
will try to read the minimum frequency and the following exception may
be raised ::
|
||||
|
||||
:raises: TargetError if for some reason the frequency could not be read.
|
||||
|
||||
"""
|
||||
sysfile = '/sys/class/devfreq/{}/min_freq'.format(device)
|
||||
return self.target.read_int(sysfile)
|
||||
|
||||
def set_min_frequency(self, device, frequency, exact=True):
|
||||
"""
|
||||
Sets the minimum value for device frequency. Actual frequency will
|
||||
depend on the thermal governor used and may vary during execution. The
|
||||
value should be either an int or a string representing an integer. The
|
||||
value must also be supported by the device. The available frequencies
|
||||
can be obtained by calling list_frequencies() or examining
|
||||
|
||||
/sys/class/devfreq/<device_name>/available_frequencies
|
||||
|
||||
on the device.
|
||||
|
||||
:raises: TargetError if the frequency is not supported by the device, or if, for
|
||||
some reason, frequency could not be set.
|
||||
:raises: ValueError if ``frequency`` is not an integer.
|
||||
|
||||
"""
|
||||
available_frequencies = self.list_frequencies(device)
|
||||
try:
|
||||
value = int(frequency)
|
||||
if exact and available_frequencies and value not in available_frequencies:
|
||||
raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(device,
|
||||
value,
|
||||
available_frequencies))
|
||||
sysfile = '/sys/class/devfreq/{}/min_freq'.format(device)
|
||||
self.target.write_value(sysfile, value)
|
||||
except ValueError:
|
||||
raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
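A usage sketch pinning a device to one operating point (the device name reuses the example from the docstring above and must exist on the target):

    dev = 'e82c0000.mali'

    freqs = target.devfreq.list_frequencies(dev)
    # Raise the ceiling first so the min <= max constraint always holds.
    target.devfreq.set_max_frequency(dev, max(freqs))
    target.devfreq.set_min_frequency(dev, max(freqs))
    print(target.devfreq.get_frequency(dev))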
|
||||
|
||||
def get_frequency(self, device):
|
||||
"""
|
||||
Returns the current frequency currently set for the specified device.
|
||||
|
||||
Warning: this method does not check whether the device is present. It
will try to read the current frequency and the following exception may
be raised ::
|
||||
|
||||
:raises: TargetError if for some reason the frequency could not be read.
|
||||
|
||||
"""
|
||||
sysfile = '/sys/class/devfreq/{}/cur_freq'.format(device)
|
||||
return self.target.read_int(sysfile)
|
||||
|
||||
def get_max_frequency(self, device):
|
||||
"""
|
||||
Returns the max frequency currently set for the specified device.
|
||||
|
||||
Warning: this method does not check whether the device is online. It will
try to read the maximum frequency and the following exception may be
raised ::
|
||||
|
||||
:raises: TargetError if for some reason the frequency could not be read.
|
||||
"""
|
||||
sysfile = '/sys/class/devfreq/{}/max_freq'.format(device)
|
||||
return self.target.read_int(sysfile)
|
||||
|
||||
def set_max_frequency(self, device, frequency, exact=True):
|
||||
"""
|
||||
Sets the maximum value for device frequency. Actual frequency will
|
||||
depend on the Governor used and may vary during execution. The value
|
||||
should be either an int or a string representing an integer. The value
|
||||
must also be supported by the device. The available frequencies can be
|
||||
obtained by calling list_frequencies() or examining
|
||||
|
||||
/sys/class/devfreq/<device_name>/available_frequencies
|
||||
|
||||
on the device.
|
||||
|
||||
:raises: TargetError if the frequency is not supported by the device, or
|
||||
if, for some reason, frequency could not be set.
|
||||
:raises: ValueError if ``frequency`` is not an integer.
|
||||
|
||||
"""
|
||||
available_frequencies = self.list_frequencies(device)
|
||||
try:
|
||||
value = int(frequency)
|
||||
except ValueError:
|
||||
raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
|
||||
|
||||
if exact and value not in available_frequencies:
|
||||
raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(device,
|
||||
value,
|
||||
available_frequencies))
|
||||
sysfile = '/sys/class/devfreq/{}/max_freq'.format(device)
|
||||
self.target.write_value(sysfile, value)
|
||||
|
||||
def set_governor_for_devices(self, devices, governor):
|
||||
"""
|
||||
Set the governor for the specified list of devices.
|
||||
|
||||
:param devices: The list of device for which the governor is to be set.
|
||||
"""
|
||||
for device in devices:
|
||||
self.set_governor(device, governor)
|
||||
|
||||
def set_all_governors(self, governor):
|
||||
"""
|
||||
Set the specified governor for all the (available) devices
|
||||
"""
|
||||
try:
|
||||
return self.target._execute_util(
|
||||
'devfreq_set_all_governors {}'.format(governor), as_root=True)
|
||||
except TargetError as e:
|
||||
if ("echo: I/O error" in str(e) or
|
||||
"write error: Invalid argument" in str(e)):
|
||||
|
||||
devs_unsupported = [d for d in self.list_devices()
|
||||
if governor not in self.list_governors(d)]
|
||||
raise TargetError("Governor {} unsupported for devices {}".format(
|
||||
governor, devs_unsupported))
|
||||
else:
|
||||
raise
|
||||
|
||||
def get_all_governors(self):
|
||||
"""
|
||||
Get the current governor for all the (available) devices
|
||||
"""
|
||||
output = self.target._execute_util(
|
||||
'devfreq_get_all_governors', as_root=True)
|
||||
governors = {}
|
||||
for x in output.splitlines():
|
||||
kv = x.split(' ')
|
||||
if kv[0] == '':
|
||||
break
|
||||
governors[kv[0]] = kv[1]
|
||||
return governors
|
||||
|
||||
def set_frequency_for_devices(self, devices, freq, exact=False):
|
||||
"""
|
||||
Set the frequency for the specified list of devices.
|
||||
|
||||
:param devices: The list of device for which the frequency has to be set.
|
||||
"""
|
||||
for device in devices:
|
||||
self.set_max_frequency(device, freq, exact)
|
||||
self.set_min_frequency(device, freq, exact)
|
||||
|
||||
def set_all_frequencies(self, freq):
|
||||
"""
|
||||
Set the specified (minimum) frequency for all the (available) devices
|
||||
"""
|
||||
return self.target._execute_util(
|
||||
'devfreq_set_all_frequencies {}'.format(freq),
|
||||
as_root=True)
|
||||
|
||||
def get_all_frequencies(self):
|
||||
"""
|
||||
Get the current frequency for all the (available) devices
|
||||
"""
|
||||
output = self.target._execute_util(
|
||||
'devfreq_get_all_frequencies', as_root=True)
|
||||
frequencies = {}
|
||||
for x in output.splitlines():
|
||||
kv = x.split(' ')
|
||||
if kv[0] == '':
|
||||
break
|
||||
frequencies[kv[0]] = kv[1]
|
||||
return frequencies
|
||||
|
devlib/module/gem5stats.py (new file, 254 lines)
@@ -0,0 +1,254 @@
|
||||
# Copyright 2017-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
import sys
|
||||
import logging
|
||||
import os.path
|
||||
from collections import defaultdict
|
||||
|
||||
import devlib
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.module import Module
|
||||
from devlib.platform import Platform
|
||||
from devlib.platform.gem5 import Gem5SimulationPlatform
|
||||
from devlib.utils.gem5 import iter_statistics_dump, GEM5STATS_ROI_NUMBER, GEM5STATS_DUMP_TAIL
|
||||
|
||||
|
||||
class Gem5ROI:
|
||||
def __init__(self, number, target):
|
||||
self.target = target
|
||||
self.number = number
|
||||
self.running = False
|
||||
self.field = 'ROI::{}'.format(number)
|
||||
|
||||
def start(self):
|
||||
if self.running:
|
||||
return False
|
||||
self.target.execute('m5 roistart {}'.format(self.number))
|
||||
self.running = True
|
||||
return True
|
||||
|
||||
def stop(self):
|
||||
if not self.running:
|
||||
return False
|
||||
self.target.execute('m5 roiend {}'.format(self.number))
|
||||
self.running = False
|
||||
return True
|
||||
|
||||
class Gem5StatsModule(Module):
|
||||
'''
|
||||
Module controlling Region of Interest (ROI) markers, statistics dump
frequency and parsing of the statistics log file when using gem5 platforms.
|
||||
|
||||
ROIs are identified by user-defined labels and need to be booked prior to
|
||||
use. The translation of labels into gem5 ROI numbers will be performed
|
||||
internally in order to avoid conflicts between multiple clients.
|
||||
'''
|
||||
name = 'gem5stats'
|
||||
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
return isinstance(target.platform, Gem5SimulationPlatform)
|
||||
|
||||
def __init__(self, target):
|
||||
super(Gem5StatsModule, self).__init__(target)
|
||||
self._current_origin = 0
|
||||
self._stats_file_path = os.path.join(target.platform.gem5_out_dir,
|
||||
'stats.txt')
|
||||
self.rois = {}
|
||||
self._dump_pos_cache = {0: 0}
|
||||
|
||||
def book_roi(self, label):
|
||||
if label in self.rois:
|
||||
raise KeyError('ROI label {} already used'.format(label))
|
||||
if len(self.rois) >= GEM5STATS_ROI_NUMBER:
|
||||
raise RuntimeError('Too many ROIs reserved')
|
||||
all_rois = set(range(GEM5STATS_ROI_NUMBER))
|
||||
used_rois = set([roi.number for roi in self.rois.values()])
|
||||
avail_rois = all_rois - used_rois
|
||||
self.rois[label] = Gem5ROI(list(avail_rois)[0], self.target)
|
||||
|
||||
def free_roi(self, label):
|
||||
if label not in self.rois:
|
||||
raise KeyError('ROI label {} not reserved yet'.format(label))
|
||||
self.rois[label].stop()
|
||||
del self.rois[label]
|
||||
|
||||
def roi_start(self, label):
|
||||
if label not in self.rois:
|
||||
raise KeyError('Incorrect ROI label: {}'.format(label))
|
||||
if not self.rois[label].start():
|
||||
raise TargetError('ROI {} was already running'.format(label))
|
||||
|
||||
def roi_end(self, label):
|
||||
if label not in self.rois:
|
||||
raise KeyError('Incorrect ROI label: {}'.format(label))
|
||||
if not self.rois[label].stop():
|
||||
raise TargetError('ROI {} was not running'.format(label))
|
||||
|
||||
def start_periodic_dump(self, delay_ns=0, period_ns=10000000):
|
||||
# Default period is 10ms because it's roughly what's needed to have
|
||||
# accurate power estimations
|
||||
if delay_ns < 0 or period_ns < 0:
|
||||
msg = 'Delay ({}) and period ({}) for periodic dumps must be non-negative'
|
||||
raise ValueError(msg.format(delay_ns, period_ns))
|
||||
self.target.execute('m5 dumpresetstats {} {}'.format(delay_ns, period_ns))
|
||||
|
||||
def match(self, keys, rois_labels, base_dump=0):
|
||||
'''
|
||||
Extract specific values from the statistics log file of gem5
|
||||
|
||||
:param keys: a list of key name or regular expression patterns that
|
||||
will be matched in the fields of the statistics file. ``match()``
|
||||
returns only the values of fields matching at least one these
|
||||
keys.
|
||||
:type keys: list
|
||||
|
||||
:param rois_labels: list of ROIs labels. ``match()`` returns the
|
||||
values of the specified fields only during dumps spanned by at
|
||||
least one of these ROIs.
|
||||
:type rois_label: list
|
||||
|
||||
:param base_dump: dump number from which ``match()`` should operate. By
|
||||
specifying a non-zero dump number, one can virtually truncate
|
||||
the head of the stats file and ignore all dumps before a specific
|
||||
instant. The value of ``base_dump`` will typically (but not
|
||||
necessarily) be the result of a previous call to ``next_dump_no``.
|
||||
Default value is 0.
|
||||
:type base_dump: int
|
||||
|
||||
:returns: a dict indexed by key parameters containing a dict indexed by
|
||||
ROI labels containing an in-order list of records for the key under
|
||||
consideration during the active intervals of the ROI.
|
||||
|
||||
Example of return value:
|
||||
* Result of match(['sim_'],['roi_1']):
|
||||
{
|
||||
'sim_inst':
|
||||
{
|
||||
'roi_1': [265300176, 267975881]
|
||||
}
|
||||
'sim_ops':
|
||||
{
|
||||
'roi_1': [324395787, 327699419]
|
||||
}
|
||||
'sim_seconds':
|
||||
{
|
||||
'roi_1': [0.199960, 0.199897]
|
||||
}
|
||||
'sim_freq':
|
||||
{
|
||||
'roi_1': [1000000000000, 1000000000000]
|
||||
}
|
||||
'sim_ticks':
|
||||
{
|
||||
'roi_1': [199960234227, 199896897330]
|
||||
}
|
||||
}
|
||||
'''
|
||||
records = defaultdict(lambda : defaultdict(list))
|
||||
for record, active_rois in self.match_iter(keys, rois_labels, base_dump):
|
||||
for key in record:
|
||||
for roi_label in active_rois:
|
||||
records[key][roi_label].append(record[key])
|
||||
return records
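A usage sketch tying the ROI and dump APIs together (the label and workload are arbitrary; this only works on a Gem5SimulationPlatform):

    g5 = target.gem5stats

    g5.book_roi('bench')
    base = g5.next_dump_no()           # ignore anything dumped before this point
    g5.start_periodic_dump(0, 10000000)

    g5.roi_start('bench')
    target.execute('ls /')             # the region of interest
    g5.roi_end('bench')

    stats = g5.match(['sim_seconds', 'sim_ticks'], ['bench'], base_dump=base)
    print(stats['sim_seconds']['bench'])
    g5.free_roi('bench')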
|
||||
|
||||
def match_iter(self, keys, rois_labels, base_dump=0):
|
||||
'''
|
||||
Yield specific values dump-by-dump from the statistics log file of gem5
|
||||
|
||||
:param keys: same as ``match()``
|
||||
:param rois_labels: same as ``match()``
|
||||
:param base_dump: same as ``match()``
|
||||
:returns: a pair containing:
|
||||
1. a dict storing the values corresponding to each of the found keys
|
||||
2. the list of currently active ROIs among those passed as parameters
|
||||
|
||||
Example of return value:
|
||||
* Result of match_iter(['sim_'],['roi_1', 'roi_2']).next()
|
||||
(
|
||||
{
|
||||
'sim_inst': 265300176,
|
||||
'sim_ops': 324395787,
|
||||
'sim_seconds': 0.199960,
|
||||
'sim_freq': 1000000000000,
|
||||
'sim_ticks': 199960234227,
|
||||
},
|
||||
[ 'roi_1 ' ]
|
||||
)
|
||||
'''
|
||||
for label in rois_labels:
|
||||
if label not in self.rois:
|
||||
raise KeyError('Impossible to match ROI label {}'.format(label))
|
||||
if self.rois[label].running:
|
||||
self.logger.warning('Trying to match records in statistics file'
|
||||
' while ROI {} is running'.format(label))
|
||||
|
||||
# Construct one large regex that concatenates all keys because
|
||||
# matching one large expression is more efficient than several smaller
|
||||
all_keys_re = re.compile('|'.join(keys))
|
||||
|
||||
def roi_active(roi_label, dump):
|
||||
roi = self.rois[roi_label]
|
||||
return (roi.field in dump) and (int(dump[roi.field]) == 1)
|
||||
|
||||
with open(self._stats_file_path, 'r') as stats_file:
|
||||
self._goto_dump(stats_file, base_dump)
|
||||
for dump in iter_statistics_dump(stats_file):
|
||||
active_rois = [l for l in rois_labels if roi_active(l, dump)]
|
||||
if active_rois:
|
||||
rec = {k: dump[k] for k in dump if all_keys_re.search(k)}
|
||||
yield (rec, active_rois)
|
||||
|
||||
def next_dump_no(self):
|
||||
'''
|
||||
Returns the number of the next dump to be written to the stats file.
|
||||
|
||||
For example, if next_dump_no is called while there are 5 (0 to 4) full
|
||||
dumps in the stats file, it will return 5. This is useful to know
|
||||
from which dump one should match() in the future to get only data from
|
||||
now on.
|
||||
'''
|
||||
with open(self._stats_file_path, 'r') as stats_file:
|
||||
# _goto_dump reaches EOF and returns the total number of dumps + 1
|
||||
return self._goto_dump(stats_file, sys.maxsize)
|
||||
|
||||
def _goto_dump(self, stats_file, target_dump):
|
||||
if target_dump < 0:
|
||||
raise HostError('Cannot go to dump {}'.format(target_dump))
|
||||
|
||||
# Go to required dump quickly if it was visited before
|
||||
if target_dump in self._dump_pos_cache:
|
||||
stats_file.seek(self._dump_pos_cache[target_dump])
|
||||
return target_dump
|
||||
# Or start from the closest dump already visited before the required one
|
||||
prev_dumps = filter(lambda x: x < target_dump, self._dump_pos_cache.keys())
|
||||
curr_dump = max(prev_dumps)
|
||||
curr_pos = self._dump_pos_cache[curr_dump]
|
||||
stats_file.seek(curr_pos)
|
||||
|
||||
# And iterate until target_dump
|
||||
dump_iterator = iter_statistics_dump(stats_file)
|
||||
while curr_dump < target_dump:
|
||||
try:
|
||||
dump = next(dump_iterator)
|
||||
except StopIteration:
|
||||
break
|
||||
# The end of the current dump is the beginning of the next one
|
||||
curr_pos = stats_file.tell()
|
||||
curr_dump += 1
|
||||
self._dump_pos_cache[curr_dump] = curr_pos
|
||||
return curr_dump
|
||||
|
devlib/module/gpufreq.py (new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Copyright 2017 Google, ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
import json
|
||||
from devlib.module import Module
|
||||
from devlib.exception import TargetError
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
class GpufreqModule(Module):
|
||||
|
||||
name = 'gpufreq'
|
||||
path = ''
|
||||
|
||||
def __init__(self, target):
|
||||
super(GpufreqModule, self).__init__(target)
|
||||
frequencies_str = self.target.read_value("/sys/kernel/gpu/gpu_freq_table")
|
||||
self.frequencies = list(map(int, frequencies_str.split(" ")))
|
||||
self.frequencies.sort()
|
||||
self.governors = self.target.read_value("/sys/kernel/gpu/gpu_available_governor").split(" ")
|
||||
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
# kgsl/Adreno
|
||||
probe_path = '/sys/kernel/gpu/'
|
||||
if target.file_exists(probe_path):
|
||||
model = target.read_value(probe_path + "gpu_model")
|
||||
if re.search('adreno', model, re.IGNORECASE):
|
||||
return True
|
||||
return False
|
||||
|
||||
def set_governor(self, governor):
|
||||
if governor not in self.governors:
|
||||
raise TargetError('Governor {} not supported for gpu'.format(governor))
|
||||
self.target.write_value("/sys/kernel/gpu/gpu_governor", governor)
|
||||
|
||||
def get_frequencies(self):
|
||||
"""
|
||||
Returns the list of frequencies that the GPU can have
|
||||
"""
|
||||
return self.frequencies
|
||||
|
||||
def get_current_frequency(self):
|
||||
"""
|
||||
Returns the current frequency currently set for the GPU.
|
||||
|
||||
Warning: this method does not check whether the GPU is online. It will
try to read the current frequency and the following exception may be
raised ::
|
||||
|
||||
:raises: TargetError if for some reason the frequency could not be read.
|
||||
|
||||
"""
|
||||
return int(self.target.read_value("/sys/kernel/gpu/gpu_clock"))
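A usage sketch (only meaningful on an Android target exposing the kgsl/Adreno sysfs nodes; the governor name must appear in gpu.governors):

    gpu = target.gpufreq

    print(gpu.get_model_name())
    print('GPU OPPs:', gpu.get_frequencies())

    gpu.set_governor('performance')
    print(gpu.get_current_frequency())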
|
||||
|
||||
@memoized
|
||||
def get_model_name(self):
|
||||
"""
|
||||
Returns the model name reported by the GPU.
|
||||
"""
|
||||
try:
|
||||
return self.target.read_value("/sys/kernel/gpu/gpu_model")
|
||||
except:
|
||||
return "unknown"
|
@@ -1,3 +1,18 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from devlib.module import Module
|
||||
|
||||
|
||||
@@ -21,7 +36,8 @@ class HotplugModule(Module):
|
||||
return target.path.join(cls.base_path, cpu, 'online')
|
||||
|
||||
def online_all(self):
|
||||
self.online(*range(self.target.number_of_cpus))
|
||||
self.target._execute_util('hotplug_online_all',
|
||||
as_root=self.target.is_rooted)
|
||||
|
||||
def online(self, *args):
|
||||
for cpu in args:
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,9 +12,11 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import os
|
||||
import re
|
||||
from collections import defaultdict
|
||||
|
||||
from devlib import TargetError
|
||||
from devlib.module import Module
|
||||
from devlib.utils.types import integer
|
||||
|
||||
@@ -73,19 +75,19 @@ class HwmonDevice(object):
|
||||
@property
|
||||
def sensors(self):
|
||||
all_sensors = []
|
||||
for sensors_of_kind in self._sensors.itervalues():
|
||||
all_sensors.extend(sensors_of_kind.values())
|
||||
for sensors_of_kind in self._sensors.values():
|
||||
all_sensors.extend(list(sensors_of_kind.values()))
|
||||
return all_sensors
|
||||
|
||||
def __init__(self, target, path):
|
||||
def __init__(self, target, path, name, fields):
|
||||
self.target = target
|
||||
self.path = path
|
||||
self.name = self.target.read_value(self.target.path.join(self.path, 'name'))
|
||||
self.name = name
|
||||
self._sensors = defaultdict(dict)
|
||||
path = self.path
|
||||
if not path.endswith(self.target.path.sep):
|
||||
path += self.target.path.sep
|
||||
for entry in self.target.list_directory(path):
|
||||
for entry in fields:
|
||||
match = HWMON_FILE_REGEX.search(entry)
|
||||
if match:
|
||||
kind = match.group('kind')
|
||||
@@ -98,7 +100,7 @@ class HwmonDevice(object):
|
||||
|
||||
def get(self, kind, number=None):
|
||||
if number is None:
|
||||
return [s for _, s in sorted(self._sensors[kind].iteritems(),
|
||||
return [s for _, s in sorted(self._sensors[kind].items(),
|
||||
key=lambda x: x[0])]
|
||||
else:
|
||||
return self._sensors[kind].get(number)
|
||||
@@ -115,7 +117,12 @@ class HwmonModule(Module):
|
||||
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
return target.file_exists(HWMON_ROOT)
|
||||
try:
|
||||
target.list_directory(HWMON_ROOT, as_root=target.is_rooted)
|
||||
except TargetError:
|
||||
# Doesn't exist or no permissions
|
||||
return False
|
||||
return True
|
||||
|
||||
@property
|
||||
def sensors(self):
|
||||
@@ -131,10 +138,13 @@ class HwmonModule(Module):
|
||||
self.scan()
|
||||
|
||||
def scan(self):
|
||||
for entry in self.target.list_directory(self.root):
|
||||
if entry.startswith('hwmon'):
|
||||
entry_path = self.target.path.join(self.root, entry)
|
||||
if self.target.file_exists(self.target.path.join(entry_path, 'name')):
|
||||
device = HwmonDevice(self.target, entry_path)
|
||||
self.devices.append(device)
|
||||
values_tree = self.target.read_tree_values(self.root, depth=3)
|
||||
for entry_id, fields in values_tree.items():
|
||||
path = self.target.path.join(self.root, entry_id)
|
||||
name = fields.pop('name', None)
|
||||
if name is None:
|
||||
continue
|
||||
self.logger.debug('Adding device {}'.format(name))
|
||||
device = HwmonDevice(self.target, path, name, fields)
|
||||
self.devices.append(device)
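With devices discovered from a single read_tree_values() call, sensors can then be read per kind (a sketch; 'temp' is only an example kind and may not exist on every board):

    for device in target.hwmon.devices:
        print(device.name)
        for sensor in device.get('temp'):
            print('   ', sensor)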
|
||||
|
||||
|
devlib/module/sched.py (new file, 334 lines)
@@ -0,0 +1,334 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Copyright 2018 Arm Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import re
|
||||
|
||||
from devlib.module import Module
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
|
||||
class SchedProcFSNode(object):
|
||||
"""
|
||||
Represents a sched_domain procfs node
|
||||
|
||||
:param nodes: Dictionary view of the underlying procfs nodes
|
||||
(as returned by devlib.read_tree_values())
|
||||
:type nodes: dict
|
||||
|
||||
|
||||
Say you want to represent this path/data:
|
||||
$ cat /proc/sys/kernel/sched_domain/cpu0/domain*/name
|
||||
MC
|
||||
DIE
|
||||
|
||||
Taking cpu0 as a root, this can be defined as:
|
||||
>>> data = {"domain0" : {"name" : "MC"}, "domain1" : {"name" : "DIE"}}
|
||||
|
||||
>>> repr = SchedProcFSNode(data)
|
||||
>>> print repr.domains[0].name
|
||||
MC
|
||||
|
||||
The "raw" dict remains available under the `procfs` field:
|
||||
>>> print repr.procfs["domain0"]["name"]
|
||||
MC
|
||||
"""
|
||||
|
||||
_re_procfs_node = re.compile(r"(?P<name>.*)(?P<digits>\d+)$")
|
||||
|
||||
@staticmethod
|
||||
def _ends_with_digits(node):
|
||||
if not isinstance(node, basestring):
|
||||
return False
|
||||
|
||||
return re.search(SchedProcFSNode._re_procfs_node, node) != None
|
||||
|
||||
@staticmethod
|
||||
def _node_digits(node):
|
||||
"""
|
||||
:returns: The ending digits of the procfs node
|
||||
"""
|
||||
return int(re.search(SchedProcFSNode._re_procfs_node, node).group("digits"))
|
||||
|
||||
@staticmethod
|
||||
def _node_name(node):
|
||||
"""
|
||||
:returns: The name of the procfs node
|
||||
"""
|
||||
return re.search(SchedProcFSNode._re_procfs_node, node).group("name")
|
||||
|
||||
@staticmethod
|
||||
def _packable(node, entries):
|
||||
"""
|
||||
:returns: Whether it makes sense to pack a node into a common entry
|
||||
"""
|
||||
return (SchedProcFSNode._ends_with_digits(node) and
|
||||
any([SchedProcFSNode._ends_with_digits(x) and
|
||||
SchedProcFSNode._node_digits(x) != SchedProcFSNode._node_digits(node) and
|
||||
SchedProcFSNode._node_name(x) == SchedProcFSNode._node_name(node)
|
||||
for x in entries]))
|
||||
|
||||
@staticmethod
|
||||
def _build_directory(node_name, node_data):
|
||||
if node_name.startswith("domain"):
|
||||
return SchedDomain(node_data)
|
||||
else:
|
||||
return SchedProcFSNode(node_data)
|
||||
|
||||
@staticmethod
|
||||
def _build_entry(node_name, node_data):
|
||||
value = node_data
|
||||
|
||||
# Most nodes just contain numerical data, try to convert
|
||||
try:
|
||||
value = int(value)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return value
|
||||
|
||||
@staticmethod
|
||||
def _build_node(node_name, node_data):
|
||||
if isinstance(node_data, dict):
|
||||
return SchedProcFSNode._build_directory(node_name, node_data)
|
||||
else:
|
||||
return SchedProcFSNode._build_entry(node_name, node_data)
|
||||
|
||||
def __getattr__(self, name):
|
||||
return self._dyn_attrs[name]
|
||||
|
||||
def __init__(self, nodes):
|
||||
self.procfs = nodes
|
||||
# First, reduce the procs fields by packing them if possible
|
||||
# Find which entries can be packed into a common entry
|
||||
packables = {
|
||||
node : SchedProcFSNode._node_name(node) + "s"
|
||||
for node in list(nodes.keys()) if SchedProcFSNode._packable(node, list(nodes.keys()))
|
||||
}
|
||||
|
||||
self._dyn_attrs = {}
|
||||
|
||||
for dest in set(packables.values()):
|
||||
self._dyn_attrs[dest] = {}
|
||||
|
||||
# Pack common entries
|
||||
for key, dest in packables.items():
|
||||
i = SchedProcFSNode._node_digits(key)
|
||||
self._dyn_attrs[dest][i] = self._build_node(key, nodes[key])
|
||||
|
||||
# Build the other nodes
|
||||
for key in nodes.keys():
|
||||
if key in packables:
|
||||
continue
|
||||
|
||||
self._dyn_attrs[key] = self._build_node(key, nodes[key])
|
||||
|
||||
|
||||
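To make the packing behaviour implemented above concrete, here is a small sketch (flag and interval values are purely illustrative):

# Hypothetical procfs view with sibling "domainN" entries
nodes = {
    "domain0": {"name": "MC", "flags": 4143},
    "domain1": {"name": "DIE", "flags": 4143},
    "min_interval": 1,
}
node = SchedProcFSNode(nodes)
# "domain0"/"domain1" are packed under a pluralised attribute, keyed by their index
assert node.domains[0].name == "MC"
assert node.domains[1].name == "DIE"
# non-packable entries become plain attributes
assert node.min_interval == 1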
class SchedDomain(SchedProcFSNode):
|
||||
"""
|
||||
Represents a sched domain as seen through procfs
|
||||
"""
|
||||
# Domain flags obtained from include/linux/sched/topology.h on v4.17
|
||||
# https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux/+/v4.17/include/linux/sched/topology.h#20
|
||||
SD_LOAD_BALANCE = 0x0001 # Do load balancing on this domain.
|
||||
SD_BALANCE_NEWIDLE = 0x0002 # Balance when about to become idle
|
||||
SD_BALANCE_EXEC = 0x0004 # Balance on exec
|
||||
SD_BALANCE_FORK = 0x0008 # Balance on fork, clone
|
||||
SD_BALANCE_WAKE = 0x0010 # Balance on wakeup
|
||||
SD_WAKE_AFFINE = 0x0020 # Wake task to waking CPU
|
||||
SD_ASYM_CPUCAPACITY = 0x0040 # Groups have different max cpu capacities
|
||||
SD_SHARE_CPUCAPACITY = 0x0080 # Domain members share cpu capacity
|
||||
SD_SHARE_POWERDOMAIN = 0x0100 # Domain members share power domain
|
||||
SD_SHARE_PKG_RESOURCES = 0x0200 # Domain members share cpu pkg resources
|
||||
SD_SERIALIZE = 0x0400 # Only a single load balancing instance
|
||||
SD_ASYM_PACKING = 0x0800 # Place busy groups earlier in the domain
|
||||
SD_PREFER_SIBLING = 0x1000 # Prefer to place tasks in a sibling domain
|
||||
SD_OVERLAP = 0x2000 # sched_domains of this level overlap
|
||||
SD_NUMA = 0x4000 # cross-node balancing
|
||||
# Only defined in Android
|
||||
# https://android.googlesource.com/kernel/common/+/android-4.14/include/linux/sched/topology.h#29
|
||||
SD_SHARE_CAP_STATES = 0x8000 # Domain members share capacity state
|
||||
|
||||
# Checked to be valid from v4.4
|
||||
SD_FLAGS_REF_PARTS = (4, 4, 0)
|
||||
|
||||
@staticmethod
|
||||
def check_version(target, logger):
|
||||
"""
|
||||
Check the target and see if its kernel version matches our view of the world
|
||||
"""
|
||||
parts = target.kernel_version.parts
|
||||
if parts < SchedDomain.SD_FLAGS_REF_PARTS:
|
||||
logger.warning(
|
||||
"Sched domain flags are defined for kernels v{} and up, "
|
||||
"but target is running v{}".format(SchedDomain.SD_FLAGS_REF_PARTS, parts)
|
||||
)
|
||||
|
||||
def has_flags(self, flags):
|
||||
"""
|
||||
:returns: Whether 'flags' are set on this sched domain
|
||||
"""
|
||||
return self.flags & flags == flags
|
||||
|
||||
|
||||
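A short usage sketch for the flag helpers (assuming the sched module has been loaded on the target and that the kernel exposes numeric domain flags, as on the v4.x kernels referenced above):

# Hypothetical: inspect the lowest sched domain of CPU0
sd_info = target.sched.get_cpu_sd_info(0)   # defined further down in SchedModule
mc_domain = sd_info.domains[0]
wanted = SchedDomain.SD_WAKE_AFFINE | SchedDomain.SD_SHARE_PKG_RESOURCES
if mc_domain.has_flags(wanted):
    print("CPU0's first-level domain does affine wakeups and shares package resources")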
class SchedProcFSData(SchedProcFSNode):
|
||||
"""
|
||||
Root class for creating & storing SchedProcFSNode instances
|
||||
"""
|
||||
_read_depth = 6
|
||||
sched_domain_root = '/proc/sys/kernel/sched_domain'
|
||||
|
||||
@staticmethod
|
||||
def available(target):
|
||||
return target.directory_exists(SchedProcFSData.sched_domain_root)
|
||||
|
||||
def __init__(self, target, path=None):
|
||||
if not path:
|
||||
path = self.sched_domain_root
|
||||
|
||||
procfs = target.read_tree_values(path, depth=self._read_depth)
|
||||
super(SchedProcFSData, self).__init__(procfs)
|
||||
|
||||
|
||||
class SchedModule(Module):
|
||||
|
||||
name = 'sched'
|
||||
|
||||
cpu_sysfs_root = '/sys/devices/system/cpu'
|
||||
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
logger = logging.getLogger(SchedModule.name)
|
||||
SchedDomain.check_version(target, logger)
|
||||
|
||||
return SchedProcFSData.available(target)
|
||||
|
||||
def get_cpu_sd_info(self, cpu):
|
||||
"""
|
||||
:returns: An object view of /proc/sys/kernel/sched_domain/cpu<cpu>/*
|
||||
"""
|
||||
path = self.target.path.join(
|
||||
SchedProcFSData.sched_domain_root,
|
||||
"cpu{}".format(cpu)
|
||||
)
|
||||
|
||||
return SchedProcFSData(self.target, path)
|
||||
|
||||
def get_sd_info(self):
|
||||
"""
|
||||
:returns: An object view of /proc/sys/kernel/sched_domain/*
|
||||
"""
|
||||
return SchedProcFSData(self.target)
|
||||
|
||||
def get_capacity(self, cpu):
|
||||
"""
|
||||
:returns: The capacity of 'cpu'
|
||||
"""
|
||||
return self.get_capacities()[cpu]
|
||||
|
||||
@memoized
|
||||
def has_em(self, cpu, sd=None):
|
||||
"""
|
||||
:returns: Whether energy model data is available for 'cpu'
|
||||
"""
|
||||
if not sd:
|
||||
sd = SchedProcFSData(self.target, cpu)
|
||||
|
||||
return sd.procfs["domain0"].get("group0", {}).get("energy", {}).get("cap_states") != None
|
||||
|
||||
@memoized
|
||||
def has_dmips_capacity(self, cpu):
|
||||
"""
|
||||
:returns: Whether dmips capacity data is available for 'cpu'
|
||||
"""
|
||||
return self.target.file_exists(
|
||||
self.target.path.join(self.cpu_sysfs_root, 'cpu{}/cpu_capacity'.format(cpu))
|
||||
)
|
||||
|
||||
@memoized
|
||||
def get_em_capacity(self, cpu, sd=None):
|
||||
"""
|
||||
:returns: The maximum capacity value exposed by the EAS energy model
|
||||
"""
|
||||
if not sd:
|
||||
sd = SchedProcFSData(self.target, cpu)
|
||||
|
||||
cap_states = sd.domains[0].groups[0].energy.cap_states
|
||||
return int(cap_states.split('\t')[-2])
|
||||
|
||||
@memoized
|
||||
def get_dmips_capacity(self, cpu):
|
||||
"""
|
||||
:returns: The capacity value generated from the capacity-dmips-mhz DT entry
|
||||
"""
|
||||
return self.target.read_value(
|
||||
self.target.path.join(
|
||||
self.cpu_sysfs_root,
|
||||
'cpu{}/cpu_capacity'.format(cpu)
|
||||
),
|
||||
int
|
||||
)
|
||||
|
||||
@memoized
|
||||
def get_capacities(self, default=None):
|
||||
"""
|
||||
:param default: Default capacity value to use if no data is
|
||||
found in procfs
|
||||
|
||||
:returns: a dictionary of the shape {cpu : capacity}
|
||||
|
||||
:raises RuntimeError: Raised when no capacity information is
|
||||
found and 'default' is None
|
||||
"""
|
||||
cpus = list(range(self.target.number_of_cpus))
|
||||
|
||||
capacities = {}
|
||||
sd_info = self.get_sd_info()
|
||||
|
||||
for cpu in cpus:
|
||||
if self.has_em(cpu, sd_info.cpus[cpu]):
|
||||
capacities[cpu] = self.get_em_capacity(cpu, sd_info.cpus[cpu])
|
||||
elif self.has_dmips_capacity(cpu):
|
||||
capacities[cpu] = self.get_dmips_capacity(cpu)
|
||||
else:
|
||||
if default != None:
|
||||
capacities[cpu] = default
|
||||
else:
|
||||
raise RuntimeError('No capacity data for cpu{}'.format(cpu))
|
||||
|
||||
return capacities
|
||||
|
||||
@memoized
|
||||
def get_hz(self):
|
||||
"""
|
||||
:returns: The scheduler tick frequency on the target
|
||||
"""
|
||||
return int(self.target.config.get('CONFIG_HZ', strict=True))
|
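As a usage sketch for the capacity helpers above (the numbers are invented; real values depend on the target's energy model or capacity-dmips-mhz data):

# Hypothetical big.LITTLE target
capacities = target.sched.get_capacities(default=1024)
# e.g. {0: 446, 1: 446, 2: 446, 3: 446, 4: 1024, 5: 1024}
biggest = max(capacities.values())
little_cpus = [cpu for cpu, cap in capacities.items() if cap < biggest]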
104
devlib/module/thermal.py
Normal file
@@ -0,0 +1,104 @@
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
|
||||
from devlib.module import Module
|
||||
|
||||
class TripPoint(object):
|
||||
def __init__(self, zone, _id):
|
||||
self._id = _id
|
||||
self.zone = zone
|
||||
self.temp_node = 'trip_point_' + _id + '_temp'
|
||||
self.type_node = 'trip_point_' + _id + '_type'
|
||||
|
||||
@property
|
||||
def target(self):
|
||||
return self.zone.target
|
||||
|
||||
def get_temperature(self):
|
||||
"""Returns the currently configured temperature of the trip point"""
|
||||
temp_file = self.target.path.join(self.zone.path, self.temp_node)
|
||||
return self.target.read_int(temp_file)
|
||||
|
||||
def set_temperature(self, temperature):
|
||||
temp_file = self.target.path.join(self.zone.path, self.temp_node)
|
||||
self.target.write_value(temp_file, temperature)
|
||||
|
||||
def get_type(self):
|
||||
"""Returns the type of trip point"""
|
||||
type_file = self.target.path.join(self.zone.path, self.type_node)
|
||||
return self.target.read_value(type_file)
|
||||
|
||||
class ThermalZone(object):
|
||||
def __init__(self, target, root, _id):
|
||||
self.target = target
|
||||
self.name = 'thermal_zone' + _id
|
||||
self.path = target.path.join(root, self.name)
|
||||
self.trip_points = {}
|
||||
|
||||
for entry in self.target.list_directory(self.path):
|
||||
re_match = re.match('^trip_point_([0-9]+)_temp', entry)
|
||||
if re_match is not None:
|
||||
self.add_trip_point(re_match.group(1))
|
||||
|
||||
def add_trip_point(self, _id):
|
||||
self.trip_points[int(_id)] = TripPoint(self, _id)
|
||||
|
||||
def is_enabled(self):
|
||||
"""Returns a boolean representing the 'mode' of the thermal zone"""
|
||||
value = self.target.read_value(self.target.path.join(self.path, 'mode'))
|
||||
return value == 'enabled'
|
||||
|
||||
def set_enabled(self, enabled=True):
|
||||
value = 'enabled' if enabled else 'disabled'
|
||||
self.target.write_value(self.target.path.join(self.path, 'mode'), value)
|
||||
|
||||
def get_temperature(self):
|
||||
"""Returns the temperature of the thermal zone"""
|
||||
temp_file = self.target.path.join(self.path, 'temp')
|
||||
return self.target.read_int(temp_file)
|
||||
|
||||
class ThermalModule(Module):
|
||||
name = 'thermal'
|
||||
thermal_root = '/sys/class/thermal'
|
||||
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
|
||||
if target.file_exists(ThermalModule.thermal_root):
|
||||
return True
|
||||
|
||||
def __init__(self, target):
|
||||
super(ThermalModule, self).__init__(target)
|
||||
|
||||
self.zones = {}
|
||||
self.cdevs = []
|
||||
|
||||
for entry in target.list_directory(self.thermal_root):
|
||||
re_match = re.match('^(thermal_zone|cooling_device)([0-9]+)', entry)
|
||||
|
||||
if re_match.group(1) == 'thermal_zone':
|
||||
self.add_thermal_zone(re_match.group(2))
|
||||
elif re_match.group(1) == 'cooling_device':
|
||||
# TODO
|
||||
pass
|
||||
|
||||
def add_thermal_zone(self, _id):
|
||||
self.zones[int(_id)] = ThermalZone(self.target, self.thermal_root, _id)
|
||||
|
||||
def disable_all_zones(self):
|
||||
"""Disables all the thermal zones in the target"""
|
||||
for zone in self.zones.values():
|
||||
zone.set_enabled(False)
|
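A minimal usage sketch for the thermal module above (assuming the module has been loaded on a target that exposes /sys/class/thermal):

# Hypothetical: dump every zone and its trip points, then disable zone 0
for zone_id, zone in target.thermal.zones.items():
    print(zone.name, zone.get_temperature())
    for trip_id, trip in zone.trip_points.items():
        print('  trip', trip_id, trip.get_type(), trip.get_temperature())
target.thermal.zones[0].set_enabled(False)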
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -17,6 +17,7 @@ import os
|
||||
import time
|
||||
import tarfile
|
||||
import shutil
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
from devlib.module import HardRestModule, BootModule, FlashModule
|
||||
from devlib.exception import TargetError, HostError
|
||||
@@ -25,7 +26,8 @@ from devlib.utils.uefi import UefiMenu, UefiConfig
|
||||
from devlib.utils.uboot import UbootMenu
|
||||
|
||||
|
||||
AUTOSTART_MESSAGE = 'Press Enter to stop auto boot...'
|
||||
OLD_AUTOSTART_MESSAGE = 'Press Enter to stop auto boot...'
|
||||
AUTOSTART_MESSAGE = 'Hit any key to stop autoboot:'
|
||||
POWERUP_MESSAGE = 'Powering up system...'
|
||||
DEFAULT_MCC_PROMPT = 'Cmd>'
|
||||
|
||||
@@ -51,7 +53,7 @@ class VexpressDtrHardReset(HardRestModule):
|
||||
try:
|
||||
if self.target.is_connected:
|
||||
self.target.execute('sync')
|
||||
except TargetError:
|
||||
except (TargetError, CalledProcessError):
|
||||
pass
|
||||
with open_serial_connection(port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
@@ -136,18 +138,20 @@ class VexpressBootModule(BootModule):
|
||||
def get_through_early_boot(self, tty):
|
||||
self.logger.debug('Establishing initial state...')
|
||||
tty.sendline('')
|
||||
i = tty.expect([AUTOSTART_MESSAGE, POWERUP_MESSAGE, self.mcc_prompt])
|
||||
if i == 2:
|
||||
i = tty.expect([AUTOSTART_MESSAGE, OLD_AUTOSTART_MESSAGE, POWERUP_MESSAGE, self.mcc_prompt])
|
||||
if i == 3:
|
||||
self.logger.debug('Saw MCC prompt.')
|
||||
time.sleep(self.short_delay)
|
||||
tty.sendline('reboot')
|
||||
elif i == 1:
|
||||
elif i == 2:
|
||||
self.logger.debug('Saw powering up message (assuming soft reboot).')
|
||||
else:
|
||||
self.logger.debug('Saw auto boot message.')
|
||||
tty.sendline('')
|
||||
time.sleep(self.short_delay)
|
||||
# could be either depending on where in the boot we are
|
||||
tty.sendline('reboot')
|
||||
tty.sendline('reset')
|
||||
|
||||
def get_uefi_menu(self, tty):
|
||||
menu = UefiMenu(tty)
|
||||
@@ -247,7 +251,7 @@ class VexpressUBoot(VexpressBootModule):
|
||||
menu = UbootMenu(tty)
|
||||
self.logger.debug('Waiting for U-Boot prompt...')
|
||||
menu.open(timeout=120)
|
||||
for var, value in self.env.iteritems():
|
||||
for var, value in self.env.items():
|
||||
menu.setenv(var, value)
|
||||
menu.boot()
|
||||
|
||||
@@ -324,7 +328,7 @@ class VersatileExpressFlashModule(FlashModule):
|
||||
baudrate=self.target.platform.baudrate,
|
||||
timeout=self.timeout,
|
||||
init_dtr=0) as tty:
|
||||
i = tty.expect([self.mcc_prompt, AUTOSTART_MESSAGE])
|
||||
i = tty.expect([self.mcc_prompt, AUTOSTART_MESSAGE, OLD_AUTOSTART_MESSAGE])
|
||||
if i:
|
||||
tty.sendline('')
|
||||
wait_for_vemsd(self.vemsd_mount, tty, self.mcc_prompt, self.short_delay)
|
||||
@@ -334,7 +338,7 @@ class VersatileExpressFlashModule(FlashModule):
|
||||
if images:
|
||||
self._overlay_images(images)
|
||||
os.system('sync')
|
||||
except (IOError, OSError), e:
|
||||
except (IOError, OSError) as e:
|
||||
msg = 'Could not deploy images to {}; got: {}'
|
||||
raise TargetError(msg.format(self.vemsd_mount, e))
|
||||
self.target.boot()
|
||||
@@ -348,7 +352,7 @@ class VersatileExpressFlashModule(FlashModule):
|
||||
tar.extractall(self.vemsd_mount)
|
||||
|
||||
def _overlay_images(self, images):
|
||||
for dest, src in images.iteritems():
|
||||
for dest, src in images.items():
|
||||
dest = os.path.join(self.vemsd_mount, dest)
|
||||
self.logger.debug('Copying {} to {}'.format(src, dest))
|
||||
shutil.copy(src, dest)
|
||||
@@ -375,7 +379,7 @@ def wait_for_vemsd(vemsd_mount, tty, mcc_prompt=DEFAULT_MCC_PROMPT, short_delay=
|
||||
path = os.path.join(vemsd_mount, 'config.txt')
|
||||
if os.path.exists(path):
|
||||
return
|
||||
for _ in xrange(attempts):
|
||||
for _ in range(attempts):
|
||||
tty.sendline('') # clear any garbage
|
||||
tty.expect(mcc_prompt, timeout=short_delay)
|
||||
tty.sendline('usb_on')
|
||||
|
@@ -1,6 +1,24 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
|
||||
|
||||
BIG_CPUS = ['A15', 'A57', 'A72', 'A73']
|
||||
|
||||
|
||||
class Platform(object):
|
||||
|
||||
@property
|
||||
@@ -25,7 +43,6 @@ class Platform(object):
|
||||
self.logger = logging.getLogger(self.name)
|
||||
if not self.core_clusters and self.core_names:
|
||||
self._set_core_clusters_from_core_names()
|
||||
self._validate()
|
||||
|
||||
def init_target_connection(self, target):
|
||||
# May be overwritten by subclasses to provide target-specific
|
||||
@@ -37,8 +54,7 @@ class Platform(object):
|
||||
self.core_names = target.cpuinfo.cpu_names
|
||||
self._set_core_clusters_from_core_names()
|
||||
if not self.big_core and self.number_of_clusters == 2:
|
||||
big_idx = self.core_clusters.index(max(self.core_clusters))
|
||||
self.big_core = self.core_names[big_idx]
|
||||
self.big_core = self._identify_big_core()
|
||||
if not self.core_clusters and self.core_names:
|
||||
self._set_core_clusters_from_core_names()
|
||||
if not self.model:
|
||||
@@ -47,6 +63,11 @@ class Platform(object):
|
||||
self.name = self.model
|
||||
self._validate()
|
||||
|
||||
def setup(self, target):
|
||||
# May be overwritten by subclasses to provide platform-specific
|
||||
# setup procedures.
|
||||
pass
|
||||
|
||||
def _set_core_clusters_from_core_names(self):
|
||||
self.core_clusters = []
|
||||
clusters = []
|
||||
@@ -65,6 +86,13 @@ class Platform(object):
|
||||
except Exception: # pylint: disable=broad-except
|
||||
pass # this is best-effort
|
||||
|
||||
def _identify_big_core(self):
|
||||
for core in self.core_names:
|
||||
if core.upper() in BIG_CPUS:
|
||||
return core
|
||||
big_idx = self.core_clusters.index(max(self.core_clusters))
|
||||
return self.core_names[big_idx]
|
||||
|
||||
def _validate(self):
|
||||
if len(self.core_names) != len(self.core_clusters):
|
||||
raise ValueError('core_names and core_clusters are of different lengths.')
|
||||
@@ -76,6 +104,7 @@ class Platform(object):
|
||||
raise ValueError(message.format(self.big_core,
|
||||
', '.join(set(self.core_names))))
|
||||
if self.big_core:
|
||||
little_idx = self.core_clusters.index(min(self.core_clusters))
|
||||
self.little_core = self.core_names[little_idx]
|
||||
|
||||
for core in self.core_names:
|
||||
if core != self.big_core:
|
||||
self.little_core = core
|
||||
break
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,15 +14,16 @@
|
||||
#
|
||||
from __future__ import division
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import csv
|
||||
import time
|
||||
import pexpect
|
||||
|
||||
from devlib.platform import Platform
|
||||
from devlib.instrument import Instrument, InstrumentChannel, MeasurementsCsv, CONTINUOUS
|
||||
from devlib.instrument import Instrument, InstrumentChannel, MeasurementsCsv, Measurement, CONTINUOUS, INSTANTANEOUS
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.utils.csvutil import csvreader, csvwriter
|
||||
from devlib.utils.serial_port import open_serial_connection
|
||||
|
||||
|
||||
@@ -33,6 +34,7 @@ class VersatileExpressPlatform(Platform):
|
||||
core_names=None,
|
||||
core_clusters=None,
|
||||
big_core=None,
|
||||
model=None,
|
||||
modules=None,
|
||||
|
||||
# serial settings
|
||||
@@ -61,6 +63,7 @@ class VersatileExpressPlatform(Platform):
|
||||
core_names,
|
||||
core_clusters,
|
||||
big_core,
|
||||
model,
|
||||
modules)
|
||||
self.serial_port = serial_port
|
||||
self.baudrate = baudrate
|
||||
@@ -86,6 +89,9 @@ class VersatileExpressPlatform(Platform):
|
||||
def _init_android_target(self, target):
|
||||
if target.connection_settings.get('device') is None:
|
||||
addr = self._get_target_ip_address(target)
|
||||
if sys.version_info[0] == 3:
|
||||
# Convert bytes to string for Python3 compatibility
|
||||
addr = addr.decode("utf-8")
|
||||
target.connection_settings['device'] = addr + ':5555'
|
||||
|
||||
def _init_linux_target(self, target):
|
||||
@@ -98,22 +104,26 @@ class VersatileExpressPlatform(Platform):
|
||||
baudrate=self.baudrate,
|
||||
timeout=30,
|
||||
init_dtr=0) as tty:
|
||||
tty.sendline('')
|
||||
tty.sendline('su') # this is, apparently, required to query network device
|
||||
# info by name on recent Juno builds...
|
||||
self.logger.debug('Waiting for the Android shell prompt.')
|
||||
tty.expect(target.shell_prompt)
|
||||
|
||||
self.logger.debug('Waiting for IP address...')
|
||||
wait_start_time = time.time()
|
||||
while True:
|
||||
tty.sendline('ip addr list eth0')
|
||||
time.sleep(1)
|
||||
try:
|
||||
tty.expect(r'inet ([1-9]\d*.\d+.\d+.\d+)', timeout=10)
|
||||
return tty.match.group(1)
|
||||
except pexpect.TIMEOUT:
|
||||
pass # We have our own timeout -- see below.
|
||||
if (time.time() - wait_start_time) > self.ready_timeout:
|
||||
raise TargetError('Could not acquire IP address.')
|
||||
try:
|
||||
while True:
|
||||
tty.sendline('ip addr list eth0')
|
||||
time.sleep(1)
|
||||
try:
|
||||
tty.expect(r'inet ([1-9]\d*.\d+.\d+.\d+)', timeout=10)
|
||||
return tty.match.group(1)
|
||||
except pexpect.TIMEOUT:
|
||||
pass # We have our own timeout -- see below.
|
||||
if (time.time() - wait_start_time) > self.ready_timeout:
|
||||
raise TargetError('Could not acquire IP address.')
|
||||
finally:
|
||||
tty.sendline('exit') # exit shell created by "su" call at the start
|
||||
|
||||
def _set_hard_reset_method(self, hard_reset_method):
|
||||
if hard_reset_method == 'dtr':
|
||||
@@ -145,9 +155,12 @@ class VersatileExpressPlatform(Platform):
|
||||
'bootargs': self.bootargs,
|
||||
}})
|
||||
elif self.bootloader == 'u-boot':
|
||||
uboot_env = None
|
||||
if self.bootargs:
|
||||
uboot_env = {'bootargs': self.bootargs}
|
||||
self.modules.append({'vexpress-u-boot': {'port': self.serial_port,
|
||||
'baudrate': self.baudrate,
|
||||
'env': {'bootargs': self.bootargs},
|
||||
'env': uboot_env,
|
||||
}})
|
||||
elif self.bootloader == 'bootmon':
|
||||
self.modules.append({'vexpress-bootmon': {'port': self.serial_port,
|
||||
@@ -204,25 +217,25 @@ class TC2(VersatileExpressPlatform):
|
||||
class JunoEnergyInstrument(Instrument):
|
||||
|
||||
binname = 'readenergy'
|
||||
mode = CONTINUOUS
|
||||
mode = CONTINUOUS | INSTANTANEOUS
|
||||
|
||||
_channels = [
|
||||
InstrumentChannel('sys_curr', 'sys', 'current'),
|
||||
InstrumentChannel('a57_curr', 'a57', 'current'),
|
||||
InstrumentChannel('a53_curr', 'a53', 'current'),
|
||||
InstrumentChannel('gpu_curr', 'gpu', 'current'),
|
||||
InstrumentChannel('sys_volt', 'sys', 'voltage'),
|
||||
InstrumentChannel('a57_volt', 'a57', 'voltage'),
|
||||
InstrumentChannel('a53_volt', 'a53', 'voltage'),
|
||||
InstrumentChannel('gpu_volt', 'gpu', 'voltage'),
|
||||
InstrumentChannel('sys_pow', 'sys', 'power'),
|
||||
InstrumentChannel('a57_pow', 'a57', 'power'),
|
||||
InstrumentChannel('a53_pow', 'a53', 'power'),
|
||||
InstrumentChannel('gpu_pow', 'gpu', 'power'),
|
||||
InstrumentChannel('sys_cenr', 'sys', 'energy'),
|
||||
InstrumentChannel('a57_cenr', 'a57', 'energy'),
|
||||
InstrumentChannel('a53_cenr', 'a53', 'energy'),
|
||||
InstrumentChannel('gpu_cenr', 'gpu', 'energy'),
|
||||
InstrumentChannel('sys', 'current'),
|
||||
InstrumentChannel('a57', 'current'),
|
||||
InstrumentChannel('a53', 'current'),
|
||||
InstrumentChannel('gpu', 'current'),
|
||||
InstrumentChannel('sys', 'voltage'),
|
||||
InstrumentChannel('a57', 'voltage'),
|
||||
InstrumentChannel('a53', 'voltage'),
|
||||
InstrumentChannel('gpu', 'voltage'),
|
||||
InstrumentChannel('sys', 'power'),
|
||||
InstrumentChannel('a57', 'power'),
|
||||
InstrumentChannel('a53', 'power'),
|
||||
InstrumentChannel('gpu', 'power'),
|
||||
InstrumentChannel('sys', 'energy'),
|
||||
InstrumentChannel('a57', 'energy'),
|
||||
InstrumentChannel('a53', 'energy'),
|
||||
InstrumentChannel('gpu', 'energy'),
|
||||
]
|
||||
|
||||
def __init__(self, target):
|
||||
@@ -233,14 +246,18 @@ class JunoEnergyInstrument(Instrument):
|
||||
for chan in self._channels:
|
||||
self.channels[chan.name] = chan
|
||||
self.on_target_file = self.target.tempfile('energy', '.csv')
|
||||
self.sample_rate_hz = 10 # DEFAULT_PERIOD is 100[ms] in readenergy.c
|
||||
self.command = '{} -o {}'.format(self.binary, self.on_target_file)
|
||||
self.command2 = '{}'.format(self.binary)
|
||||
|
||||
def setup(self):
|
||||
self.binary = self.target.install(os.path.join(PACKAGE_BIN_DIRECTORY,
|
||||
self.target.abi, self.binname))
|
||||
self.command = '{} -o {}'.format(self.binary, self.on_target_file)
|
||||
self.command2 = '{}'.format(self.binary)
|
||||
|
||||
def reset(self, sites=None, kinds=None):
|
||||
super(JunoEnergyInstrument, self).reset(sites, kinds)
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(JunoEnergyInstrument, self).reset(sites, kinds, channels)
|
||||
self.target.killall(self.binname, as_root=True)
|
||||
|
||||
def start(self):
|
||||
@@ -254,9 +271,8 @@ class JunoEnergyInstrument(Instrument):
|
||||
self.target.pull(self.on_target_file, temp_file)
|
||||
self.target.remove(self.on_target_file)
|
||||
|
||||
with open(temp_file, 'rb') as fh:
|
||||
reader = csv.reader(fh)
|
||||
headings = reader.next()
|
||||
with csvreader(temp_file) as reader:
|
||||
headings = next(reader)
|
||||
|
||||
# Figure out which columns from the collected csv we actually want
|
||||
select_columns = []
|
||||
@@ -266,15 +282,24 @@ class JunoEnergyInstrument(Instrument):
|
||||
except ValueError:
|
||||
raise HostError('Channel "{}" is not in {}'.format(chan.name, temp_file))
|
||||
|
||||
with open(output_file, 'wb') as wfh:
|
||||
with csvwriter(output_file) as writer:
|
||||
write_headings = ['{}_{}'.format(c.site, c.kind)
|
||||
for c in self.active_channels]
|
||||
writer = csv.writer(wfh)
|
||||
writer.writerow(write_headings)
|
||||
for row in reader:
|
||||
write_row = [row[c] for c in select_columns]
|
||||
writer.writerow(write_row)
|
||||
|
||||
return MeasurementsCsv(output_file, self.active_channels)
|
||||
return MeasurementsCsv(output_file, self.active_channels, sample_rate_hz=10)
|
||||
|
||||
def take_measurement(self):
|
||||
result = []
|
||||
output = self.target.execute(self.command2).split()
|
||||
with csvreader(output) as reader:
|
||||
headings=next(reader)
|
||||
values = next(reader)
|
||||
for chan in self.active_channels:
|
||||
value = values[headings.index(chan.name)]
|
||||
result.append(Measurement(value, chan))
|
||||
return result
|
||||
|
||||
|
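An illustrative use of the INSTANTANEOUS mode added above (a sketch; it assumes a connected Juno target and uses the channel/measurement attributes devlib already provides):

# Hypothetical spot measurement with readenergy
instrument = JunoEnergyInstrument(target)
instrument.setup()                  # installs the readenergy binary on the target
instrument.reset(kinds=['power'])   # activate only the power channels
for measurement in instrument.take_measurement():
    print(measurement.channel.label, measurement.value)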
299
devlib/platform/gem5.py
Normal file
@@ -0,0 +1,299 @@
|
||||
# Copyright 2016-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import shutil
|
||||
import time
|
||||
import types
|
||||
|
||||
from devlib.exception import TargetError
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.platform import Platform
|
||||
from devlib.utils.ssh import AndroidGem5Connection, LinuxGem5Connection
|
||||
|
||||
class Gem5SimulationPlatform(Platform):
|
||||
|
||||
def __init__(self, name,
|
||||
host_output_dir,
|
||||
gem5_bin,
|
||||
gem5_args,
|
||||
gem5_virtio,
|
||||
core_names=None,
|
||||
core_clusters=None,
|
||||
big_core=None,
|
||||
model=None,
|
||||
modules=None,
|
||||
gem5_telnet_port=None):
|
||||
|
||||
# First call the parent class
|
||||
super(Gem5SimulationPlatform, self).__init__(name, core_names, core_clusters,
|
||||
big_core, model, modules)
|
||||
|
||||
# Start setting up the gem5 parameters/directories
|
||||
# The gem5 subprocess
|
||||
self.gem5 = None
|
||||
self.gem5_port = gem5_telnet_port or None
|
||||
self.stats_directory = host_output_dir
|
||||
self.gem5_out_dir = os.path.join(self.stats_directory, "gem5")
|
||||
self.gem5_interact_dir = '/tmp' # Host directory
|
||||
self.executable_dir = None # Device directory
|
||||
self.working_dir = None # Device directory
|
||||
self.stdout_file = None
|
||||
self.stderr_file = None
|
||||
self.stderr_filename = None
|
||||
if self.gem5_port is None:
|
||||
# Allows devlib to pick up already running simulations
|
||||
self.start_gem5_simulation = True
|
||||
else:
|
||||
self.start_gem5_simulation = False
|
||||
|
||||
# Find the first one that does not exist. Ensures that we do not re-use
|
||||
# the directory used by someone else.
|
||||
i = 0
|
||||
directory = os.path.join(self.gem5_interact_dir, "wa_{}".format(i))
|
||||
while os.path.exists(directory):
|
||||
i += 1
|
||||
directory = os.path.join(self.gem5_interact_dir, "wa_{}".format(i))
|
||||
|
||||
self.gem5_interact_dir = directory
|
||||
self.logger.debug("Using {} as the temporary directory."
|
||||
.format(self.gem5_interact_dir))
|
||||
|
||||
# Parameters passed onto gem5
|
||||
self.gem5args_binary = gem5_bin
|
||||
self.gem5args_args = gem5_args
|
||||
self.gem5args_virtio = gem5_virtio
|
||||
self._check_gem5_command()
|
||||
|
||||
# Start the interaction with gem5
|
||||
self._start_interaction_gem5()
|
||||
|
||||
def _check_gem5_command(self):
|
||||
"""
|
||||
Check if the command to start gem5 makes sense
|
||||
"""
|
||||
if self.gem5args_binary is None:
|
||||
raise TargetError('Please specify a gem5 binary.')
|
||||
if self.gem5args_args is None:
|
||||
raise TargetError('Please specify the arguments passed on to gem5.')
|
||||
self.gem5args_virtio = str(self.gem5args_virtio).format(self.gem5_interact_dir)
|
||||
if self.gem5args_virtio is None:
|
||||
raise TargetError('Please specify arguments needed for virtIO.')
|
||||
|
||||
def _start_interaction_gem5(self):
|
||||
"""
|
||||
Starts the interaction of devlib with gem5.
|
||||
"""
|
||||
|
||||
# First create the input and output directories for gem5
|
||||
if self.start_gem5_simulation:
|
||||
# Create the directory to send data to/from gem5 system
|
||||
self.logger.info("Creating temporary directory for interaction "
|
||||
" with gem5 via virtIO: {}"
|
||||
.format(self.gem5_interact_dir))
|
||||
os.mkdir(self.gem5_interact_dir)
|
||||
|
||||
# Create the directory for gem5 output (stats files etc)
|
||||
if not os.path.exists(self.stats_directory):
|
||||
os.mkdir(self.stats_directory)
|
||||
if os.path.exists(self.gem5_out_dir):
|
||||
raise TargetError("The gem5 stats directory {} already "
|
||||
"exists.".format(self.gem5_out_dir))
|
||||
else:
|
||||
os.mkdir(self.gem5_out_dir)
|
||||
|
||||
# We need to redirect the standard output and standard error for the
|
||||
# gem5 process to a file so that we can debug when things go wrong.
|
||||
f = os.path.join(self.gem5_out_dir, 'stdout')
|
||||
self.stdout_file = open(f, 'w')
|
||||
f = os.path.join(self.gem5_out_dir, 'stderr')
|
||||
self.stderr_file = open(f, 'w')
|
||||
# We need to keep this so we can check which port to use for the
|
||||
# telnet connection.
|
||||
self.stderr_filename = f
|
||||
|
||||
# Start gem5 simulation
|
||||
self.logger.info("Starting the gem5 simulator")
|
||||
|
||||
command_line = "{} --outdir={} {} {}".format(self.gem5args_binary,
|
||||
self.gem5_out_dir,
|
||||
self.gem5args_args,
|
||||
self.gem5args_virtio)
|
||||
self.logger.debug("gem5 command line: {}".format(command_line))
|
||||
self.gem5 = subprocess.Popen(command_line.split(),
|
||||
stdout=self.stdout_file,
|
||||
stderr=self.stderr_file)
|
||||
|
||||
else:
|
||||
# The simulation should already be running
|
||||
# Need to dig up the (1) gem5 simulation in question (2) its input
|
||||
# and output directories (3) virtio setting
|
||||
self._intercept_existing_gem5()
|
||||
|
||||
# As the gem5 simulation is running now or was already running
|
||||
# we now need to find out which telnet port it uses
|
||||
self._intercept_telnet_port()
|
||||
|
||||
def _intercept_existing_gem5(self):
|
||||
"""
|
||||
Intercept the information about a running gem5 simulation
|
||||
e.g. pid, input directory etc
|
||||
"""
|
||||
self.logger("This functionality is not yet implemented")
|
||||
raise TargetError()
|
||||
|
||||
def _intercept_telnet_port(self):
|
||||
"""
|
||||
Intercept the telnet port of a running gem5 simulation
|
||||
"""
|
||||
|
||||
if self.gem5 is None:
|
||||
raise TargetError('The platform has no gem5 simulation! '
|
||||
'Something went wrong')
|
||||
while self.gem5_port is None:
|
||||
# Check that gem5 is running!
|
||||
if self.gem5.poll():
|
||||
message = "The gem5 process has crashed with error code {}!\n\tPlease see {} for details."
|
||||
raise TargetError(message.format(self.gem5.poll(), self.stderr_file.name))
|
||||
|
||||
# Open the stderr file
|
||||
with open(self.stderr_filename, 'r') as f:
|
||||
for line in f:
|
||||
# Look for two different strings, exact wording depends on
|
||||
# version of gem5
|
||||
m = re.search(r"Listening for system connection on port (?P<port>\d+)", line)
|
||||
if not m:
|
||||
m = re.search(r"Listening for connections on port (?P<port>\d+)", line)
|
||||
if m:
|
||||
port = int(m.group('port'))
|
||||
if port >= 3456 and port < 5900:
|
||||
self.gem5_port = port
|
||||
break
|
||||
# Check if the sockets are not disabled
|
||||
m = re.search(r"Sockets disabled, not accepting terminal connections", line)
|
||||
if m:
|
||||
raise TargetError("The sockets have been disabled!"
|
||||
"Pass --listener-mode=on to gem5")
|
||||
else:
|
||||
time.sleep(1)
|
||||
|
||||
def init_target_connection(self, target):
|
||||
"""
|
||||
Update the type of connection in the target from here
|
||||
"""
|
||||
if target.os == 'linux':
|
||||
target.conn_cls = LinuxGem5Connection
|
||||
else:
|
||||
target.conn_cls = AndroidGem5Connection
|
||||
|
||||
def setup(self, target):
|
||||
"""
|
||||
Deploy m5 if not yet installed
|
||||
"""
|
||||
m5_path = self._deploy_m5(target)
|
||||
target.conn.m5_path = m5_path
|
||||
|
||||
# Set the terminal settings for the connection to gem5
|
||||
self._resize_shell(target)
|
||||
|
||||
def update_from_target(self, target):
|
||||
"""
|
||||
Set the m5 path and if not yet installed, deploy m5
|
||||
Overwrite certain methods in the target that either can be done
|
||||
more efficiently by gem5 or don't exist in gem5
|
||||
"""
|
||||
m5_path = target.get_installed('m5')
|
||||
if m5_path is None:
|
||||
m5_path = self._deploy_m5(target)
|
||||
target.conn.m5_path = m5_path
|
||||
|
||||
# Overwrite the following methods (monkey-patching)
|
||||
self.logger.debug("Overwriting the 'capture_screen' method in target")
|
||||
# Housekeeping to prevent recursion
|
||||
setattr(target, 'target_impl_capture_screen', target.capture_screen)
|
||||
target.capture_screen = types.MethodType(_overwritten_capture_screen, target)
|
||||
self.logger.debug("Overwriting the 'reset' method in target")
|
||||
target.reset = types.MethodType(_overwritten_reset, target)
|
||||
self.logger.debug("Overwriting the 'reboot' method in target")
|
||||
target.reboot = types.MethodType(_overwritten_reboot, target)
|
||||
|
||||
# Call the general update_from_target implementation
|
||||
super(Gem5SimulationPlatform, self).update_from_target(target)
|
||||
|
||||
def gem5_capture_screen(self, filepath):
|
||||
file_list = os.listdir(self.gem5_out_dir)
|
||||
screen_caps = []
|
||||
for f in file_list:
|
||||
if '.bmp' in f:
|
||||
screen_caps.append(f)
|
||||
|
||||
if '{ts}' in filepath:
|
||||
cmd = '{} date -u -Iseconds'
|
||||
ts = self.target.execute(cmd.format(self.target.busybox)).strip()
|
||||
filepath = filepath.format(ts=ts)
|
||||
|
||||
successful_capture = False
|
||||
if len(screen_caps) == 1:
|
||||
# Bail out if we do not have image, and resort to the slower, built
|
||||
# in method.
|
||||
try:
|
||||
import Image
|
||||
gem5_image = os.path.join(self.gem5_out_dir, screen_caps[0])
|
||||
temp_image = os.path.join(self.gem5_out_dir, "file.png")
|
||||
im = Image.open(gem5_image)
|
||||
im.save(temp_image, "PNG")
|
||||
shutil.copy(temp_image, filepath)
|
||||
os.remove(temp_image)
|
||||
self.logger.info("capture_screen: using gem5 screencap")
|
||||
successful_capture = True
|
||||
|
||||
except (shutil.Error, ImportError, IOError):
|
||||
pass
|
||||
|
||||
return successful_capture
|
||||
|
||||
def _deploy_m5(self, target):
|
||||
# m5 is not yet installed so install it
|
||||
host_executable = os.path.join(PACKAGE_BIN_DIRECTORY,
|
||||
target.abi, 'm5')
|
||||
return target.install(host_executable)
|
||||
|
||||
def _resize_shell(self, target):
|
||||
"""
|
||||
Resize the shell to avoid line wrapping issues.
|
||||
|
||||
"""
|
||||
# Try and avoid line wrapping as much as possible.
|
||||
target.execute('{} stty columns 1024'.format(target.busybox))
|
||||
target.execute('reset', check_exit_code=False)
|
||||
|
||||
# Methods that will be monkey-patched onto the target
|
||||
def _overwritten_reset(self):
|
||||
raise TargetError('Resetting is not allowed on gem5 platforms!')
|
||||
|
||||
def _overwritten_reboot(self):
|
||||
raise TargetError('Rebooting is not allowed on gem5 platforms!')
|
||||
|
||||
def _overwritten_capture_screen(self, filepath):
|
||||
connection_screencapped = self.platform.gem5_capture_screen(filepath)
|
||||
if connection_screencapped == False:
|
||||
# The connection was not able to capture the screen so use the target
|
||||
# implementation
|
||||
self.logger.debug('{} was not able to screen cap, using the original target implementation'.format(self.platform.__class__.__name__))
|
||||
self.target_impl_capture_screen(filepath)
|
||||
|
||||
|
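A hedged sketch of how this platform is typically wired up; every argument value below is a placeholder, not a tested configuration:

from devlib import AndroidTarget
from devlib.platform.gem5 import Gem5SimulationPlatform

# Hypothetical: start a local gem5 simulation and attach an Android target to it
platform = Gem5SimulationPlatform(
    name='gem5',
    host_output_dir='./gem5-stats',                  # becomes stats_directory on the host
    gem5_bin='/path/to/gem5.opt',                    # placeholder path
    gem5_args='configs/example/fs.py --some-option', # placeholder arguments
    gem5_virtio='--virtio-image-dir={}',             # '{}' is filled with the interact dir
)
target = AndroidTarget(platform=platform)            # conn_cls is swapped to AndroidGem5Connection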
1222
devlib/target.py
File diff suppressed because it is too large
@@ -1,3 +1,18 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -15,8 +15,11 @@
|
||||
|
||||
from __future__ import division
|
||||
import os
|
||||
import json
|
||||
import time
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
@@ -27,6 +30,7 @@ from devlib.utils.misc import check_output, which
|
||||
TRACE_MARKER_START = 'TRACE_MARKER_START'
|
||||
TRACE_MARKER_STOP = 'TRACE_MARKER_STOP'
|
||||
OUTPUT_TRACE_FILE = 'trace.dat'
|
||||
OUTPUT_PROFILE_FILE = 'trace_stat.dat'
|
||||
DEFAULT_EVENTS = [
|
||||
'cpu_frequency',
|
||||
'cpu_idle',
|
||||
@@ -40,43 +44,61 @@ DEFAULT_EVENTS = [
|
||||
]
|
||||
TIMEOUT = 180
|
||||
|
||||
# Regexps for parsing of function profiling data
|
||||
CPU_RE = re.compile(r' Function \(CPU([0-9]+)\)')
|
||||
STATS_RE = re.compile(r'([^ ]*) +([0-9]+) +([0-9.]+) us +([0-9.]+) us +([0-9.]+) us')
|
||||
|
||||
class FtraceCollector(TraceCollector):
|
||||
|
||||
def __init__(self, target,
|
||||
events=None,
|
||||
functions=None,
|
||||
buffer_size=None,
|
||||
buffer_size_step=1000,
|
||||
buffer_size_file='/sys/kernel/debug/tracing/buffer_size_kb',
|
||||
marker_file='/sys/kernel/debug/tracing/trace_marker',
|
||||
tracing_path='/sys/kernel/debug/tracing',
|
||||
automark=True,
|
||||
autoreport=True,
|
||||
autoview=False,
|
||||
no_install=False,
|
||||
strict=False,
|
||||
report_on_target=False,
|
||||
):
|
||||
super(FtraceCollector, self).__init__(target)
|
||||
self.events = events if events is not None else DEFAULT_EVENTS
|
||||
self.functions = functions
|
||||
self.buffer_size = buffer_size
|
||||
self.buffer_size_step = buffer_size_step
|
||||
self.buffer_size_file = buffer_size_file
|
||||
self.marker_file = marker_file
|
||||
self.tracing_path = tracing_path
|
||||
self.automark = automark
|
||||
self.autoreport = autoreport
|
||||
self.autoview = autoview
|
||||
self.target_output_file = os.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
|
||||
self.report_on_target = report_on_target
|
||||
self.target_output_file = target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
|
||||
text_file_name = target.path.splitext(OUTPUT_TRACE_FILE)[0] + '.txt'
|
||||
self.target_text_file = target.path.join(self.target.working_directory, text_file_name)
|
||||
self.target_binary = None
|
||||
self.host_binary = None
|
||||
self.start_time = None
|
||||
self.stop_time = None
|
||||
self.event_string = _build_trace_events(self.events)
|
||||
self.event_string = None
|
||||
self.function_string = None
|
||||
self._reset_needed = True
|
||||
|
||||
# Setup tracing paths
|
||||
self.available_events_file = self.target.path.join(self.tracing_path, 'available_events')
|
||||
self.available_functions_file = self.target.path.join(self.tracing_path, 'available_filter_functions')
|
||||
self.buffer_size_file = self.target.path.join(self.tracing_path, 'buffer_size_kb')
|
||||
self.current_tracer_file = self.target.path.join(self.tracing_path, 'current_tracer')
|
||||
self.function_profile_file = self.target.path.join(self.tracing_path, 'function_profile_enabled')
|
||||
self.marker_file = self.target.path.join(self.tracing_path, 'trace_marker')
|
||||
self.ftrace_filter_file = self.target.path.join(self.tracing_path, 'set_ftrace_filter')
|
||||
|
||||
self.host_binary = which('trace-cmd')
|
||||
self.kernelshark = which('kernelshark')
|
||||
|
||||
if not self.target.is_rooted:
|
||||
raise TargetError('trace-cmd instrument cannot be used on an unrooted device.')
|
||||
if self.autoreport and self.host_binary is None:
|
||||
if self.autoreport and not self.report_on_target and self.host_binary is None:
|
||||
raise HostError('trace-cmd binary must be installed on the host if autoreport=True.')
|
||||
if self.autoview and self.kernelshark is None:
|
||||
raise HostError('kernelshark binary must be installed on the host if autoview=True.')
|
||||
@@ -88,53 +110,165 @@ class FtraceCollector(TraceCollector):
|
||||
raise TargetError('No trace-cmd found on device and no_install=True is specified.')
|
||||
self.target_binary = 'trace-cmd'
|
||||
|
||||
# Validate required events to be traced
|
||||
available_events = self.target.execute(
|
||||
'cat {}'.format(self.available_events_file),
|
||||
as_root=True).splitlines()
|
||||
selected_events = []
|
||||
for event in self.events:
|
||||
# Convert globs supported by FTrace into valid regexp globs
|
||||
_event = event
|
||||
if event[0] != '*':
|
||||
_event = '*' + event
|
||||
event_re = re.compile(_event.replace('*', '.*'))
|
||||
# Select events matching the required ones
|
||||
if len(list(filter(event_re.match, available_events))) == 0:
|
||||
message = 'Event [{}] not available for tracing'.format(event)
|
||||
if strict:
|
||||
raise TargetError(message)
|
||||
self.target.logger.warning(message)
|
||||
else:
|
||||
selected_events.append(event)
|
||||
# If function profiling is enabled we always need at least one event.
|
||||
# Thus, if no other events have been specified, try to add at least
# a tracepoint which is always available and possibly triggered a few
# times.
|
||||
if self.functions and len(selected_events) == 0:
|
||||
selected_events = ['sched_wakeup_new']
|
||||
self.event_string = _build_trace_events(selected_events)
|
||||
|
||||
# Check for function tracing support
|
||||
if self.functions:
|
||||
if not self.target.file_exists(self.function_profile_file):
|
||||
raise TargetError('Function profiling not supported. '\
|
||||
'A kernel build with CONFIG_FUNCTION_PROFILER enabled is required')
|
||||
# Validate required functions to be traced
|
||||
available_functions = self.target.execute(
|
||||
'cat {}'.format(self.available_functions_file),
|
||||
as_root=True).splitlines()
|
||||
selected_functions = []
|
||||
for function in self.functions:
|
||||
if function not in available_functions:
|
||||
message = 'Function [{}] not available for profiling'.format(function)
|
||||
if strict:
|
||||
raise TargetError(message)
|
||||
self.target.logger.warning(message)
|
||||
else:
|
||||
selected_functions.append(function)
|
||||
self.function_string = _build_trace_functions(selected_functions)
|
||||
|
||||
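The glob handling above can be illustrated in isolation (a standalone sketch, not part of the collector):

import re

def event_available(event, available_events):
    # FTrace-style glob: add a leading '*' so subsystem-prefixed names also match
    _event = event if event.startswith('*') else '*' + event
    event_re = re.compile(_event.replace('*', '.*'))
    return any(event_re.match(candidate) for candidate in available_events)

# 'sched_switch' matches 'sched:sched_switch' thanks to the implicit leading '*'
assert event_available('sched_switch', ['sched:sched_switch', 'power:cpu_idle'])
assert not event_available('binder_transaction', ['sched:sched_switch'])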
def reset(self):
|
||||
if self.buffer_size:
|
||||
self._set_buffer_size()
|
||||
self.target.execute('{} reset'.format(self.target_binary), as_root=True, timeout=TIMEOUT)
|
||||
self.target.execute('{} reset'.format(self.target_binary),
|
||||
as_root=True, timeout=TIMEOUT)
|
||||
self._reset_needed = False
|
||||
|
||||
def start(self):
|
||||
self.start_time = time.time()
|
||||
if self._reset_needed:
|
||||
self.reset()
|
||||
self.target.execute('{} start {}'.format(self.target_binary, self.event_string),
|
||||
as_root=True)
|
||||
if self.automark:
|
||||
self.mark_start()
|
||||
self.target.execute('{} start {}'.format(self.target_binary, self.event_string), as_root=True)
|
||||
if 'cpufreq' in self.target.modules:
|
||||
self.logger.debug('Trace CPUFreq frequencies')
|
||||
self.target.cpufreq.trace_frequencies()
|
||||
if 'cpuidle' in self.target.modules:
|
||||
self.logger.debug('Trace CPUIdle states')
|
||||
self.target.cpuidle.perturb_cpus()
|
||||
# Enable kernel function profiling
|
||||
if self.functions:
|
||||
self.target.execute('echo nop > {}'.format(self.current_tracer_file),
|
||||
as_root=True)
|
||||
self.target.execute('echo 0 > {}'.format(self.function_profile_file),
|
||||
as_root=True)
|
||||
self.target.execute('echo {} > {}'.format(self.function_string, self.ftrace_filter_file),
|
||||
as_root=True)
|
||||
self.target.execute('echo 1 > {}'.format(self.function_profile_file),
|
||||
as_root=True)
|
||||
|
||||
|
||||
def stop(self):
|
||||
# Disable kernel function profiling
|
||||
if self.functions:
|
||||
self.target.execute('echo 1 > {}'.format(self.function_profile_file),
|
||||
as_root=True)
|
||||
if 'cpufreq' in self.target.modules:
|
||||
self.logger.debug('Trace CPUFreq frequencies')
|
||||
self.target.cpufreq.trace_frequencies()
|
||||
self.stop_time = time.time()
|
||||
if self.automark:
|
||||
self.mark_stop()
|
||||
self.target.execute('{} stop'.format(self.target_binary), timeout=TIMEOUT, as_root=True)
|
||||
self.target.execute('{} stop'.format(self.target_binary),
|
||||
timeout=TIMEOUT, as_root=True)
|
||||
self._reset_needed = True
|
||||
|
||||
def get_trace(self, outfile):
|
||||
if os.path.isdir(outfile):
|
||||
outfile = os.path.join(outfile, os.path.dirname(self.target_output_file))
|
||||
self.target.execute('{} extract -o {}'.format(self.target_binary, self.target_output_file),
|
||||
outfile = os.path.join(outfile, os.path.basename(self.target_output_file))
|
||||
self.target.execute('{0} extract -o {1}; chmod 666 {1}'.format(self.target_binary,
|
||||
self.target_output_file),
|
||||
timeout=TIMEOUT, as_root=True)
|
||||
|
||||
# The size of trace.dat will depend on how long trace-cmd was running.
|
||||
# Therefore the timeout for the pull command must also be adjusted
|
||||
# accordingly.
|
||||
pull_timeout = self.stop_time - self.start_time
|
||||
pull_timeout = 10 * (self.stop_time - self.start_time)
|
||||
self.target.pull(self.target_output_file, outfile, timeout=pull_timeout)
|
||||
if not os.path.isfile(outfile):
|
||||
self.logger.warning('Binary trace not pulled from device.')
|
||||
else:
|
||||
if self.autoreport:
|
||||
textfile = os.path.splitext(outfile)[0] + '.txt'
|
||||
self.report(outfile, textfile)
|
||||
if self.report_on_target:
|
||||
self.generate_report_on_target()
|
||||
self.target.pull(self.target_text_file,
|
||||
textfile, timeout=pull_timeout)
|
||||
else:
|
||||
self.report(outfile, textfile)
|
||||
if self.autoview:
|
||||
self.view(outfile)
|
||||
|
||||
def get_stats(self, outfile):
|
||||
if not self.functions:
|
||||
return
|
||||
|
||||
if os.path.isdir(outfile):
|
||||
outfile = os.path.join(outfile, OUTPUT_PROFILE_FILE)
|
||||
output = self.target._execute_util('ftrace_get_function_stats',
|
||||
as_root=True)
|
||||
|
||||
function_stats = {}
|
||||
for line in output.splitlines():
|
||||
# Match a new CPU dataset
|
||||
match = CPU_RE.search(line)
|
||||
if match:
|
||||
cpu_id = int(match.group(1))
|
||||
function_stats[cpu_id] = {}
|
||||
self.logger.debug("Processing stats for CPU%d...", cpu_id)
|
||||
continue
|
||||
# Match a new function dataset
|
||||
match = STATS_RE.search(line)
|
||||
if match:
|
||||
fname = match.group(1)
|
||||
function_stats[cpu_id][fname] = {
|
||||
'hits' : int(match.group(2)),
|
||||
'time' : float(match.group(3)),
|
||||
'avg' : float(match.group(4)),
|
||||
's_2' : float(match.group(5)),
|
||||
}
|
||||
self.logger.debug(" %s: %s",
|
||||
fname, function_stats[cpu_id][fname])
|
||||
|
||||
self.logger.debug("FTrace stats output [%s]...", outfile)
|
||||
with open(outfile, 'w') as fh:
|
||||
json.dump(function_stats, fh, indent=4)
|
||||
self.logger.debug("FTrace function stats save in [%s]", outfile)
|
||||
|
||||
return function_stats
|
||||
|
||||
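The JSON written by get_stats() has roughly this shape (numbers invented for illustration):

# Hypothetical trace_stat.dat contents: per-CPU, per-function profiling data
{
    "0": {
        "schedule": {"hits": 1285, "time": 5342.12, "avg": 4.15, "s_2": 1.97},
        "do_idle":  {"hits": 321,  "time": 9821.55, "avg": 30.59, "s_2": 12.40}
    },
    "1": {
        "schedule": {"hits": 984, "time": 4123.77, "avg": 4.19, "s_2": 2.01}
    }
}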
def report(self, binfile, destfile):
|
||||
# To get the output of trace.dat, trace-cmd must be installed
|
||||
# This is done host-side because the generated file is very large
|
||||
@@ -143,6 +277,8 @@ class FtraceCollector(TraceCollector):
|
||||
self.logger.debug(command)
|
||||
process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
|
||||
_, error = process.communicate()
|
||||
if sys.version_info[0] == 3:
|
||||
error = error.decode(sys.stdout.encoding, 'replace')
|
||||
if process.returncode:
|
||||
raise TargetError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
|
||||
if error:
|
||||
@@ -163,6 +299,12 @@ class FtraceCollector(TraceCollector):
|
||||
except OSError:
|
||||
raise HostError('Could not find trace-cmd. Please make sure it is installed and is in PATH.')
|
||||
|
||||
def generate_report_on_target(self):
|
||||
command = '{} report {} > {}'.format(self.target_binary,
|
||||
self.target_output_file,
|
||||
self.target_text_file)
|
||||
self.target.execute(command, timeout=TIMEOUT)
|
||||
|
||||
def view(self, binfile):
|
||||
check_output('{} {}'.format(self.kernelshark, binfile), shell=True)
|
||||
|
||||
@@ -203,3 +345,6 @@ def _build_trace_events(events):
|
||||
event_string = ' '.join(['-e {}'.format(e) for e in events])
|
||||
return event_string
|
||||
|
||||
def _build_trace_functions(functions):
|
||||
function_string = " ".join(functions)
|
||||
return function_string
|
||||
|
73
devlib/trace/logcat.py
Normal file
@@ -0,0 +1,73 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.utils.android import LogcatMonitor
|
||||
|
||||
class LogcatCollector(TraceCollector):
|
||||
|
||||
def __init__(self, target, regexps=None):
|
||||
super(LogcatCollector, self).__init__(target)
|
||||
self.regexps = regexps
|
||||
self._collecting = False
|
||||
self._prev_log = None
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Clear Collector data but do not interrupt collection
|
||||
"""
|
||||
if not self._monitor:
|
||||
return
|
||||
|
||||
if self._collecting:
|
||||
self._monitor.clear_log()
|
||||
elif self._prev_log:
|
||||
os.remove(self._prev_log)
|
||||
self._prev_log = None
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
Start collecting logcat lines
|
||||
"""
|
||||
self._monitor = LogcatMonitor(self.target, self.regexps)
|
||||
if self._prev_log:
|
||||
# Append new data collection to previous collection
|
||||
self._monitor.start(self._prev_log)
|
||||
else:
|
||||
self._monitor.start()
|
||||
|
||||
self._collecting = True
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
Stop collecting logcat lines
|
||||
"""
|
||||
if not self._collecting:
|
||||
raise RuntimeError('Logcat monitor not running, nothing to stop')
|
||||
|
||||
self._monitor.stop()
|
||||
self._collecting = False
|
||||
self._prev_log = self._monitor.logfile
|
||||
|
||||
def get_trace(self, outfile):
|
||||
"""
|
||||
Output collected logcat lines to designated file
|
||||
"""
|
||||
# copy self._monitor.logfile to outfile
|
||||
shutil.copy(self._monitor.logfile, outfile)
|
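A brief usage sketch for the collector above (regexps, file names and the workload call are placeholders):

# Hypothetical: keep only battery- and thermal-related logcat lines around a workload
collector = LogcatCollector(target, regexps=['BatteryService', 'thermal'])
collector.start()
run_workload()            # placeholder for whatever is being exercised
collector.stop()
collector.get_trace('logcat.txt')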
98
devlib/trace/screencapture.py
Normal file
@@ -0,0 +1,98 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.exception import WorkerThreadError
|
||||
|
||||
|
||||
class ScreenCapturePoller(threading.Thread):
|
||||
|
||||
def __init__(self, target, period, output_path=None, timeout=30):
|
||||
super(ScreenCapturePoller, self).__init__()
|
||||
self.target = target
|
||||
self.logger = logging.getLogger('screencapture')
|
||||
self.period = period
|
||||
self.timeout = timeout
|
||||
self.stop_signal = threading.Event()
|
||||
self.lock = threading.Lock()
|
||||
self.last_poll = 0
|
||||
self.daemon = True
|
||||
self.exc = None
|
||||
self.output_path = output_path
|
||||
|
||||
def run(self):
|
||||
self.logger.debug('Starting screen capture polling')
|
||||
try:
|
||||
while True:
|
||||
if self.stop_signal.is_set():
|
||||
break
|
||||
with self.lock:
|
||||
current_time = time.time()
|
||||
if (current_time - self.last_poll) >= self.period:
|
||||
self.poll()
|
||||
time.sleep(0.5)
|
||||
except Exception: # pylint: disable=W0703
|
||||
self.exc = WorkerThreadError(self.name, sys.exc_info())
|
||||
|
||||
def stop(self):
|
||||
self.logger.debug('Stopping screen capture polling')
|
||||
self.stop_signal.set()
|
||||
self.join(self.timeout)
|
||||
if self.is_alive():
|
||||
self.logger.error('Could not join screen capture poller thread.')
|
||||
if self.exc:
|
||||
raise self.exc # pylint: disable=E0702
|
||||
|
||||
def poll(self):
|
||||
self.last_poll = time.time()
|
||||
self.target.capture_screen(os.path.join(self.output_path, "screencap_{ts}.png"))
|
||||
|
||||
|
||||
class ScreenCaptureCollector(TraceCollector):
|
||||
|
||||
def __init__(self, target, output_path=None, period=None):
|
||||
super(ScreenCaptureCollector, self).__init__(target)
|
||||
self._collecting = False
|
||||
self.output_path = output_path
|
||||
self.period = period
|
||||
self.target = target
|
||||
self._poller = ScreenCapturePoller(self.target, self.period,
|
||||
self.output_path)
|
||||
|
||||
def reset(self):
|
||||
pass
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
Start collecting the screenshots
|
||||
"""
|
||||
self._poller.start()
|
||||
self._collecting = True
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
Stop collecting the screenshots
|
||||
"""
|
||||
if not self._collecting:
|
||||
raise RuntimeError('Screen capture collector is not running, nothing to stop')
|
||||
|
||||
self._poller.stop()
|
||||
self._collecting = False
|
92
devlib/trace/serial_trace.py
Normal file
92
devlib/trace/serial_trace.py
Normal file
@@ -0,0 +1,92 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from pexpect.exceptions import TIMEOUT
|
||||
import shutil
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.utils.serial_port import get_connection
|
||||
|
||||
|
||||
class SerialTraceCollector(TraceCollector):
|
||||
|
||||
@property
|
||||
def collecting(self):
|
||||
return self._collecting
|
||||
|
||||
def __init__(self, target, serial_port, baudrate, timeout=20):
|
||||
super(SerialTraceCollector, self).__init__(target)
|
||||
self.serial_port = serial_port
|
||||
self.baudrate = baudrate
|
||||
self.timeout = timeout
|
||||
|
||||
self._serial_target = None
|
||||
self._conn = None
|
||||
self._tmpfile = None
|
||||
self._collecting = False
|
||||
|
||||
def reset(self):
|
||||
if self._collecting:
|
||||
raise RuntimeError("reset was called whilst collecting")
|
||||
|
||||
if self._tmpfile:
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
||||
|
||||
def start(self):
|
||||
if self._collecting:
|
||||
raise RuntimeError("start was called whilst collecting")
|
||||
|
||||
|
||||
self._tmpfile = NamedTemporaryFile()
|
||||
self._tmpfile.write("-------- Starting serial logging --------\n")
|
||||
|
||||
self._serial_target, self._conn = get_connection(port=self.serial_port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=self.timeout,
|
||||
logfile=self._tmpfile,
|
||||
init_dtr=0)
|
||||
self._collecting = True
|
||||
|
||||
def stop(self):
|
||||
if not self._collecting:
|
||||
raise RuntimeError("stop was called whilst not collecting")
|
||||
|
||||
# We expect the below to fail, but we need to get pexpect to
|
||||
# do something so that it interacts with the serial device,
|
||||
# and hence updates the logfile.
|
||||
try:
|
||||
self._serial_target.expect(".", timeout=1)
|
||||
except TIMEOUT:
|
||||
pass
|
||||
|
||||
self._serial_target.close()
|
||||
del self._conn
|
||||
|
||||
self._tmpfile.write("-------- Stopping serial logging --------\n")
|
||||
|
||||
self._collecting = False
|
||||
|
||||
def get_trace(self, outfile):
|
||||
if self._collecting:
|
||||
raise RuntimeError("get_trace was called whilst collecting")
|
||||
|
||||
self._tmpfile.flush()
|
||||
|
||||
shutil.copy(self._tmpfile.name, outfile)
|
||||
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
173
devlib/trace/systrace.py
Normal file
173
devlib/trace/systrace.py
Normal file
@@ -0,0 +1,173 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Copyright 2018 Arm Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
from shutil import copyfile
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.utils.android import platform_tools
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
|
||||
DEFAULT_CATEGORIES = [
|
||||
'gfx',
|
||||
'view',
|
||||
'sched',
|
||||
'freq',
|
||||
'idle'
|
||||
]
|
||||
|
||||
class SystraceCollector(TraceCollector):
|
||||
"""
|
||||
A trace collector based on Systrace
|
||||
|
||||
For more details, see https://developer.android.com/studio/command-line/systrace
|
||||
|
||||
:param target: Devlib target
|
||||
:type target: AndroidTarget
|
||||
|
||||
:param outdir: Working directory to use on the host
|
||||
:type outdir: str
|
||||
|
||||
:param categories: Systrace categories to trace. See `available_categories`
|
||||
:type categories: list(str)
|
||||
|
||||
:param buffer_size: Buffer size in kb
|
||||
:type buffer_size: int
|
||||
|
||||
:param strict: Raise an exception if any of the requested categories
|
||||
are not available
|
||||
:type strict: bool
|
||||
"""
|
||||
|
||||
@property
|
||||
@memoized
|
||||
def available_categories(self):
|
||||
lines = subprocess.check_output([self.systrace_binary, '-l']).splitlines()
|
||||
|
||||
categories = []
|
||||
for line in lines:
|
||||
categories.append(line.split()[0])
|
||||
|
||||
return categories
|
||||
|
||||
def __init__(self, target,
|
||||
categories=None,
|
||||
buffer_size=None,
|
||||
strict=False):
|
||||
|
||||
super(SystraceCollector, self).__init__(target)
|
||||
|
||||
self.categories = categories or DEFAULT_CATEGORIES
|
||||
self.buffer_size = buffer_size
|
||||
|
||||
self._systrace_process = None
|
||||
self._tmpfile = None
|
||||
|
||||
# Try to find a systrace binary
|
||||
self.systrace_binary = None
|
||||
|
||||
systrace_binary_path = os.path.join(platform_tools, 'systrace', 'systrace.py')
|
||||
if not os.path.isfile(systrace_binary_path):
|
||||
raise HostError('Could not find any systrace binary under {}'.format(platform_tools))
|
||||
|
||||
self.systrace_binary = systrace_binary_path
|
||||
|
||||
# Filter the requested categories
|
||||
for category in self.categories:
|
||||
if category not in self.available_categories:
|
||||
message = 'Category [{}] not available for tracing'.format(category)
|
||||
if strict:
|
||||
raise TargetError(message)
|
||||
self.logger.warning(message)
|
||||
|
||||
self.categories = list(set(self.categories) & set(self.available_categories))
|
||||
if not self.categories:
|
||||
raise TargetError('None of the requested categories are available')
|
||||
|
||||
def __del__(self):
|
||||
self.reset()
|
||||
|
||||
def _build_cmd(self):
|
||||
self._tmpfile = NamedTemporaryFile()
|
||||
|
||||
self.systrace_cmd = '{} -o {} -e {}'.format(
|
||||
self.systrace_binary,
|
||||
self._tmpfile.name,
|
||||
self.target.adb_name
|
||||
)
|
||||
|
||||
if self.buffer_size:
|
||||
self.systrace_cmd += ' -b {}'.format(self.buffer_size)
|
||||
|
||||
self.systrace_cmd += ' {}'.format(' '.join(self.categories))
|
||||
|
||||
def reset(self):
|
||||
if self._systrace_process:
|
||||
self.stop()
|
||||
|
||||
if self._tmpfile:
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
||||
|
||||
def start(self):
|
||||
if self._systrace_process:
|
||||
raise RuntimeError("Tracing is already underway, call stop() first")
|
||||
|
||||
self.reset()
|
||||
|
||||
self._build_cmd()
|
||||
|
||||
self._systrace_process = subprocess.Popen(
|
||||
self.systrace_cmd,
|
||||
stdin=subprocess.PIPE,
|
||||
shell=True
|
||||
)
|
||||
|
||||
def stop(self):
|
||||
if not self._systrace_process:
|
||||
raise RuntimeError("No tracing to stop, call start() first")
|
||||
|
||||
# Systrace expects <enter> to stop
|
||||
self._systrace_process.communicate('\n')
|
||||
self._systrace_process = None
|
||||
|
||||
def get_trace(self, outfile):
|
||||
if self._systrace_process:
|
||||
raise RuntimeError("Tracing is underway, call stop() first")
|
||||
|
||||
if not self._tmpfile:
|
||||
raise RuntimeError("No tracing data available")
|
||||
|
||||
copyfile(self._tmpfile.name, outfile)
|
397
devlib/utils/android.py
Normal file → Executable file
397
devlib/utils/android.py
Normal file → Executable file
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
# Copyright 2013-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -20,25 +20,37 @@ Utility functions for working with Android devices through adb.
|
||||
"""
|
||||
# pylint: disable=E1103
|
||||
import os
|
||||
import pexpect
|
||||
import time
|
||||
import subprocess
|
||||
import logging
|
||||
import re
|
||||
import threading
|
||||
import tempfile
|
||||
import queue
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.utils.misc import check_output, which
|
||||
from devlib.exception import TargetError, HostError, DevlibError
|
||||
from devlib.utils.misc import check_output, which, memoized, ABI_MAP
|
||||
from devlib.utils.misc import escape_single_quotes, escape_double_quotes
|
||||
from devlib import host
|
||||
|
||||
|
||||
logger = logging.getLogger('android')
|
||||
|
||||
MAX_ATTEMPTS = 5
|
||||
AM_START_ERROR = re.compile(r"Error: Activity class {[\w|.|/]*} does not exist")
|
||||
AM_START_ERROR = re.compile(r"Error: Activity.*")
|
||||
|
||||
# See:
|
||||
# http://developer.android.com/guide/topics/manifest/uses-sdk-element.html#ApiLevels
|
||||
ANDROID_VERSION_MAP = {
|
||||
28: 'P',
|
||||
27: 'OREO_MR1',
|
||||
26: 'OREO',
|
||||
25: 'NOUGAT_MR1',
|
||||
24: 'NOUGAT',
|
||||
23: 'MARSHMALLOW',
|
||||
22: 'LOLLYPOP_MR1',
|
||||
21: 'LOLLYPOP',
|
||||
20: 'KITKAT_WATCH',
|
||||
@@ -63,6 +75,12 @@ ANDROID_VERSION_MAP = {
|
||||
1: 'BASE',
|
||||
}
|
||||
|
||||
# See https://developer.android.com/reference/android/content/Intent.html#setFlags(int)
|
||||
INTENT_FLAGS = {
|
||||
'ACTIVITY_NEW_TASK' : 0x10000000,
|
||||
'ACTIVITY_CLEAR_TASK' : 0x00008000
|
||||
}
|
||||
|
||||
|
||||
# Initialized in functions near the botton of the file
|
||||
android_home = None
|
||||
@@ -82,7 +100,7 @@ class AndroidProperties(object):
|
||||
self._properties = dict(re.findall(r'\[(.*?)\]:\s+\[(.*?)\]', text))
|
||||
|
||||
def iteritems(self):
|
||||
return self._properties.iteritems()
|
||||
return iter(self._properties.items())
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._properties)
|
||||
@@ -115,6 +133,7 @@ class ApkInfo(object):
|
||||
|
||||
version_regex = re.compile(r"name='(?P<name>[^']+)' versionCode='(?P<vcode>[^']+)' versionName='(?P<vname>[^']+)'")
|
||||
name_regex = re.compile(r"name='(?P<name>[^']+)'")
|
||||
permission_regex = re.compile(r"name='(?P<permission>[^']+)'")
|
||||
|
||||
def __init__(self, path=None):
|
||||
self.path = path
|
||||
@@ -123,13 +142,21 @@ class ApkInfo(object):
|
||||
self.label = None
|
||||
self.version_name = None
|
||||
self.version_code = None
|
||||
self.native_code = None
|
||||
self.permissions = []
|
||||
self.parse(path)
|
||||
|
||||
def parse(self, apk_path):
|
||||
_check_env()
|
||||
command = [aapt, 'dump', 'badging', apk_path]
|
||||
logger.debug(' '.join(command))
|
||||
output = subprocess.check_output(command)
|
||||
try:
|
||||
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
|
||||
if sys.version_info[0] == 3:
|
||||
output = output.decode(sys.stdout.encoding, 'replace')
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise HostError('Error parsing APK file {}. `aapt` says:\n{}'
|
||||
.format(apk_path, e.output))
|
||||
for line in output.split('\n'):
|
||||
if line.startswith('application-label:'):
|
||||
self.label = line.split(':')[1].strip().replace('\'', '')
|
||||
@@ -142,6 +169,23 @@ class ApkInfo(object):
|
||||
elif line.startswith('launchable-activity:'):
|
||||
match = self.name_regex.search(line)
|
||||
self.activity = match.group('name')
|
||||
elif line.startswith('native-code'):
|
||||
apk_abis = [entry.strip() for entry in line.split(':')[1].split("'") if entry.strip()]
|
||||
mapped_abis = []
|
||||
for apk_abi in apk_abis:
|
||||
found = False
|
||||
for abi, architectures in ABI_MAP.items():
|
||||
if apk_abi in architectures:
|
||||
mapped_abis.append(abi)
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
mapped_abis.append(apk_abi)
|
||||
self.native_code = mapped_abis
|
||||
elif line.startswith('uses-permission:'):
|
||||
match = self.permission_regex.search(line)
|
||||
if match:
|
||||
self.permissions.append(match.group('permission'))
|
||||
else:
|
||||
pass # not interested
|
||||
|
||||
@@ -151,33 +195,70 @@ class AdbConnection(object):
|
||||
# maintains the count of parallel active connections to a device, so that
|
||||
# adb disconnect is not invoked untill all connections are closed
|
||||
active_connections = defaultdict(int)
|
||||
default_timeout = 10
|
||||
ls_command = 'ls'
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.device
|
||||
|
||||
def __init__(self, device=None, timeout=10):
|
||||
self.timeout = timeout
|
||||
# Again, we need to handle boards where the default output format from ls is
|
||||
# single column *and* boards where the default output is multi-column.
|
||||
# We need to do this purely because the '-1' option causes errors on older
|
||||
# versions of the ls tool in Android pre-v7.
|
||||
def _setup_ls(self):
|
||||
command = "shell '(ls -1); echo \"\n$?\"'"
|
||||
try:
|
||||
output = adb_command(self.device, command, timeout=self.timeout, adb_server=self.adb_server)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise HostError(
|
||||
'Failed to set up ls command on Android device. Output:\n'
|
||||
+ e.output)
|
||||
lines = output.splitlines()
|
||||
retval = lines[-1].strip()
|
||||
if int(retval) == 0:
|
||||
self.ls_command = 'ls -1'
|
||||
else:
|
||||
self.ls_command = 'ls'
|
||||
logger.debug("ls command is set to {}".format(self.ls_command))
|
||||
|
||||
def __init__(self, device=None, timeout=None, platform=None, adb_server=None):
|
||||
self.timeout = timeout if timeout is not None else self.default_timeout
|
||||
if device is None:
|
||||
device = adb_get_device(timeout=timeout)
|
||||
device = adb_get_device(timeout=timeout, adb_server=adb_server)
|
||||
self.device = device
|
||||
self.adb_server = adb_server
|
||||
adb_connect(self.device)
|
||||
AdbConnection.active_connections[self.device] += 1
|
||||
self._setup_ls()
|
||||
|
||||
def push(self, source, dest, timeout=None):
|
||||
if timeout is None:
|
||||
timeout = self.timeout
|
||||
command = 'push {} {}'.format(source, dest)
|
||||
return adb_command(self.device, command, timeout=timeout)
|
||||
command = "push '{}' '{}'".format(source, dest)
|
||||
if not os.path.exists(source):
|
||||
raise HostError('No such file "{}"'.format(source))
|
||||
return adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
|
||||
def pull(self, source, dest, timeout=None):
|
||||
if timeout is None:
|
||||
timeout = self.timeout
|
||||
command = 'pull {} {}'.format(source, dest)
|
||||
return adb_command(self.device, command, timeout=timeout)
|
||||
# Pull all files matching a wildcard expression
|
||||
if os.path.isdir(dest) and \
|
||||
('*' in source or '?' in source):
|
||||
command = 'shell {} {}'.format(self.ls_command, source)
|
||||
output = adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
for line in output.splitlines():
|
||||
command = "pull '{}' '{}'".format(line.strip(), dest)
|
||||
adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
return
|
||||
command = "pull '{}' '{}'".format(source, dest)
|
||||
return adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
|
||||
def execute(self, command, timeout=None, check_exit_code=False, as_root=False):
|
||||
return adb_shell(self.device, command, timeout, check_exit_code, as_root)
|
||||
def execute(self, command, timeout=None, check_exit_code=False,
|
||||
as_root=False, strip_colors=True):
|
||||
return adb_shell(self.device, command, timeout, check_exit_code,
|
||||
as_root, adb_server=self.adb_server)
|
||||
|
||||
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
|
||||
return adb_background_shell(self.device, command, stdout, stderr, as_root)
|
||||
@@ -195,9 +276,10 @@ class AdbConnection(object):
|
||||
pass
|
||||
|
||||
|
||||
def fastboot_command(command, timeout=None):
|
||||
def fastboot_command(command, timeout=None, device=None):
|
||||
_check_env()
|
||||
full_command = "fastboot {}".format(command)
|
||||
target = '-s {}'.format(device) if device else ''
|
||||
full_command = 'fastboot {} {}'.format(target, command)
|
||||
logger.debug(full_command)
|
||||
output, _ = check_output(full_command, timeout, shell=True)
|
||||
return output
|
||||
@@ -208,7 +290,7 @@ def fastboot_flash_partition(partition, path_to_image):
|
||||
fastboot_command(command)
|
||||
|
||||
|
||||
def adb_get_device(timeout=None):
|
||||
def adb_get_device(timeout=None, adb_server=None):
|
||||
"""
|
||||
Returns the serial number of a connected android device.
|
||||
|
||||
@@ -217,13 +299,17 @@ def adb_get_device(timeout=None):
|
||||
"""
|
||||
# TODO this is a hacky way to issue a adb command to all listed devices
|
||||
|
||||
# Ensure server is started so the 'daemon started successfully' message
|
||||
# doesn't confuse the parsing below
|
||||
adb_command(None, 'start-server', adb_server=adb_server)
|
||||
|
||||
# The output of calling adb devices consists of a heading line then
|
||||
# a list of the devices sperated by new line
|
||||
# The last line is a blank new line. in otherwords, if there is a device found
|
||||
# then the output length is 2 + (1 for each device)
|
||||
start = time.time()
|
||||
while True:
|
||||
output = adb_command(None, "devices").splitlines() # pylint: disable=E1103
|
||||
output = adb_command(None, "devices", adb_server=adb_server).splitlines() # pylint: disable=E1103
|
||||
output_length = len(output)
|
||||
if output_length == 3:
|
||||
# output[1] is the 2nd line in the output which has the device name
|
||||
@@ -232,7 +318,7 @@ def adb_get_device(timeout=None):
|
||||
return output[1].split('\t')[0]
|
||||
elif output_length > 3:
|
||||
message = '{} Android devices found; either explicitly specify ' +\
|
||||
'the device you want, or make sure only one is connected.'
|
||||
'the device you want, or make sure only one is connected.'
|
||||
raise HostError(message.format(output_length - 2))
|
||||
else:
|
||||
if timeout < time.time() - start:
|
||||
@@ -247,9 +333,10 @@ def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS):
|
||||
while tries <= attempts:
|
||||
tries += 1
|
||||
if device:
|
||||
command = 'adb connect {}'.format(device)
|
||||
logger.debug(command)
|
||||
output, _ = check_output(command, shell=True, timeout=timeout)
|
||||
if "." in device: # Connect is required only for ADB-over-IP
|
||||
command = 'adb connect {}'.format(device)
|
||||
logger.debug(command)
|
||||
output, _ = check_output(command, shell=True, timeout=timeout)
|
||||
if _ping(device):
|
||||
break
|
||||
time.sleep(10)
|
||||
@@ -264,7 +351,7 @@ def adb_disconnect(device):
|
||||
_check_env()
|
||||
if not device:
|
||||
return
|
||||
if ":" in device:
|
||||
if ":" in device and device in adb_list_devices():
|
||||
command = "adb disconnect " + device
|
||||
logger.debug(command)
|
||||
retval = subprocess.call(command, stdout=open(os.devnull, 'wb'), shell=True)
|
||||
@@ -275,7 +362,7 @@ def adb_disconnect(device):
|
||||
def _ping(device):
|
||||
_check_env()
|
||||
device_string = ' -s {}'.format(device) if device else ''
|
||||
command = "adb{} shell \"ls / > /dev/null\"".format(device_string)
|
||||
command = "adb{} shell \"ls /data/local/tmp > /dev/null\"".format(device_string)
|
||||
logger.debug(command)
|
||||
result = subprocess.call(command, stderr=subprocess.PIPE, shell=True)
|
||||
if not result:
|
||||
@@ -284,47 +371,60 @@ def _ping(device):
|
||||
return False
|
||||
|
||||
|
||||
def adb_shell(device, command, timeout=None, check_exit_code=False, as_root=False): # NOQA
|
||||
def adb_shell(device, command, timeout=None, check_exit_code=False,
|
||||
as_root=False, adb_server=None): # NOQA
|
||||
_check_env()
|
||||
if as_root:
|
||||
command = 'echo "{}" | su'.format(escape_double_quotes(command))
|
||||
device_string = ' -s {}'.format(device) if device else ''
|
||||
full_command = 'adb{} shell "{}"'.format(device_string,
|
||||
escape_double_quotes(command))
|
||||
logger.debug(full_command)
|
||||
if check_exit_code:
|
||||
actual_command = "adb{} shell '({}); echo $?'".format(device_string,
|
||||
escape_single_quotes(command))
|
||||
raw_output, error = check_output(actual_command, timeout, shell=True)
|
||||
if raw_output:
|
||||
try:
|
||||
output, exit_code, _ = raw_output.rsplit('\n', 2)
|
||||
except ValueError:
|
||||
exit_code, _ = raw_output.rsplit('\n', 1)
|
||||
output = ''
|
||||
else: # raw_output is empty
|
||||
exit_code = '969696' # just because
|
||||
output = ''
|
||||
command = 'echo \'{}\' | su'.format(escape_single_quotes(command))
|
||||
device_part = []
|
||||
if adb_server:
|
||||
device_part = ['-H', adb_server]
|
||||
device_part += ['-s', device] if device else []
|
||||
|
||||
# On older combinations of ADB/Android versions, the adb host command always
|
||||
# exits with 0 if it was able to run the command on the target, even if the
|
||||
# command failed (https://code.google.com/p/android/issues/detail?id=3254).
|
||||
# Homogenise this behaviour by running the command then echoing the exit
|
||||
# code.
|
||||
adb_shell_command = '({}); echo \"\n$?\"'.format(command)
|
||||
actual_command = ['adb'] + device_part + ['shell', adb_shell_command]
|
||||
logger.debug('adb {} shell {}'.format(' '.join(device_part), command))
|
||||
try:
|
||||
raw_output, _ = check_output(actual_command, timeout, shell=False, combined_output=True)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise TargetError(str(e))
|
||||
|
||||
if raw_output:
|
||||
try:
|
||||
output, exit_code, _ = raw_output.replace('\r\n', '\n').replace('\r', '\n').rsplit('\n', 2)
|
||||
except ValueError:
|
||||
exit_code, _ = raw_output.replace('\r\n', '\n').replace('\r', '\n').rsplit('\n', 1)
|
||||
output = ''
|
||||
else: # raw_output is empty
|
||||
exit_code = '969696' # just because
|
||||
output = ''
|
||||
|
||||
if check_exit_code:
|
||||
exit_code = exit_code.strip()
|
||||
re_search = AM_START_ERROR.findall(output)
|
||||
if exit_code.isdigit():
|
||||
if int(exit_code):
|
||||
message = 'Got exit code {}\nfrom: {}\nSTDOUT: {}\nSTDERR: {}'
|
||||
raise TargetError(message.format(exit_code, full_command, output, error))
|
||||
elif AM_START_ERROR.findall(output):
|
||||
message = 'Could not start activity; got the following:'
|
||||
message += '\n{}'.format(AM_START_ERROR.findall(output)[0])
|
||||
raise TargetError(message)
|
||||
else: # not all digits
|
||||
if AM_START_ERROR.findall(output):
|
||||
message = ('Got exit code {}\nfrom target command: {}\n'
|
||||
'OUTPUT: {}')
|
||||
raise TargetError(message.format(exit_code, command, output))
|
||||
elif re_search:
|
||||
message = 'Could not start activity; got the following:\n{}'
|
||||
raise TargetError(message.format(AM_START_ERROR.findall(output)[0]))
|
||||
raise TargetError(message.format(re_search[0]))
|
||||
else: # not all digits
|
||||
if re_search:
|
||||
message = 'Could not start activity; got the following:\n{}'
|
||||
raise TargetError(message.format(re_search[0]))
|
||||
else:
|
||||
message = 'adb has returned early; did not get an exit code. '\
|
||||
'Was kill-server invoked?'
|
||||
raise TargetError(message)
|
||||
else: # do not check exit code
|
||||
output, _ = check_output(full_command, timeout, shell=True)
|
||||
'Was kill-server invoked?\nOUTPUT:\n-----\n{}\n'\
|
||||
'-----'
|
||||
raise TargetError(message.format(raw_output))
|
||||
|
||||
return output
|
||||
|
||||
|
||||
@@ -342,8 +442,8 @@ def adb_background_shell(device, command,
|
||||
return subprocess.Popen(full_command, stdout=stdout, stderr=stderr, shell=True)
|
||||
|
||||
|
||||
def adb_list_devices():
|
||||
output = adb_command(None, 'devices')
|
||||
def adb_list_devices(adb_server=None):
|
||||
output = adb_command(None, 'devices',adb_server=adb_server)
|
||||
devices = []
|
||||
for line in output.splitlines():
|
||||
parts = [p.strip() for p in line.split()]
|
||||
@@ -352,14 +452,39 @@ def adb_list_devices():
|
||||
return devices
|
||||
|
||||
|
||||
def adb_command(device, command, timeout=None):
|
||||
def get_adb_command(device, command, timeout=None,adb_server=None):
|
||||
_check_env()
|
||||
device_string = ' -s {}'.format(device) if device else ''
|
||||
full_command = "adb{} {}".format(device_string, command)
|
||||
device_string = ""
|
||||
if adb_server != None:
|
||||
device_string = ' -H {}'.format(adb_server)
|
||||
device_string += ' -s {}'.format(device) if device else ''
|
||||
return "adb{} {}".format(device_string, command)
|
||||
|
||||
def adb_command(device, command, timeout=None,adb_server=None):
|
||||
full_command = get_adb_command(device, command, timeout, adb_server)
|
||||
logger.debug(full_command)
|
||||
output, _ = check_output(full_command, timeout, shell=True)
|
||||
return output
|
||||
|
||||
def grant_app_permissions(target, package):
|
||||
"""
|
||||
Grant an app all the permissions it may ask for
|
||||
"""
|
||||
dumpsys = target.execute('dumpsys package {}'.format(package))
|
||||
|
||||
permissions = re.search(
|
||||
'requested permissions:\s*(?P<permissions>(android.permission.+\s*)+)', dumpsys
|
||||
)
|
||||
if permissions is None:
|
||||
return
|
||||
permissions = permissions.group('permissions').replace(" ", "").splitlines()
|
||||
|
||||
for permission in permissions:
|
||||
try:
|
||||
target.execute('pm grant {} {}'.format(package, permission))
|
||||
except TargetError:
|
||||
logger.debug('Cannot grant {}'.format(permission))
|
||||
|
||||
|
||||
# Messy environment initialisation stuff...
|
||||
|
||||
@@ -377,19 +502,20 @@ def _initialize_with_android_home(env):
|
||||
logger.debug('Using ANDROID_HOME from the environment.')
|
||||
env.android_home = android_home
|
||||
env.platform_tools = os.path.join(android_home, 'platform-tools')
|
||||
os.environ['PATH'] += os.pathsep + env.platform_tools
|
||||
os.environ['PATH'] = env.platform_tools + os.pathsep + os.environ['PATH']
|
||||
_init_common(env)
|
||||
return env
|
||||
|
||||
|
||||
def _initialize_without_android_home(env):
|
||||
if which('adb'):
|
||||
adb_full_path = which('adb')
|
||||
if adb_full_path:
|
||||
env.adb = 'adb'
|
||||
else:
|
||||
raise HostError('ANDROID_HOME is not set and adb is not in PATH. '
|
||||
'Have you installed Android SDK?')
|
||||
logger.debug('Discovering ANDROID_HOME from adb path.')
|
||||
env.platform_tools = os.path.dirname(env.adb)
|
||||
env.platform_tools = os.path.dirname(adb_full_path)
|
||||
env.android_home = os.path.dirname(env.platform_tools)
|
||||
_init_common(env)
|
||||
return env
|
||||
@@ -426,3 +552,144 @@ def _check_env():
|
||||
platform_tools = _env.platform_tools
|
||||
adb = _env.adb
|
||||
aapt = _env.aapt
|
||||
|
||||
class LogcatMonitor(object):
|
||||
"""
|
||||
Helper class for monitoring Anroid's logcat
|
||||
|
||||
:param target: Android target to monitor
|
||||
:type target: :class:`AndroidTarget`
|
||||
|
||||
:param regexps: List of uncompiled regular expressions to filter on the
|
||||
device. Logcat entries that don't match any will not be
|
||||
seen. If omitted, all entries will be sent to host.
|
||||
:type regexps: list(str)
|
||||
"""
|
||||
|
||||
@property
|
||||
def logfile(self):
|
||||
return self._logfile
|
||||
|
||||
def __init__(self, target, regexps=None):
|
||||
super(LogcatMonitor, self).__init__()
|
||||
|
||||
self.target = target
|
||||
self._regexps = regexps
|
||||
|
||||
def start(self, outfile=None):
|
||||
"""
|
||||
Start logcat and begin monitoring
|
||||
|
||||
:param outfile: Optional path to file to store all logcat entries
|
||||
:type outfile: str
|
||||
"""
|
||||
if outfile:
|
||||
self._logfile = open(outfile, 'w')
|
||||
else:
|
||||
self._logfile = tempfile.NamedTemporaryFile()
|
||||
|
||||
self.target.clear_logcat()
|
||||
|
||||
logcat_cmd = 'logcat'
|
||||
|
||||
# Join all requested regexps with an 'or'
|
||||
if self._regexps:
|
||||
regexp = '{}'.format('|'.join(self._regexps))
|
||||
if len(self._regexps) > 1:
|
||||
regexp = '({})'.format(regexp)
|
||||
# Logcat on older version of android do not support the -e argument
|
||||
# so fall back to using grep.
|
||||
if self.target.get_sdk_version() > 23:
|
||||
logcat_cmd = '{} -e "{}"'.format(logcat_cmd, regexp)
|
||||
else:
|
||||
logcat_cmd = '{} | grep "{}"'.format(logcat_cmd, regexp)
|
||||
|
||||
logcat_cmd = get_adb_command(self.target.conn.device, logcat_cmd)
|
||||
|
||||
logger.debug('logcat command ="{}"'.format(logcat_cmd))
|
||||
self._logcat = pexpect.spawn(logcat_cmd, logfile=self._logfile)
|
||||
|
||||
def stop(self):
|
||||
self._logcat.terminate()
|
||||
self._logfile.close()
|
||||
|
||||
def get_log(self):
|
||||
"""
|
||||
Return the list of lines found by the monitor
|
||||
"""
|
||||
# Unless we tell pexect to 'expect' something, it won't read from
|
||||
# logcat's buffer or write into our logfile. We'll need to force it to
|
||||
# read any pending logcat output.
|
||||
while True:
|
||||
try:
|
||||
read_size = 1024 * 8
|
||||
# This will read up to read_size bytes, but only those that are
|
||||
# already ready (i.e. it won't block). If there aren't any bytes
|
||||
# already available it raises pexpect.TIMEOUT.
|
||||
buf = self._logcat.read_nonblocking(read_size, timeout=0)
|
||||
|
||||
# We can't just keep calling read_nonblocking until we get a
|
||||
# pexpect.TIMEOUT (i.e. until we don't find any available
|
||||
# bytes), because logcat might be writing bytes the whole time -
|
||||
# in that case we might never return from this function. In
|
||||
# fact, we only care about bytes that were written before we
|
||||
# entered this function. So, if we read read_size bytes (as many
|
||||
# as we were allowed to), then we'll assume there are more bytes
|
||||
# that have already been sitting in the output buffer of the
|
||||
# logcat command. If not, we'll assume we read everything that
|
||||
# had already been written.
|
||||
if len(buf) == read_size:
|
||||
continue
|
||||
else:
|
||||
break
|
||||
except pexpect.TIMEOUT:
|
||||
# No available bytes to read. No prob, logcat just hasn't
|
||||
# printed anything since pexpect last read from its buffer.
|
||||
break
|
||||
|
||||
with open(self._logfile.name) as fh:
|
||||
return [line for line in fh]
|
||||
|
||||
def clear_log(self):
|
||||
with open(self._logfile.name, 'w') as fh:
|
||||
pass
|
||||
|
||||
def search(self, regexp):
|
||||
"""
|
||||
Search a line that matches a regexp in the logcat log
|
||||
Return immediatly
|
||||
"""
|
||||
return [line for line in self.get_log() if re.match(regexp, line)]
|
||||
|
||||
def wait_for(self, regexp, timeout=30):
|
||||
"""
|
||||
Search a line that matches a regexp in the logcat log
|
||||
Wait for it to appear if it's not found
|
||||
|
||||
:param regexp: regexp to search
|
||||
:type regexp: str
|
||||
|
||||
:param timeout: Timeout in seconds, before rasing RuntimeError.
|
||||
``None`` means wait indefinitely
|
||||
:type timeout: number
|
||||
|
||||
:returns: List of matched strings
|
||||
"""
|
||||
log = self.get_log()
|
||||
res = [line for line in log if re.match(regexp, line)]
|
||||
|
||||
# Found some matches, return them
|
||||
if len(res) > 0:
|
||||
return res
|
||||
|
||||
# Store the number of lines we've searched already, so we don't have to
|
||||
# re-grep them after 'expect' returns
|
||||
next_line_num = len(log)
|
||||
|
||||
try:
|
||||
self._logcat.expect(regexp, timeout=timeout)
|
||||
except pexpect.TIMEOUT:
|
||||
raise RuntimeError('Logcat monitor timeout ({}s)'.format(timeout))
|
||||
|
||||
return [line for line in self.get_log()[next_line_num:]
|
||||
if re.match(regexp, line)]
|
||||
|
100
devlib/utils/csvutil.py
Normal file
100
devlib/utils/csvutil.py
Normal file
@@ -0,0 +1,100 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
'''
|
||||
Due to the change in the nature of "binary mode" when opening files in
|
||||
Python 3, the way files need to be opened for ``csv.reader`` and ``csv.writer``
|
||||
is different from Python 2.
|
||||
|
||||
The functions in this module are intended to hide these differences allowing
|
||||
the rest of the code to create csv readers/writers without worrying about which
|
||||
Python version it is running under.
|
||||
|
||||
First up are ``csvwriter`` and ``csvreader`` context mangers that handle the
|
||||
opening and closing of the underlying file. These are intended to replace the
|
||||
most common usage pattern
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
with open(filepath, 'wb') as wfh: # or open(filepath, 'w', newline='') in Python 3
|
||||
writer = csv.writer(wfh)
|
||||
writer.writerows(data)
|
||||
|
||||
|
||||
with
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
with csvwriter(filepath) as writer:
|
||||
writer.writerows(data)
|
||||
|
||||
|
||||
``csvreader`` works in an analogous way. ``csvreader`` and ``writer`` can take
|
||||
additional arguments which will be passed directly to the
|
||||
``csv.reader``/``csv.writer`` calls.
|
||||
|
||||
In some cases, it is desirable not to use a context manager (e.g. if the
|
||||
reader/writer is intended to be returned from the function that creates it. For
|
||||
such cases, alternative functions, ``create_reader`` and ``create_writer``,
|
||||
exit. These return a two-tuple, with the created reader/writer as the first
|
||||
element, and the corresponding ``FileObject`` as the second. It is the
|
||||
responsibility of the calling code to ensure that the file is closed properly.
|
||||
|
||||
'''
|
||||
import csv
|
||||
import sys
|
||||
from contextlib import contextmanager
|
||||
|
||||
|
||||
@contextmanager
|
||||
def csvwriter(filepath, *args, **kwargs):
|
||||
if sys.version_info[0] == 3:
|
||||
wfh = open(filepath, 'w', newline='')
|
||||
else:
|
||||
wfh = open(filepath, 'wb')
|
||||
|
||||
try:
|
||||
yield csv.writer(wfh, *args, **kwargs)
|
||||
finally:
|
||||
wfh.close()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def csvreader(filepath, *args, **kwargs):
|
||||
if sys.version_info[0] == 3:
|
||||
fh = open(filepath, 'r', newline='')
|
||||
else:
|
||||
fh = open(filepath, 'rb')
|
||||
|
||||
try:
|
||||
yield csv.reader(fh, *args, **kwargs)
|
||||
finally:
|
||||
fh.close()
|
||||
|
||||
|
||||
def create_writer(filepath, *args, **kwargs):
|
||||
if sys.version_info[0] == 3:
|
||||
wfh = open(filepath, 'w', newline='')
|
||||
else:
|
||||
wfh = open(filepath, 'wb')
|
||||
return csv.writer(wfh, *args, **kwargs), wfh
|
||||
|
||||
|
||||
def create_reader(filepath, *args, **kwargs):
|
||||
if sys.version_info[0] == 3:
|
||||
fh = open(filepath, 'r', newline='')
|
||||
else:
|
||||
fh = open(filepath, 'rb')
|
||||
return csv.reader(fh, *args, **kwargs), fh
|
53
devlib/utils/gem5.py
Normal file
53
devlib/utils/gem5.py
Normal file
@@ -0,0 +1,53 @@
|
||||
# Copyright 2017-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
import logging
|
||||
|
||||
from devlib.utils.types import numeric
|
||||
|
||||
|
||||
GEM5STATS_FIELD_REGEX = re.compile("^(?P<key>[^- ]\S*) +(?P<value>[^#]+).+$")
|
||||
GEM5STATS_DUMP_HEAD = '---------- Begin Simulation Statistics ----------'
|
||||
GEM5STATS_DUMP_TAIL = '---------- End Simulation Statistics ----------'
|
||||
GEM5STATS_ROI_NUMBER = 8
|
||||
|
||||
logger = logging.getLogger('gem5')
|
||||
|
||||
|
||||
def iter_statistics_dump(stats_file):
|
||||
'''
|
||||
Yields statistics dumps as dicts. The parameter is assumed to be a stream
|
||||
reading from the statistics log file.
|
||||
'''
|
||||
cur_dump = {}
|
||||
while True:
|
||||
line = stats_file.readline()
|
||||
if not line:
|
||||
break
|
||||
if GEM5STATS_DUMP_TAIL in line:
|
||||
yield cur_dump
|
||||
cur_dump = {}
|
||||
else:
|
||||
res = GEM5STATS_FIELD_REGEX.match(line)
|
||||
if res:
|
||||
k = res.group("key")
|
||||
vtext = res.group("value")
|
||||
try:
|
||||
v = list(map(numeric, vtext.split()))
|
||||
cur_dump[k] = v[0] if len(v)==1 else set(v)
|
||||
except ValueError:
|
||||
msg = 'Found non-numeric entry in gem5 stats ({}: {})'
|
||||
logger.warning(msg.format(k, vtext))
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
# Copyright 2013-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -29,14 +29,22 @@ import subprocess
|
||||
import pkgutil
|
||||
import logging
|
||||
import random
|
||||
import ctypes
|
||||
import threading
|
||||
from operator import itemgetter
|
||||
from itertools import groupby
|
||||
from functools import partial
|
||||
|
||||
import wrapt
|
||||
from past.builtins import basestring
|
||||
|
||||
from devlib.exception import HostError, TimeoutError
|
||||
from functools import reduce
|
||||
|
||||
|
||||
# ABI --> architectures list
|
||||
ABI_MAP = {
|
||||
'armeabi': ['armeabi', 'armv7', 'armv7l', 'armv7el', 'armv7lh'],
|
||||
'armeabi': ['armeabi', 'armv7', 'armv7l', 'armv7el', 'armv7lh', 'armeabi-v7a'],
|
||||
'arm64': ['arm64', 'armv8', 'arm64-v8a', 'aarch64'],
|
||||
}
|
||||
|
||||
@@ -55,21 +63,38 @@ CPU_PART_MAP = {
|
||||
0xc07: {None: 'A7'},
|
||||
0xc08: {None: 'A8'},
|
||||
0xc09: {None: 'A9'},
|
||||
0xc0e: {None: 'A17'},
|
||||
0xc0f: {None: 'A15'},
|
||||
0xc14: {None: 'R4'},
|
||||
0xc15: {None: 'R5'},
|
||||
0xc17: {None: 'R7'},
|
||||
0xc18: {None: 'R8'},
|
||||
0xc20: {None: 'M0'},
|
||||
0xc60: {None: 'M0+'},
|
||||
0xc21: {None: 'M1'},
|
||||
0xc23: {None: 'M3'},
|
||||
0xc24: {None: 'M4'},
|
||||
0xc27: {None: 'M7'},
|
||||
0xd01: {None: 'A32'},
|
||||
0xd03: {None: 'A53'},
|
||||
0xd04: {None: 'A35'},
|
||||
0xd07: {None: 'A57'},
|
||||
0xd08: {None: 'A72'},
|
||||
0xd09: {None: 'A73'},
|
||||
},
|
||||
0x42: { # Broadcom
|
||||
0x516: {None: 'Vulcan'},
|
||||
},
|
||||
0x43: { # Cavium
|
||||
0x0a1: {None: 'Thunderx'},
|
||||
0x0a2: {None: 'Thunderx81xx'},
|
||||
},
|
||||
0x4e: { # Nvidia
|
||||
0x0: {None: 'Denver'},
|
||||
},
|
||||
0x50: { # AppliedMicro
|
||||
0x0: {None: 'xgene'},
|
||||
},
|
||||
0x51: { # Qualcomm
|
||||
0x02d: {None: 'Scorpion'},
|
||||
0x04d: {None: 'MSM8960'},
|
||||
@@ -77,6 +102,12 @@ CPU_PART_MAP = {
|
||||
0x2: 'Krait400',
|
||||
0x3: 'Krait450',
|
||||
},
|
||||
0x205: {0x1: 'KryoSilver'},
|
||||
0x211: {0x1: 'KryoGold'},
|
||||
0x800: {None: 'Falkor'},
|
||||
},
|
||||
0x53: { # Samsung LSI
|
||||
0x001: {0x1: 'MongooseM1'},
|
||||
},
|
||||
0x56: { # Marvell
|
||||
0x131: {
|
||||
@@ -107,25 +138,13 @@ def preexec_function():
|
||||
|
||||
|
||||
check_output_logger = logging.getLogger('check_output')
|
||||
# Popen is not thread safe. If two threads attempt to call it at the same time,
|
||||
# one may lock up. See https://bugs.python.org/issue12739.
|
||||
check_output_lock = threading.Lock()
|
||||
|
||||
|
||||
# Defined here rather than in devlib.exceptions due to module load dependencies
|
||||
class TimeoutError(Exception):
|
||||
"""Raised when a subprocess command times out. This is basically a ``WAError``-derived version
|
||||
of ``subprocess.CalledProcessError``, the thinking being that while a timeout could be due to
|
||||
programming error (e.g. not setting long enough timers), it is often due to some failure in the
|
||||
environment, and there fore should be classed as a "user error"."""
|
||||
|
||||
def __init__(self, command, output):
|
||||
super(TimeoutError, self).__init__('Timed out: {}'.format(command))
|
||||
self.command = command
|
||||
self.output = output
|
||||
|
||||
def __str__(self):
|
||||
return '\n'.join([self.message, 'OUTPUT:', self.output or ''])
|
||||
|
||||
|
||||
def check_output(command, timeout=None, ignore=None, inputtext=None, **kwargs):
|
||||
def check_output(command, timeout=None, ignore=None, inputtext=None,
|
||||
combined_output=False, **kwargs):
|
||||
"""This is a version of subprocess.check_output that adds a timeout parameter to kill
|
||||
the subprocess if it does not return within the specified time."""
|
||||
# pylint: disable=too-many-branches
|
||||
@@ -146,9 +165,14 @@ def check_output(command, timeout=None, ignore=None, inputtext=None, **kwargs):
|
||||
except OSError:
|
||||
pass # process may have already terminated.
|
||||
|
||||
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
stdin=subprocess.PIPE,
|
||||
preexec_fn=preexec_function, **kwargs)
|
||||
with check_output_lock:
|
||||
stderr = subprocess.STDOUT if combined_output else subprocess.PIPE
|
||||
process = subprocess.Popen(command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=stderr,
|
||||
stdin=subprocess.PIPE,
|
||||
preexec_fn=preexec_function,
|
||||
**kwargs)
|
||||
|
||||
if timeout:
|
||||
timer = threading.Timer(timeout, callback, [process.pid, ])
|
||||
@@ -156,6 +180,11 @@ def check_output(command, timeout=None, ignore=None, inputtext=None, **kwargs):
|
||||
|
||||
try:
|
||||
output, error = process.communicate(inputtext)
|
||||
if sys.version_info[0] == 3:
|
||||
# Currently errors=replace is needed as 0x8c throws an error
|
||||
output = output.decode(sys.stdout.encoding, "replace")
|
||||
if error:
|
||||
error = error.decode(sys.stderr.encoding, "replace")
|
||||
finally:
|
||||
if timeout:
|
||||
timer.cancel()
|
||||
@@ -163,9 +192,9 @@ def check_output(command, timeout=None, ignore=None, inputtext=None, **kwargs):
|
||||
retcode = process.poll()
|
||||
if retcode:
|
||||
if retcode == -9: # killed, assume due to timeout callback
|
||||
raise TimeoutError(command, output='\n'.join([output, error]))
|
||||
raise TimeoutError(command, output='\n'.join([output or '', error or '']))
|
||||
elif ignore != 'all' and retcode not in ignore:
|
||||
raise subprocess.CalledProcessError(retcode, command, output='\n'.join([output, error]))
|
||||
raise subprocess.CalledProcessError(retcode, command, output='\n'.join([output or '', error or '']))
|
||||
return output, error
|
||||
|
||||
|
||||
@@ -174,15 +203,35 @@ def walk_modules(path):
|
||||
Given package name, return a list of all modules (including submodules, etc)
|
||||
in that package.
|
||||
|
||||
:raises HostError: if an exception is raised while trying to import one of the
|
||||
modules under ``path``. The exception will have addtional
|
||||
attributes set: ``module`` will be set to the qualified name
|
||||
of the originating module, and ``orig_exc`` will contain
|
||||
the original exception.
|
||||
|
||||
"""
|
||||
root_mod = __import__(path, {}, {}, [''])
|
||||
|
||||
def __try_import(path):
|
||||
try:
|
||||
return __import__(path, {}, {}, [''])
|
||||
except Exception as e:
|
||||
he = HostError('Could not load {}: {}'.format(path, str(e)))
|
||||
he.module = path
|
||||
he.exc_info = sys.exc_info()
|
||||
he.orig_exc = e
|
||||
raise he
|
||||
|
||||
root_mod = __try_import(path)
|
||||
mods = [root_mod]
|
||||
if not hasattr(root_mod, '__path__'):
|
||||
# root is a module not a package -- nothing to walk
|
||||
return mods
|
||||
for _, name, ispkg in pkgutil.iter_modules(root_mod.__path__):
|
||||
submod_path = '.'.join([path, name])
|
||||
if ispkg:
|
||||
mods.extend(walk_modules(submod_path))
|
||||
else:
|
||||
submod = __import__(submod_path, {}, {}, [''])
|
||||
submod = __try_import(submod_path)
|
||||
mods.append(submod)
|
||||
return mods
|
||||
|
||||
@@ -217,8 +266,8 @@ def _merge_two_dicts(base, other, list_duplicates='all', match_types=False, # p
|
||||
dict_type=dict, should_normalize=True, should_merge_lists=True):
|
||||
"""Merge dicts normalizing their keys."""
|
||||
merged = dict_type()
|
||||
base_keys = base.keys()
|
||||
other_keys = other.keys()
|
||||
base_keys = list(base.keys())
|
||||
other_keys = list(other.keys())
|
||||
norm = normalize if should_normalize else lambda x, y: x
|
||||
|
||||
base_only = []
|
||||
@@ -350,7 +399,7 @@ def normalize(value, dict_type=dict):
|
||||
no surrounding whitespace, underscore-delimited strings."""
|
||||
if isinstance(value, dict):
|
||||
normalized = dict_type()
|
||||
for k, v in value.iteritems():
|
||||
for k, v in value.items():
|
||||
key = k.strip().lower().replace(' ', '_')
|
||||
normalized[key] = normalize(v, dict_type)
|
||||
return normalized
|
||||
@@ -382,11 +431,16 @@ def escape_double_quotes(text):
|
||||
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\"', '\\\"')
|
||||
|
||||
|
||||
def escape_spaces(text):
|
||||
"""Escape spaces in the specified text"""
|
||||
return text.replace(' ', '\ ')
|
||||
|
||||
|
||||
def getch(count=1):
|
||||
"""Read ``count`` characters from standard input."""
|
||||
if os.name == 'nt':
|
||||
import msvcrt # pylint: disable=F0401
|
||||
return ''.join([msvcrt.getch() for _ in xrange(count)])
|
||||
return ''.join([msvcrt.getch() for _ in range(count)])
|
||||
else: # assume Unix
|
||||
import tty # NOQA
|
||||
import termios # NOQA
|
||||
@@ -413,6 +467,19 @@ def as_relative(path):
|
||||
return path.lstrip(os.sep)
|
||||
|
||||
|
||||
def commonprefix(file_list, sep=os.sep):
|
||||
"""
|
||||
Find the lowest common base folder of a passed list of files.
|
||||
"""
|
||||
common_path = os.path.commonprefix(file_list)
|
||||
cp_split = common_path.split(sep)
|
||||
other_split = file_list[0].split(sep)
|
||||
last = len(cp_split) - 1
|
||||
if cp_split[last] != other_split[last]:
|
||||
cp_split = cp_split[:-1]
|
||||
return sep.join(cp_split)
|
||||
|
||||
|
||||
def get_cpu_mask(cores):
|
||||
"""Return a string with the hex for the cpu mask for the specified core numbers."""
|
||||
mask = 0
|
||||
@@ -442,8 +509,8 @@ def which(name):
|
||||
return None
|
||||
|
||||
|
||||
_bash_color_regex = re.compile('\x1b\\[[0-9;]+m')
|
||||
|
||||
# This matches most ANSI escape sequences, not just colors
|
||||
_bash_color_regex = re.compile(r'\x1b\[[0-9;]*[a-zA-Z]')
|
||||
|
||||
def strip_bash_colors(text):
|
||||
return _bash_color_regex.sub('', text)
|
||||
@@ -451,7 +518,7 @@ def strip_bash_colors(text):
|
||||
|
||||
def get_random_string(length):
|
||||
"""Returns a random ASCII string of the specified length)."""
|
||||
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in xrange(length))
|
||||
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
|
||||
|
||||
|
||||
class LoadSyntaxError(Exception):
|
||||
@@ -468,13 +535,18 @@ class LoadSyntaxError(Exception):
|
||||
|
||||
RAND_MOD_NAME_LEN = 30
|
||||
BAD_CHARS = string.punctuation + string.whitespace
|
||||
TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
|
||||
if sys.version_info[0] == 3:
|
||||
TRANS_TABLE = str.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
|
||||
else:
|
||||
TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
|
||||
|
||||
|
||||
def to_identifier(text):
|
||||
"""Converts text to a valid Python identifier by replacing all
|
||||
whitespace and punctuation."""
|
||||
return re.sub('_+', '_', text.translate(TRANS_TABLE))
|
||||
whitespace and punctuation and adding a prefix if starting with a digit"""
|
||||
if text[:1].isdigit():
|
||||
text = '_' + text
|
||||
return re.sub('_+', '_', str(text).translate(TRANS_TABLE))
|
||||
|
||||
|
||||
def unique(alist):
|
||||
@@ -495,8 +567,8 @@ def ranges_to_list(ranges_string):
|
||||
values = []
|
||||
for rg in ranges_string.split(','):
|
||||
if '-' in rg:
|
||||
first, last = map(int, rg.split('-'))
|
||||
values.extend(xrange(first, last + 1))
|
||||
first, last = list(map(int, rg.split('-')))
|
||||
values.extend(range(first, last + 1))
|
||||
else:
|
||||
values.append(int(rg))
|
||||
return values
|
||||
@@ -505,8 +577,8 @@ def ranges_to_list(ranges_string):
|
||||
def list_to_ranges(values):
|
||||
"""Converts a list, e.g ``[0,2,3,4]``, into a sysfs-style ranges string, e.g. ``"0,2-4"``"""
|
||||
range_groups = []
|
||||
for _, g in groupby(enumerate(values), lambda (i, x): i - x):
|
||||
range_groups.append(map(itemgetter(1), g))
|
||||
for _, g in groupby(enumerate(values), lambda i_x: i_x[0] - i_x[1]):
|
||||
range_groups.append(list(map(itemgetter(1), g)))
|
||||
range_strings = []
|
||||
for group in range_groups:
|
||||
if len(group) == 1:
|
||||
@@ -529,24 +601,55 @@ def mask_to_list(mask):
|
||||
"""Converts the specfied integer bitmask into a list of
|
||||
indexes of bits that are set in the mask."""
|
||||
size = len(bin(mask)) - 2 # because of "0b"
|
||||
return [size - i - 1 for i in xrange(size)
|
||||
return [size - i - 1 for i in range(size)
|
||||
if mask & (1 << size - i - 1)]
|
||||
|
||||
|
||||
__memo_cache = {}
|
||||
|
||||
|
||||
def memoized(func):
|
||||
def reset_memo_cache():
|
||||
__memo_cache.clear()
|
||||
|
||||
|
||||
def __get_memo_id(obj):
|
||||
"""
|
||||
An object's id() may be re-used after an object is freed, so it's not
|
||||
sufficiently unique to identify params for the memo cache (two different
|
||||
params may end up with the same id). this attempts to generate a more unique
|
||||
ID string.
|
||||
"""
|
||||
obj_id = id(obj)
|
||||
try:
|
||||
return '{}/{}'.format(obj_id, hash(obj))
|
||||
except TypeError: # obj is not hashable
|
||||
obj_pyobj = ctypes.cast(obj_id, ctypes.py_object)
|
||||
# TODO: Note: there is still a possibility of a clash here. If Two
|
||||
# different objects get assigned the same ID, an are large and are
|
||||
# identical in the first thirty two bytes. This shouldn't be much of an
|
||||
# issue in the current application of memoizing Target calls, as it's very
|
||||
# unlikely that a target will get passed large params; but may cause
|
||||
# problems in other applications, e.g. when memoizing results of operations
|
||||
# on large arrays. I can't really think of a good way around that apart
|
||||
# form, e.g., md5 hashing the entire raw object, which will have an
|
||||
# undesirable impact on performance.
|
||||
num_bytes = min(ctypes.sizeof(obj_pyobj), 32)
|
||||
obj_bytes = ctypes.string_at(ctypes.addressof(obj_pyobj), num_bytes)
|
||||
return '{}/{}'.format(obj_id, obj_bytes)
|
||||
|
||||
|
||||
@wrapt.decorator
|
||||
def memoized(wrapped, instance, args, kwargs):
|
||||
"""A decorator for memoizing functions and methods."""
|
||||
func_id = repr(func)
|
||||
func_id = repr(wrapped)
|
||||
|
||||
def memoize_wrapper(*args, **kwargs):
|
||||
id_string = func_id + ','.join([str(id(a)) for a in args])
|
||||
id_string = func_id + ','.join([__get_memo_id(a) for a in args])
|
||||
id_string += ','.join('{}={}'.format(k, v)
|
||||
for k, v in kwargs.iteritems())
|
||||
for k, v in kwargs.items())
|
||||
if id_string not in __memo_cache:
|
||||
__memo_cache[id_string] = func(*args, **kwargs)
|
||||
__memo_cache[id_string] = wrapped(*args, **kwargs)
|
||||
return __memo_cache[id_string]
|
||||
|
||||
return memoize_wrapper
|
||||
return memoize_wrapper(*args, **kwargs)
|
||||
|
||||
|
536
devlib/utils/parse_aep.py
Executable file
536
devlib/utils/parse_aep.py
Executable file
@@ -0,0 +1,536 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Copyright 2018 Linaro Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import getopt
|
||||
import subprocess
|
||||
import logging
|
||||
import signal
|
||||
import serial
|
||||
import time
|
||||
import math
|
||||
|
||||
logger = logging.getLogger('aep-parser')
|
||||
|
||||
class AepParser(object):
|
||||
prepared = False
|
||||
|
||||
@staticmethod
|
||||
def topology_from_data(array, topo):
|
||||
# Extract topology information for the data file
|
||||
# The header of a data file looks like this ('#' included):
|
||||
# configuration: <file path>
|
||||
# config_name: <file name>
|
||||
# trigger: 0.400000V (hyst 0.200000V) 0.000000W (hyst 0.200000W) 400us
|
||||
# date: Fri, 10 Jun 2016 11:25:07 +0200
|
||||
# host: <host name>
|
||||
#
|
||||
# CHN_0 Pretty_name_0 PARENT_0 Color0 Class0
|
||||
# CHN_1 Pretty_name_1 PARENT_1 Color1 Class1
|
||||
# CHN_2 Pretty_name_2 PARENT_2 Color2 Class2
|
||||
# CHN_3 Pretty_name_3 PARENT_3 Color3 Class3
|
||||
# ..
|
||||
# CHN_N Pretty_name_N PARENT_N ColorN ClassN
|
||||
#
|
||||
|
||||
info = {}
|
||||
|
||||
if len(array) == 6:
|
||||
info['name'] = array[1]
|
||||
info['parent'] = array[3]
|
||||
info['pretty'] = array[2]
|
||||
# add an entry for both name and pretty name in order to not parse
|
||||
# the whole dict when looking for a parent and the parent's parent
|
||||
topo[array[1]] = info
|
||||
topo[array[2]] = info
|
||||
return topo
|
||||
|
||||
@staticmethod
|
||||
def create_virtual(topo, label, hide, duplicate):
|
||||
# Create a list of virtual power domains that are the sum of others
|
||||
# A virtual domain is the parent of several channels but is not sampled by a
|
||||
# channel
|
||||
# This can be useful if a power domain is supplied by 2 power rails
|
||||
virtual = {}
|
||||
|
||||
# Create an entry for each virtual parent
|
||||
for supply in topo.keys():
|
||||
index = topo[supply]['index']
|
||||
# Ignore hidden columns
|
||||
if hide[index]:
|
||||
continue
|
||||
|
||||
# Parent is in the topology
|
||||
parent = topo[supply]['parent']
|
||||
if parent in topo:
|
||||
continue
|
||||
|
||||
if parent not in virtual:
|
||||
virtual[parent] = { supply : index }
|
||||
|
||||
virtual[parent][supply] = index
|
||||
|
||||
# Remove parents with a single child as they don't give more information than their
|
||||
# child
|
||||
for supply in list(virtual.keys()):
|
||||
if len(virtual[supply]) == 1:
|
||||
del virtual[supply]
|
||||
|
||||
for supply in list(virtual.keys()):
|
||||
# Add label, hide and duplicate columns for virtual domains
|
||||
hide.append(0)
|
||||
duplicate.append(1)
|
||||
label.append(supply)
|
||||
|
||||
return virtual
|
||||
|
||||
@staticmethod
|
||||
def get_label(array):
|
||||
# Get the label of each column
|
||||
# Remove unit '(X)' from the end of the label
|
||||
label = [""]*len(array)
|
||||
unit = [""]*len(array)
|
||||
|
||||
label[0] = array[0]
|
||||
unit[0] = "(S)"
|
||||
for i in range(1,len(array)):
|
||||
label[i] = array[i][:-3]
|
||||
unit[i] = array[i][-3:]
|
||||
|
||||
return label, unit
|
||||
|
||||
@staticmethod
|
||||
def filter_column(label, unit, topo):
|
||||
# Filter columns
|
||||
# We don't parse Volt and Ampere columns: put them in the hide list
|
||||
# We don't add a column that is the child of another one to the total: put it in the duplicate list
|
||||
|
||||
# By default we hide all columns
|
||||
hide = [1] * len(label)
|
||||
# By default we assume that there is no child
|
||||
duplicate = [0] * len(label)
|
||||
|
||||
for i in range(len(label)):
|
||||
# We only care about time and Watt
|
||||
if label[i] == 'time':
|
||||
hide[i] = 0
|
||||
continue
|
||||
|
||||
if '(W)' not in unit[i]:
|
||||
continue
|
||||
|
||||
hide[i] = 0
|
||||
|
||||
# label is the pretty name
|
||||
pretty = label[i]
|
||||
|
||||
# We don't add a power domain that is already accounted for by its parent
|
||||
if topo[pretty]['parent'] in topo:
|
||||
duplicate[i] = 1
|
||||
|
||||
# Set index, that will be used by virtual domain
|
||||
topo[topo[pretty]['name']]['index'] = i
|
||||
|
||||
# remove pretty element that is useless now
|
||||
del topo[pretty]
|
||||
|
||||
return hide, duplicate
|
||||
|
||||
@staticmethod
|
||||
def parse_text(array, hide):
|
||||
data = [0]*len(array)
|
||||
for i in range(len(array)):
|
||||
if hide[i]:
|
||||
continue
|
||||
|
||||
try:
|
||||
data[i] = int(float(array[i])*1000000)
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def add_virtual_data(data, virtual):
|
||||
# write virtual domain
|
||||
for parent in virtual.keys():
|
||||
power = 0
|
||||
for child in list(virtual[parent].values()):
|
||||
try:
|
||||
power += data[child]
|
||||
except IndexError:
|
||||
continue
|
||||
data.append(power)
|
||||
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def delta_nrj(array, delta, min, max, hide):
|
||||
# Compute the energy consumed in this time slice and add it
|
||||
# delta[0] is used to save the last time stamp
|
||||
|
||||
if (delta[0] < 0):
|
||||
delta[0] = array[0]
|
||||
|
||||
time = array[0] - delta[0]
|
||||
if (time <= 0):
|
||||
return delta
|
||||
|
||||
for i in range(len(array)):
|
||||
if hide[i]:
|
||||
continue
|
||||
|
||||
try:
|
||||
data = array[i]
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
if (data < min[i]):
|
||||
min[i] = data
|
||||
if (data > max[i]):
|
||||
max[i] = data
|
||||
delta[i] += time * data
|
||||
|
||||
# save last time stamp
|
||||
delta[0] = array[0]
|
||||
|
||||
return delta
|
||||
|
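An illustrative walk-through of delta_nrj with made-up numbers: the first sample only records the starting timestamp, and every later sample adds (time slice) * (power) to the per-channel accumulator while tracking per-column minima and maxima.

from devlib.utils.parse_aep import AepParser   # assumed import path

# Two columns: time plus one power rail, both already in micro-units.
delta = [-1, 0]                    # delta[0] < 0 means "no previous timestamp yet"
min_v = [100000000, 100000000]
max_v = [0, 0]
hide = [0, 0]

AepParser.delta_nrj([1000, 2000000], delta, min_v, max_v, hide)   # only stores t = 1000 us
AepParser.delta_nrj([1400, 3000000], delta, min_v, max_v, hide)   # adds 400 us * 3000000 uW
assert delta[1] == 400 * 3000000   # accumulated energy for the power column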
||||
def output_label(self, label, hide):
|
||||
self.fo.write(label[0]+"(uS)")
|
||||
for i in range(1, len(label)):
|
||||
if hide[i]:
|
||||
continue
|
||||
self.fo.write(" "+label[i]+"(uW)")
|
||||
|
||||
self.fo.write("\n")
|
||||
|
||||
def output_power(self, array, hide):
|
||||
# skip a partial line, most probably the last one
|
||||
if len(array) < len(hide):
|
||||
return
|
||||
|
||||
# write non-hidden columns
|
||||
self.fo.write(str(array[0]))
|
||||
for i in range(1, len(array)):
|
||||
if hide[i]:
|
||||
continue
|
||||
|
||||
self.fo.write(" "+str(array[i]))
|
||||
|
||||
self.fo.write("\n")
|
||||
|
||||
def prepare(self, infile, outfile, summaryfile):
|
||||
|
||||
try:
|
||||
self.fi = open(infile, "r")
|
||||
except IOError:
|
||||
logger.warn('Unable to open input file {}'.format(infile))
|
||||
logger.warn('Usage: parse_aep.py -i <inputfile> [-o <outputfile>]')
|
||||
sys.exit(2)
|
||||
|
||||
self.parse = True
|
||||
if len(outfile) > 0:
|
||||
try:
|
||||
self.fo = open(outfile, "w")
|
||||
except IOError:
|
||||
logger.warn('Unable to create {}'.format(outfile))
|
||||
self.parse = False
|
||||
else:
|
||||
self.parse = False
|
||||
|
||||
self.summary = True
|
||||
if len(summaryfile) > 0:
|
||||
try:
|
||||
self.fs = open(summaryfile, "w")
|
||||
except IOError:
|
||||
logger.warn('Unable to create {}'.format(summaryfile))
|
||||
self.fs = sys.stdout
|
||||
else:
|
||||
self.fs = sys.stdout
|
||||
|
||||
self.prepared = True
|
||||
|
||||
def unprepare(self):
|
||||
if not self.prepared:
|
||||
# nothing has been prepared
|
||||
return
|
||||
|
||||
self.fi.close()
|
||||
|
||||
if self.parse:
|
||||
self.fo.close()
|
||||
|
||||
self.prepared = False
|
||||
|
||||
def parse_aep(self, start=0, length=-1):
|
||||
# Parse aep data and calculate the energy consumed
|
||||
begin = 0
|
||||
|
||||
label_line = 1
|
||||
|
||||
topo = {}
|
||||
|
||||
lines = self.fi.readlines()
|
||||
|
||||
for myline in lines:
|
||||
array = myline.split()
|
||||
|
||||
if "#" in myline:
|
||||
# update power topology
|
||||
topo = self.topology_from_data(array, topo)
|
||||
continue
|
||||
|
||||
if label_line:
|
||||
label_line = 0
|
||||
# 1st line not starting with # gives label of each column
|
||||
label, unit = self.get_label(array)
|
||||
# hide useless columns and detect channels that are children
|
||||
# of other channels
|
||||
hide, duplicate = self.filter_column(label, unit, topo)
|
||||
|
||||
# Create virtual power domains
|
||||
virtual = self.create_virtual(topo, label, hide, duplicate)
|
||||
if self.parse:
|
||||
self.output_label(label, hide)
|
||||
|
||||
logger.debug('Topology : {}'.format(topo))
|
||||
logger.debug('Virtual power domain : {}'.format(virtual))
|
||||
logger.debug('Duplicated power domain : {}'.format(duplicate))
|
||||
logger.debug('Name of columns : {}'.format(label))
|
||||
logger.debug('Hidden columns : {}'.format(hide))
|
||||
logger.debug('Unit of columns : {}'.format(unit))
|
||||
|
||||
# Init arrays
|
||||
nrj = [0]*len(label)
|
||||
min = [100000000]*len(label)
|
||||
max = [0]*len(label)
|
||||
offset = [0]*len(label)
|
||||
|
||||
continue
|
||||
|
||||
# convert text to int and unit to micro-unit
|
||||
data = self.parse_text(array, hide)
|
||||
|
||||
# get 1st time stamp
|
||||
if begin <= 0:
|
||||
begin = data[0]
|
||||
|
||||
# skip data before start
|
||||
if (data[0]-begin) < start:
|
||||
continue
|
||||
|
||||
# stop after length
|
||||
if length >= 0 and (data[0]-begin) > (start + length):
|
||||
continue
|
||||
|
||||
# add virtual domains
|
||||
data = self.add_virtual_data(data, virtual)
|
||||
|
||||
# extract power figures
|
||||
self.delta_nrj(data, nrj, min, max, hide)
|
||||
|
||||
# write data into new file
|
||||
if self.parse:
|
||||
self.output_power(data, hide)
|
||||
|
||||
# if there is no data just return
|
||||
if label_line or len(nrj) == 1:
|
||||
raise ValueError('No data found in the data file. Please check the Arm Energy Probe')
|
||||
return
|
||||
|
||||
# display energy consumption of each channel and total energy consumption
|
||||
total = 0
|
||||
results_table = {}
|
||||
for i in range(1, len(nrj)):
|
||||
if hide[i]:
|
||||
continue
|
||||
|
||||
nrj[i] -= offset[i] * nrj[0]
|
||||
|
||||
total_nrj = nrj[i]/1000000000000.0
|
||||
duration = (max[0]-min[0])/1000000.0
|
||||
channel_name = label[i]
|
||||
average_power = total_nrj/duration
|
||||
|
||||
self.fs.write("Total nrj: %8.3f J for %s -- duration %8.3f sec -- min %8.3f W -- max %8.3f W\n" % (nrj[i]/1000000000000.0, label[i], (max[0]-min[0])/1000000.0, min[i]/1000000.0, max[i]/1000000.0))
|
||||
|
||||
# store each AEP channel info except Platform in the results table
|
||||
results_table[channel_name] = total_nrj, average_power
|
||||
|
||||
if (min[i] < offset[i]):
|
||||
self.fs.write ("!!! Min below offset\n")
|
||||
|
||||
if duplicate[i]:
|
||||
continue
|
||||
|
||||
total += nrj[i]
|
||||
|
||||
self.fs.write ("Total nrj: %8.3f J for %s -- duration %8.3f sec\n" % (total/1000000000000.0, "Platform ", (max[0]-min[0])/1000000.0))
|
||||
|
||||
total_nrj = total/1000000000000.0
|
||||
duration = (max[0]-min[0])/1000000.0
|
||||
average_power = total_nrj/duration
|
||||
|
||||
# store AEP Platform channel info in the results table
|
||||
results_table["Platform"] = total_nrj, average_power
|
||||
|
||||
return results_table
|
||||
|
||||
def topology_from_config(self, topofile):
|
||||
try:
|
||||
ft = open(topofile, "r")
|
||||
except IOError:
|
||||
logger.warn('Unable to open config file {}'.format(topofile))
|
||||
return
|
||||
lines = ft.readlines()
|
||||
|
||||
topo = {}
|
||||
virtual = {}
|
||||
name = ""
|
||||
offset = 0
|
||||
index = 0
|
||||
#parse config file
|
||||
for myline in lines:
|
||||
if myline.startswith("#"):
|
||||
# skip comment
|
||||
continue
|
||||
|
||||
if myline == "\n":
|
||||
# skip empty line
|
||||
continue
|
||||
|
||||
if name == "":
|
||||
# 1st valid line is the config's name
|
||||
name = myline
|
||||
continue
|
||||
|
||||
if not myline.startswith((' ', '\t')):
|
||||
# new device path
|
||||
offset = index
|
||||
continue
|
||||
|
||||
# Get parameters of channel configuration
|
||||
items = myline.split()
|
||||
|
||||
info = {}
|
||||
info['name'] = items[0]
|
||||
info['parent'] = items[9]
|
||||
info['pretty'] = items[8]
|
||||
info['index'] = int(items[2])+offset
|
||||
|
||||
# Add channel
|
||||
topo[items[0]] = info
|
||||
|
||||
# Increase index
|
||||
index += 1
|
||||
|
||||
|
||||
# Create an entry for each virtual parent
|
||||
for supply in topo.keys():
|
||||
# Parent is in the topology
|
||||
parent = topo[supply]['parent']
|
||||
if parent in topo:
|
||||
continue
|
||||
|
||||
if parent not in virtual:
|
||||
virtual[parent] = { supply : topo[supply]['index'] }
|
||||
|
||||
virtual[parent][supply] = topo[supply]['index']
|
||||
|
||||
|
||||
# Remove parents with a single child as they don't give more information than their
|
||||
# child
|
||||
for supply in list(virtual.keys()):
|
||||
if len(virtual[supply]) == 1:
|
||||
del virtual[supply]
|
||||
|
||||
topo_list = ['']*(1+len(topo)+len(virtual))
|
||||
topo_list[0] = 'time'
|
||||
for chnl in topo.keys():
|
||||
topo_list[topo[chnl]['index']] = chnl
|
||||
for chnl in virtual.keys():
|
||||
index += 1
|
||||
topo_list[index] = chnl
|
||||
|
||||
ft.close()
|
||||
|
||||
return topo_list
|
||||
|
||||
def __del__(self):
|
||||
self.unprepare()
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
def handleSigTERM(signum, frame):
|
||||
sys.exit(2)
|
||||
|
||||
signal.signal(signal.SIGTERM, handleSigTERM)
|
||||
signal.signal(signal.SIGINT, handleSigTERM)
|
||||
|
||||
logger.setLevel(logging.WARN)
|
||||
ch = logging.StreamHandler()
|
||||
ch.setLevel(logging.DEBUG)
|
||||
logger.addHandler(ch)
|
||||
|
||||
infile = ""
|
||||
outfile = ""
|
||||
figurefile = ""
|
||||
start = 0
|
||||
length = -1
|
||||
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "i:vo:s:l:t:")
|
||||
except getopt.GetoptError as err:
|
||||
print(str(err)) # will print something like "option -a not recognized"
|
||||
sys.exit(2)
|
||||
|
||||
for o, a in opts:
|
||||
if o == "-i":
|
||||
infile = a
|
||||
if o == "-v":
|
||||
logger.setLevel(logging.DEBUG)
|
||||
if o == "-o":
|
||||
parse = True
|
||||
outfile = a
|
||||
if o == "-s":
|
||||
start = int(float(a)*1000000)
|
||||
if o == "-l":
|
||||
length = int(float(a)*1000000)
|
||||
if o == "-t":
|
||||
topofile = a
|
||||
parser = AepParser()
|
||||
print(parser.topology_from_config(topofile))
|
||||
exit(0)
|
||||
|
||||
parser = AepParser()
|
||||
parser.prepare(infile, outfile, figurefile)
|
||||
parser.parse_aep(start, length)
|
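A minimal programmatic usage sketch of AepParser, equivalent to the command-line flow above; the file names are illustrative.

from devlib.utils.parse_aep import AepParser

parser = AepParser()
# prepare() opens the input capture and, optionally, an output file for the
# filtered samples and a summary file for the per-channel figures.
parser.prepare('capture.aep', 'filtered.txt', 'summary.txt')
try:
    # Returns {channel: (energy_joules, average_power_watts)}, including a
    # synthetic "Platform" entry summing the non-duplicated domains.
    results = parser.parse_aep()
    for channel, (energy, power) in results.items():
        print('{}: {:.3f} J, {:.3f} W'.format(channel, energy, power))
finally:
    parser.unprepare()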
272
devlib/utils/rendering.py
Normal file
@@ -0,0 +1,272 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
from collections import namedtuple, OrderedDict
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from devlib.exception import WorkerThreadError, TargetNotRespondingError, TimeoutError
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
|
||||
|
||||
logger = logging.getLogger('rendering')
|
||||
|
||||
SurfaceFlingerFrame = namedtuple('SurfaceFlingerFrame',
|
||||
'desired_present_time actual_present_time frame_ready_time')
|
||||
|
||||
VSYNC_INTERVAL = 16666667
|
||||
|
||||
|
||||
class FrameCollector(threading.Thread):
|
||||
|
||||
def __init__(self, target, period):
|
||||
super(FrameCollector, self).__init__()
|
||||
self.target = target
|
||||
self.period = period
|
||||
self.stop_signal = threading.Event()
|
||||
self.frames = []
|
||||
|
||||
self.temp_file = None
|
||||
self.refresh_period = None
|
||||
self.drop_threshold = None
|
||||
self.unresponsive_count = 0
|
||||
self.last_ready_time = 0
|
||||
self.exc = None
|
||||
self.header = None
|
||||
|
||||
def run(self):
|
||||
logger.debug('Surface flinger frame data collection started.')
|
||||
try:
|
||||
self.stop_signal.clear()
|
||||
fd, self.temp_file = tempfile.mkstemp()
|
||||
logger.debug('temp file: {}'.format(self.temp_file))
|
||||
wfh = os.fdopen(fd, 'wb')
|
||||
try:
|
||||
while not self.stop_signal.is_set():
|
||||
self.collect_frames(wfh)
|
||||
time.sleep(self.period)
|
||||
finally:
|
||||
wfh.close()
|
||||
except (TargetNotRespondingError, TimeoutError): # pylint: disable=W0703
|
||||
raise
|
||||
except Exception as e: # pylint: disable=W0703
|
||||
logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e))
|
||||
self.exc = WorkerThreadError(self.name, sys.exc_info())
|
||||
logger.debug('Surface flinger frame data collection stopped.')
|
||||
|
||||
def stop(self):
|
||||
self.stop_signal.set()
|
||||
self.join()
|
||||
if self.unresponsive_count:
|
||||
message = 'FrameCollector was unresponsive {} times.'.format(self.unresponsive_count)
|
||||
if self.unresponsive_count > 10:
|
||||
logger.warning(message)
|
||||
else:
|
||||
logger.debug(message)
|
||||
if self.exc:
|
||||
raise self.exc # pylint: disable=E0702
|
||||
|
||||
def process_frames(self, outfile=None):
|
||||
if not self.temp_file:
|
||||
raise RuntimeError('Attempting to process frames before running the collector')
|
||||
with open(self.temp_file) as fh:
|
||||
self._process_raw_file(fh)
|
||||
if outfile:
|
||||
shutil.copy(self.temp_file, outfile)
|
||||
os.unlink(self.temp_file)
|
||||
self.temp_file = None
|
||||
|
||||
def write_frames(self, outfile, columns=None):
|
||||
if columns is None:
|
||||
header = self.header
|
||||
frames = self.frames
|
||||
else:
|
||||
indexes = []
|
||||
for c in columns:
|
||||
if c not in self.header:
|
||||
msg = 'Invalid column "{}"; must be in {}'
|
||||
raise ValueError(msg.format(c, self.header))
|
||||
indexes.append(self.header.index(c))
|
||||
frames = [[f[i] for i in indexes] for f in self.frames]
|
||||
header = columns
|
||||
with csvwriter(outfile) as writer:
|
||||
if header:
|
||||
writer.writerow(header)
|
||||
writer.writerows(frames)
|
||||
|
||||
def collect_frames(self, wfh):
|
||||
raise NotImplementedError()
|
||||
|
||||
def clear(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _process_raw_file(self, fh):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class SurfaceFlingerFrameCollector(FrameCollector):
|
||||
|
||||
def __init__(self, target, period, view, header=None):
|
||||
super(SurfaceFlingerFrameCollector, self).__init__(target, period)
|
||||
self.view = view
|
||||
self.header = header or SurfaceFlingerFrame._fields
|
||||
|
||||
def collect_frames(self, wfh):
|
||||
for activity in self.list():
|
||||
if activity == self.view:
|
||||
wfh.write(self.get_latencies(activity))
|
||||
|
||||
def clear(self):
|
||||
self.target.execute('dumpsys SurfaceFlinger --latency-clear ')
|
||||
|
||||
def get_latencies(self, activity):
|
||||
cmd = 'dumpsys SurfaceFlinger --latency "{}"'
|
||||
return self.target.execute(cmd.format(activity))
|
||||
|
||||
def list(self):
|
||||
text = self.target.execute('dumpsys SurfaceFlinger --list')
|
||||
return text.replace('\r\n', '\n').replace('\r', '\n').split('\n')
|
||||
|
||||
def _process_raw_file(self, fh):
|
||||
text = fh.read().replace('\r\n', '\n').replace('\r', '\n')
|
||||
for line in text.split('\n'):
|
||||
line = line.strip()
|
||||
if line:
|
||||
self._process_trace_line(line)
|
||||
|
||||
def _process_trace_line(self, line):
|
||||
parts = line.split()
|
||||
if len(parts) == 3:
|
||||
frame = SurfaceFlingerFrame(*list(map(int, parts)))
|
||||
if not frame.frame_ready_time:
|
||||
return # "null" frame
|
||||
if frame.frame_ready_time <= self.last_ready_time:
|
||||
return # duplicate frame
|
||||
if (frame.frame_ready_time - frame.desired_present_time) > self.drop_threshold:
|
||||
logger.debug('Dropping bogus frame {}.'.format(line))
|
||||
return # bogus data
|
||||
self.last_ready_time = frame.frame_ready_time
|
||||
self.frames.append(frame)
|
||||
elif len(parts) == 1:
|
||||
self.refresh_period = int(parts[0])
|
||||
self.drop_threshold = self.refresh_period * 1000
|
||||
elif 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
|
||||
self.unresponsive_count += 1
|
||||
else:
|
||||
logger.warning('Unexpected SurfaceFlinger dump output: {}'.format(line))
|
||||
|
||||
|
||||
def read_gfxinfo_columns(target):
|
||||
output = target.execute('dumpsys gfxinfo --list framestats')
|
||||
lines = iter(output.split('\n'))
|
||||
for line in lines:
|
||||
if line.startswith('---PROFILEDATA---'):
|
||||
break
|
||||
columns_line = next(lines)
|
||||
return columns_line.split(',')[:-1] # has a trailing ','
|
||||
|
||||
|
||||
class GfxinfoFrameCollector(FrameCollector):
|
||||
|
||||
def __init__(self, target, period, package, header=None):
|
||||
super(GfxinfoFrameCollector, self).__init__(target, period)
|
||||
self.package = package
|
||||
self.header = None
|
||||
self._init_header(header)
|
||||
|
||||
def collect_frames(self, wfh):
|
||||
cmd = 'dumpsys gfxinfo {} framestats'
|
||||
wfh.write(self.target.execute(cmd.format(self.package)))
|
||||
|
||||
def clear(self):
|
||||
pass
|
||||
|
||||
def _init_header(self, header):
|
||||
if header is not None:
|
||||
self.header = header
|
||||
else:
|
||||
self.header = read_gfxinfo_columns(self.target)
|
||||
|
||||
def _process_raw_file(self, fh):
|
||||
found = False
|
||||
try:
|
||||
last_vsync = 0
|
||||
while True:
|
||||
for line in fh:
|
||||
if line.startswith('---PROFILEDATA---'):
|
||||
found = True
|
||||
break
|
||||
|
||||
next(fh) # headers
|
||||
for line in fh:
|
||||
if line.startswith('---PROFILEDATA---'):
|
||||
break
|
||||
entries = list(map(int, line.strip().split(',')[:-1])) # has a trailing ','
|
||||
if entries[1] <= last_vsync:
|
||||
continue # repeat frame
|
||||
last_vsync = entries[1]
|
||||
self.frames.append(entries)
|
||||
except StopIteration:
|
||||
pass
|
||||
if not found:
|
||||
logger.warning('Could not find frames data in gfxinfo output')
|
||||
return
|
||||
|
||||
|
||||
def _file_reverse_iter(fh, buf_size=1024):
|
||||
fh.seek(0, os.SEEK_END)
|
||||
offset = 0
|
||||
file_size = remaining_size = fh.tell()
|
||||
while remaining_size > 0:
|
||||
offset = min(file_size, offset + buf_size)
|
||||
fh.seek(file_size - offset)
|
||||
buf = fh.read(min(remaining_size, buf_size))
|
||||
remaining_size -= buf_size
|
||||
yield buf
|
||||
|
||||
|
||||
def gfxinfo_get_last_dump(filepath):
|
||||
"""
|
||||
Return the last gfxinfo dump from the frame collector's raw output.
|
||||
|
||||
"""
|
||||
record = ''
|
||||
with open(filepath, 'r') as fh:
|
||||
fh_iter = _file_reverse_iter(fh)
|
||||
try:
|
||||
while True:
|
||||
buf = next(fh_iter)
|
||||
ix = buf.find('** Graphics')
|
||||
if ix >= 0:
|
||||
return buf[ix:] + record
|
||||
|
||||
ix = buf.find(' **\n')
|
||||
if ix >= 0:
|
||||
buf = next(fh_iter) + buf
|
||||
ix = buf.find('** Graphics')
|
||||
if ix < 0:
|
||||
msg = '"{}" appears to be corrupted'
|
||||
raise RuntimeError(msg.format(filepath))
|
||||
return buf[ix:] + record
|
||||
record = buf + record
|
||||
except StopIteration:
|
||||
pass
|
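A minimal sketch of driving the SurfaceFlinger frame collector; `target` stands in for an already-connected devlib AndroidTarget and the view name is illustrative.

from devlib.utils.rendering import SurfaceFlingerFrameCollector

view = 'com.example.app/com.example.app.MainActivity'   # illustrative
collector = SurfaceFlingerFrameCollector(target, period=2, view=view)
collector.start()                    # threading.Thread.start(); polls dumpsys every `period` seconds
run_workload()                       # placeholder for the workload being measured
collector.stop()                     # sets the stop signal, joins the thread, re-raises worker errors
collector.process_frames('raw_frames.txt')   # parse the raw dumps, keep a copy of the raw file
collector.write_frames('frames.csv')         # write the parsed frames out as CSV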
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
# Copyright 2013-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -32,6 +32,14 @@ from pexpect import EOF, TIMEOUT # NOQA pylint: disable=W0611
|
||||
from devlib.exception import HostError
|
||||
|
||||
|
||||
class SerialLogger(Logger):
|
||||
|
||||
write = Logger.debug
|
||||
|
||||
def flush(self):
|
||||
pass
|
||||
|
||||
|
||||
def pulse_dtr(conn, state=True, duration=0.1):
|
||||
"""Set the DTR line of the specified serial connection to the specified state
|
||||
for the specified duration (note: the initial state of the line is *not* checked)."""
|
||||
@@ -40,19 +48,19 @@ def pulse_dtr(conn, state=True, duration=0.1):
|
||||
conn.setDTR(not state)
|
||||
|
||||
|
||||
def get_connection(timeout, init_dtr=None, logcls=Logger,
|
||||
*args, **kwargs):
|
||||
def get_connection(timeout, init_dtr=None, logcls=SerialLogger,
|
||||
logfile=None, *args, **kwargs):
|
||||
if init_dtr is not None:
|
||||
kwargs['dsrdtr'] = True
|
||||
try:
|
||||
conn = serial.Serial(*args, **kwargs)
|
||||
except serial.SerialException as e:
|
||||
raise HostError(e.message)
|
||||
raise HostError(str(e))
|
||||
if init_dtr is not None:
|
||||
conn.setDTR(init_dtr)
|
||||
conn.nonblocking()
|
||||
conn.flushOutput()
|
||||
target = fdpexpect.fdspawn(conn.fileno(), timeout=timeout)
|
||||
target = fdpexpect.fdspawn(conn.fileno(), timeout=timeout, logfile=logfile)
|
||||
target.logfile_read = logcls('read')
|
||||
target.logfile_send = logcls('send')
|
||||
|
||||
@@ -83,7 +91,7 @@ def write_characters(conn, line, delay=0.05):
|
||||
|
||||
@contextmanager
|
||||
def open_serial_connection(timeout, get_conn=False, init_dtr=None,
|
||||
logcls=Logger, *args, **kwargs):
|
||||
logcls=SerialLogger, *args, **kwargs):
|
||||
"""
|
||||
Opens a serial connection to a device.
|
||||
|
||||
|
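A minimal sketch of opening a serial connection with the helper above; the module path, device node and baud rate are illustrative, and extra keyword arguments are forwarded to serial.Serial().

from devlib.utils.serial_port import open_serial_connection   # assumed module path

with open_serial_connection(timeout=30, init_dtr=False,
                            port='/dev/ttyUSB0', baudrate=115200) as target:
    # The context manager is assumed to yield a pexpect fdspawn wrapping the
    # serial file descriptor, so the usual sendline/expect API applies.
    target.sendline('')
    target.expect('login:')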
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
# Copyright 2014-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -22,6 +22,9 @@ import re
|
||||
import threading
|
||||
import tempfile
|
||||
import shutil
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
|
||||
import pexpect
|
||||
from distutils.version import StrictVersion as V
|
||||
@@ -32,31 +35,44 @@ else:
|
||||
from pexpect import EOF, TIMEOUT, spawn
|
||||
|
||||
from devlib.exception import HostError, TargetError, TimeoutError
|
||||
from devlib.utils.misc import which, strip_bash_colors, escape_single_quotes, check_output
|
||||
from devlib.utils.misc import which, strip_bash_colors, check_output
|
||||
from devlib.utils.misc import (escape_single_quotes, escape_double_quotes,
|
||||
escape_spaces)
|
||||
from devlib.utils.types import boolean
|
||||
|
||||
|
||||
ssh = None
|
||||
scp = None
|
||||
sshpass = None
|
||||
|
||||
|
||||
logger = logging.getLogger('ssh')
|
||||
gem5_logger = logging.getLogger('gem5-connection')
|
||||
|
||||
|
||||
def ssh_get_shell(host, username, password=None, keyfile=None, port=None, timeout=10, telnet=False):
|
||||
def ssh_get_shell(host, username, password=None, keyfile=None, port=None, timeout=10, telnet=False, original_prompt=None):
|
||||
_check_env()
|
||||
if telnet:
|
||||
if keyfile:
|
||||
raise ValueError('keyfile may not be used with a telnet connection.')
|
||||
conn = TelnetConnection()
|
||||
else: # ssh
|
||||
conn = pxssh.pxssh()
|
||||
try:
|
||||
if keyfile:
|
||||
conn.login(host, username, ssh_key=keyfile, port=port, login_timeout=timeout)
|
||||
else:
|
||||
conn.login(host, username, password, port=port, login_timeout=timeout)
|
||||
except EOF:
|
||||
raise TargetError('Could not connect to {}; is the host name correct?'.format(host))
|
||||
start_time = time.time()
|
||||
while True:
|
||||
if telnet:
|
||||
if keyfile:
|
||||
raise ValueError('keyfile may not be used with a telnet connection.')
|
||||
conn = TelnetPxssh(original_prompt=original_prompt)
|
||||
else: # ssh
|
||||
conn = pxssh.pxssh()
|
||||
|
||||
try:
|
||||
if keyfile:
|
||||
conn.login(host, username, ssh_key=keyfile, port=port, login_timeout=timeout)
|
||||
else:
|
||||
conn.login(host, username, password, port=port, login_timeout=timeout)
|
||||
break
|
||||
except EOF:
|
||||
timeout -= time.time() - start_time
|
||||
if timeout <= 0:
|
||||
message = 'Could not connect to {}; is the host name correct?'
|
||||
raise TargetError(message.format(host))
|
||||
time.sleep(5)
|
||||
|
||||
conn.setwinsize(500,200)
|
||||
conn.sendline('')
|
||||
conn.prompt()
|
||||
@@ -64,23 +80,37 @@ def ssh_get_shell(host, username, password=None, keyfile=None, port=None, timeou
|
||||
return conn
|
||||
|
||||
|
||||
class TelnetConnection(pxssh.pxssh):
|
||||
class TelnetPxssh(pxssh.pxssh):
|
||||
# pylint: disable=arguments-differ
|
||||
|
||||
def login(self, server, username, password='', original_prompt=r'[#$]', login_timeout=10,
|
||||
auto_prompt_reset=True, sync_multiplier=1):
|
||||
cmd = 'telnet -l {} {}'.format(username, server)
|
||||
def __init__(self, original_prompt):
|
||||
super(TelnetPxssh, self).__init__()
|
||||
self.original_prompt = original_prompt or r'[#$]'
|
||||
|
||||
def login(self, server, username, password='', login_timeout=10,
|
||||
auto_prompt_reset=True, sync_multiplier=1, port=23):
|
||||
args = ['telnet']
|
||||
if username is not None:
|
||||
args += ['-l', username]
|
||||
args += [server, str(port)]
|
||||
cmd = ' '.join(args)
|
||||
|
||||
spawn._spawn(self, cmd) # pylint: disable=protected-access
|
||||
i = self.expect('(?i)(?:password)', timeout=login_timeout)
|
||||
if i == 0:
|
||||
self.sendline(password)
|
||||
i = self.expect([original_prompt, 'Login incorrect'], timeout=login_timeout)
|
||||
else:
|
||||
raise pxssh.ExceptionPxssh('could not log in: did not see a password prompt')
|
||||
|
||||
if i:
|
||||
raise pxssh.ExceptionPxssh('could not log in: password was incorrect')
|
||||
try:
|
||||
i = self.expect('(?i)(?:password)', timeout=login_timeout)
|
||||
if i == 0:
|
||||
self.sendline(password)
|
||||
i = self.expect([self.original_prompt, 'Login incorrect'], timeout=login_timeout)
|
||||
if i:
|
||||
raise pxssh.ExceptionPxssh('could not log in: password was incorrect')
|
||||
except TIMEOUT:
|
||||
if not password:
|
||||
# No password prompt before TIMEOUT & no password provided
|
||||
# so assume everything is okay
|
||||
pass
|
||||
else:
|
||||
raise pxssh.ExceptionPxssh('could not log in: did not see a password prompt')
|
||||
|
||||
if not self.sync_original_prompt(sync_multiplier):
|
||||
self.close()
|
||||
@@ -117,6 +147,7 @@ class SshConnection(object):
|
||||
|
||||
default_password_prompt = '[sudo] password'
|
||||
max_cancel_attempts = 5
|
||||
default_timeout = 10
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
@@ -128,9 +159,12 @@ class SshConnection(object):
|
||||
password=None,
|
||||
keyfile=None,
|
||||
port=None,
|
||||
timeout=10,
|
||||
timeout=None,
|
||||
telnet=False,
|
||||
password_prompt=None,
|
||||
original_prompt=None,
|
||||
platform=None,
|
||||
sudo_cmd="sudo -- sh -c '{}'"
|
||||
):
|
||||
self.host = host
|
||||
self.username = username
|
||||
@@ -139,39 +173,62 @@ class SshConnection(object):
|
||||
self.port = port
|
||||
self.lock = threading.Lock()
|
||||
self.password_prompt = password_prompt if password_prompt is not None else self.default_password_prompt
|
||||
self.sudo_cmd = sudo_cmd
|
||||
logger.debug('Logging in {}@{}'.format(username, host))
|
||||
self.conn = ssh_get_shell(host, username, password, self.keyfile, port, timeout, telnet)
|
||||
timeout = timeout if timeout is not None else self.default_timeout
|
||||
self.conn = ssh_get_shell(host, username, password, self.keyfile, port, timeout, False, None)
|
||||
|
||||
def push(self, source, dest, timeout=30):
|
||||
dest = '{}@{}:{}'.format(self.username, self.host, dest)
|
||||
dest = '"{}"@"{}":"{}"'.format(escape_double_quotes(self.username),
|
||||
escape_spaces(escape_double_quotes(self.host)),
|
||||
escape_spaces(escape_double_quotes(dest)))
|
||||
return self._scp(source, dest, timeout)
|
||||
|
||||
def pull(self, source, dest, timeout=30):
|
||||
source = '{}@{}:{}'.format(self.username, self.host, source)
|
||||
source = '"{}"@"{}":"{}"'.format(escape_double_quotes(self.username),
|
||||
escape_spaces(escape_double_quotes(self.host)),
|
||||
escape_spaces(escape_double_quotes(source)))
|
||||
return self._scp(source, dest, timeout)
|
||||
|
||||
def execute(self, command, timeout=None, check_exit_code=True, as_root=False, strip_colors=True):
|
||||
with self.lock:
|
||||
output = self._execute_and_wait_for_prompt(command, timeout, as_root, strip_colors)
|
||||
if check_exit_code:
|
||||
exit_code_text = self._execute_and_wait_for_prompt('echo $?', strip_colors=strip_colors, log=False)
|
||||
try:
|
||||
exit_code = int(exit_code_text.split()[0])
|
||||
if exit_code:
|
||||
message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'
|
||||
raise TargetError(message.format(exit_code, command, output))
|
||||
except (ValueError, IndexError):
|
||||
logger.warning('Could not get exit code for "{}",\ngot: "{}"'.format(command, exit_code_text))
|
||||
return output
|
||||
def execute(self, command, timeout=None, check_exit_code=True,
|
||||
as_root=False, strip_colors=True): #pylint: disable=unused-argument
|
||||
if command == '':
|
||||
# Empty command is valid but the __devlib_ec stuff below will
|
||||
# produce a syntax error with bash. Treat as a special case.
|
||||
return ''
|
||||
try:
|
||||
with self.lock:
|
||||
_command = '({}); __devlib_ec=$?; echo; echo $__devlib_ec'.format(command)
|
||||
raw_output = self._execute_and_wait_for_prompt(
|
||||
_command, timeout, as_root, strip_colors)
|
||||
output, exit_code_text, _ = raw_output.rsplit('\r\n', 2)
|
||||
if check_exit_code:
|
||||
try:
|
||||
exit_code = int(exit_code_text)
|
||||
if exit_code:
|
||||
message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'
|
||||
raise TargetError(message.format(exit_code, command, output))
|
||||
except (ValueError, IndexError):
|
||||
logger.warning(
|
||||
'Could not get exit code for "{}",\ngot: "{}"'\
|
||||
.format(command, exit_code_text))
|
||||
return output
|
||||
except EOF:
|
||||
raise TargetError('Connection lost.')
|
||||
|
||||
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
|
||||
port_string = '-p {}'.format(self.port) if self.port else ''
|
||||
keyfile_string = '-i {}'.format(self.keyfile) if self.keyfile else ''
|
||||
command = '{} {} {} {}@{} {}'.format(ssh, keyfile_string, port_string, self.username, self.host, command)
|
||||
logger.debug(command)
|
||||
if self.password:
|
||||
command = _give_password(self.password, command)
|
||||
return subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True)
|
||||
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
|
||||
try:
|
||||
port_string = '-p {}'.format(self.port) if self.port else ''
|
||||
keyfile_string = '-i {}'.format(self.keyfile) if self.keyfile else ''
|
||||
if as_root:
|
||||
command = self.sudo_cmd.format(command)
|
||||
command = '{} {} {} {}@{} {}'.format(ssh, keyfile_string, port_string, self.username, self.host, command)
|
||||
logger.debug(command)
|
||||
if self.password:
|
||||
command = _give_password(self.password, command)
|
||||
return subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True)
|
||||
except EOF:
|
||||
raise TargetError('Connection lost.')
|
||||
|
||||
def close(self):
|
||||
logger.debug('Logging out {}@{}'.format(self.username, self.host))
|
||||
@@ -180,7 +237,7 @@ class SshConnection(object):
|
||||
def cancel_running_command(self):
|
||||
# simulate impatiently hitting ^C until command prompt appears
|
||||
logger.debug('Sending ^C')
|
||||
for _ in xrange(self.max_cancel_attempts):
|
||||
for _ in range(self.max_cancel_attempts):
|
||||
self.conn.sendline(chr(3))
|
||||
if self.conn.prompt(0.1):
|
||||
return True
|
||||
@@ -188,8 +245,11 @@ class SshConnection(object):
|
||||
|
||||
def _execute_and_wait_for_prompt(self, command, timeout=None, as_root=False, strip_colors=True, log=True):
|
||||
self.conn.prompt(0.1) # clear an existing prompt if there is one.
|
||||
if self.username == 'root':
|
||||
# As we're already root, there is no need to use sudo.
|
||||
as_root = False
|
||||
if as_root:
|
||||
command = "sudo -- sh -c '{}'".format(escape_single_quotes(command))
|
||||
command = self.sudo_cmd.format(escape_single_quotes(command))
|
||||
if log:
|
||||
logger.debug(command)
|
||||
self.conn.sendline(command)
|
||||
@@ -204,7 +264,10 @@ class SshConnection(object):
|
||||
timed_out = self._wait_for_prompt(timeout)
|
||||
# the regex removes line breaks potentially introduced when writing
|
||||
# command to shell.
|
||||
output = process_backspaces(self.conn.before)
|
||||
if sys.version_info[0] == 3:
|
||||
output = process_backspaces(self.conn.before.decode(sys.stdout.encoding, 'replace'))
|
||||
else:
|
||||
output = process_backspaces(self.conn.before)
|
||||
output = re.sub(r'\r([^\n])', r'\1', output)
|
||||
if '\r\n' in output: # strip the echoed command
|
||||
output = output.split('\r\n', 1)[1]
|
||||
@@ -231,18 +294,610 @@ class SshConnection(object):
|
||||
port_string = '-P {}'.format(self.port) if (self.port and self.port != 22) else ''
|
||||
keyfile_string = '-i {}'.format(self.keyfile) if self.keyfile else ''
|
||||
command = '{} -r {} {} {} {}'.format(scp, keyfile_string, port_string, source, dest)
|
||||
pass_string = ''
|
||||
command_redacted = command
|
||||
logger.debug(command)
|
||||
if self.password:
|
||||
command = _give_password(self.password, command)
|
||||
command_redacted = command.replace(self.password, '<redacted>')
|
||||
try:
|
||||
check_output(command, timeout=timeout, shell=True)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise subprocess.CalledProcessError(e.returncode, e.cmd.replace(pass_string, ''), e.output)
|
||||
raise HostError("Failed to copy file with '{}'. Output:\n{}".format(
|
||||
command_redacted, e.output))
|
||||
except TimeoutError as e:
|
||||
raise TimeoutError(e.command.replace(pass_string, ''), e.output)
|
||||
raise TimeoutError(command_redacted, e.output)
|
||||
|
||||
|
||||
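A minimal sketch of using SshConnection directly; the host, credentials and file names below are illustrative.

from devlib.utils.ssh import SshConnection

conn = SshConnection(host='192.168.0.10', username='root', password='secret')
print(conn.execute('uname -a'))           # exit status collected via the __devlib_ec wrapper
conn.push('payload.bin', '/tmp/payload.bin')
proc = conn.background('sleep 60')        # subprocess.Popen running the command over ssh
conn.close()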
class TelnetConnection(SshConnection):
|
||||
|
||||
def __init__(self,
|
||||
host,
|
||||
username,
|
||||
password=None,
|
||||
port=None,
|
||||
timeout=None,
|
||||
password_prompt=None,
|
||||
original_prompt=None,
|
||||
platform=None):
|
||||
self.host = host
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.port = port
|
||||
self.keyfile = None
|
||||
self.lock = threading.Lock()
|
||||
self.password_prompt = password_prompt if password_prompt is not None else self.default_password_prompt
|
||||
logger.debug('Logging in {}@{}'.format(username, host))
|
||||
timeout = timeout if timeout is not None else self.default_timeout
|
||||
self.conn = ssh_get_shell(host, username, password, None, port, timeout, True, original_prompt)
|
||||
|
||||
|
||||
class Gem5Connection(TelnetConnection):
|
||||
|
||||
def __init__(self,
|
||||
platform,
|
||||
host=None,
|
||||
username=None,
|
||||
password=None,
|
||||
port=None,
|
||||
timeout=None,
|
||||
password_prompt=None,
|
||||
original_prompt=None,
|
||||
strip_echoed_commands=False,
|
||||
):
|
||||
if host is not None:
|
||||
host_system = socket.gethostname()
|
||||
if host_system != host:
|
||||
raise TargetError("Gem5Connection can only connect to gem5 "
|
||||
"simulations on your current host, which "
|
||||
"differs from the one given {}!"
|
||||
.format(host_system, host))
|
||||
if username is not None and username != 'root':
|
||||
raise ValueError('User should be root in gem5!')
|
||||
if password is not None and password != '':
|
||||
raise ValueError('No password needed in gem5!')
|
||||
self.username = 'root'
|
||||
self.is_rooted = True
|
||||
self.password = None
|
||||
self.port = None
|
||||
# Flag to indicate whether commands are echoed by the simulated system
|
||||
self.strip_echoed_commands = strip_echoed_commands
|
||||
# Long timeouts to account for gem5 being slow
|
||||
# Can be overridden if the given timeout is longer
|
||||
self.default_timeout = 3600
|
||||
if timeout is not None:
|
||||
if timeout > self.default_timeout:
|
||||
logger.info('Overwriting the default timeout of gem5 ({})'
|
||||
' to {}'.format(self.default_timeout, timeout))
|
||||
self.default_timeout = timeout
|
||||
else:
|
||||
logger.info('Ignoring the given timeout --> gem5 needs longer timeouts')
|
||||
self.ready_timeout = self.default_timeout * 3
|
||||
# Counterpart in gem5_interact_dir
|
||||
self.gem5_input_dir = '/mnt/host/'
|
||||
# Location of m5 binary in the gem5 simulated system
|
||||
self.m5_path = None
|
||||
# Actual telnet connection to gem5 simulation
|
||||
self.conn = None
|
||||
# Flag to indicate the gem5 device is ready to interact with the
|
||||
# outer world
|
||||
self.ready = False
|
||||
# Lock file to prevent multiple connections to same gem5 simulation
|
||||
# (gem5 does not allow this)
|
||||
self.lock_directory = '/tmp/'
|
||||
self.lock_file_name = None # Will be set once connected to gem5
|
||||
|
||||
# These parameters will be set by either the method to connect to the
|
||||
# gem5 platform or directly to the gem5 simulation
|
||||
# Intermediate directory to push things to gem5 using VirtIO
|
||||
self.gem5_interact_dir = None
|
||||
# Directory to store output from gem5 on the host
|
||||
self.gem5_out_dir = None
|
||||
# Actual gem5 simulation
|
||||
self.gem5simulation = None
|
||||
|
||||
# Connect to gem5
|
||||
if platform:
|
||||
self._connect_gem5_platform(platform)
|
||||
|
||||
# Wait for boot
|
||||
self._wait_for_boot()
|
||||
|
||||
# Mount the virtIO to transfer files in/out gem5 system
|
||||
self._mount_virtio()
|
||||
|
||||
def set_hostinteractdir(self, indir):
|
||||
logger.info('Setting hostinteractdir from {} to {}'
|
||||
.format(self.gem5_input_dir, indir))
|
||||
self.gem5_input_dir = indir
|
||||
|
||||
def push(self, source, dest, timeout=None):
|
||||
"""
|
||||
Push a file to the gem5 device using VirtIO
|
||||
|
||||
The file to push to the device is copied to the temporary directory on
|
||||
the host, before being copied within the simulation to the destination.
|
||||
Checks, in the form of 'ls' with error code checking, are performed to
|
||||
ensure that the file is copied to the destination.
|
||||
"""
|
||||
# First check if the connection is set up to interact with gem5
|
||||
self._check_ready()
|
||||
|
||||
filename = os.path.basename(source)
|
||||
logger.debug("Pushing {} to device.".format(source))
|
||||
logger.debug("gem5interactdir: {}".format(self.gem5_interact_dir))
|
||||
logger.debug("dest: {}".format(dest))
|
||||
logger.debug("filename: {}".format(filename))
|
||||
|
||||
# We need to copy the file to the temporary directory first
|
||||
self._move_to_temp_dir(source)
|
||||
|
||||
# Dest in gem5 world is a file rather than directory
|
||||
if os.path.basename(dest) != filename:
|
||||
dest = os.path.join(dest, filename)
|
||||
# Back to the gem5 world
|
||||
self._gem5_shell("ls -al {}{}".format(self.gem5_input_dir, filename))
|
||||
self._gem5_shell("cat '{}''{}' > '{}'".format(self.gem5_input_dir,
|
||||
filename,
|
||||
dest))
|
||||
self._gem5_shell("sync")
|
||||
self._gem5_shell("ls -al {}".format(dest))
|
||||
self._gem5_shell("ls -al {}".format(self.gem5_input_dir))
|
||||
logger.debug("Push complete.")
|
||||
|
||||
def pull(self, source, dest, timeout=0): #pylint: disable=unused-argument
|
||||
"""
|
||||
Pull a file from the gem5 device using m5 writefile
|
||||
|
||||
The file is copied to the local directory within the guest as the m5
|
||||
writefile command assumes that the file is local. The file is then
|
||||
written out to the host system using writefile, prior to being moved to
|
||||
the destination on the host.
|
||||
"""
|
||||
# First check if the connection is set up to interact with gem5
|
||||
self._check_ready()
|
||||
|
||||
result = self._gem5_shell("ls {}".format(source))
|
||||
files = strip_bash_colors(result).split()
|
||||
|
||||
for filename in files:
|
||||
dest_file = os.path.basename(filename)
|
||||
logger.debug("pull_file {} {}".format(filename, dest_file))
|
||||
# writefile needs the file to be copied to be in the current
|
||||
# working directory so if needed, copy to the working directory
|
||||
# We don't check the exit code here because it is non-zero if the
|
||||
# source and destination are the same. The ls below will cause an
|
||||
# error if the file was not where we expected it to be.
|
||||
if os.path.isabs(source):
|
||||
if os.path.dirname(source) != self.execute('pwd',
|
||||
check_exit_code=False):
|
||||
self._gem5_shell("cat '{}' > '{}'".format(filename,
|
||||
dest_file))
|
||||
self._gem5_shell("sync")
|
||||
self._gem5_shell("ls -la {}".format(dest_file))
|
||||
logger.debug('Finished the copy in the simulator')
|
||||
self._gem5_util("writefile {}".format(dest_file))
|
||||
|
||||
if 'cpu' not in filename:
|
||||
while not os.path.exists(os.path.join(self.gem5_out_dir,
|
||||
dest_file)):
|
||||
time.sleep(1)
|
||||
|
||||
# Perform the local move
|
||||
if os.path.exists(os.path.join(dest, dest_file)):
|
||||
logger.warning(
|
||||
'Destination file {} already exists!'\
|
||||
.format(dest_file))
|
||||
else:
|
||||
shutil.move(os.path.join(self.gem5_out_dir, dest_file), dest)
|
||||
logger.debug("Pull complete.")
|
||||
|
||||
def execute(self, command, timeout=1000, check_exit_code=True,
|
||||
as_root=False, strip_colors=True):
|
||||
"""
|
||||
Execute a command on the gem5 platform
|
||||
"""
|
||||
# First check if the connection is set up to interact with gem5
|
||||
self._check_ready()
|
||||
|
||||
output = self._gem5_shell(command,
|
||||
check_exit_code=check_exit_code,
|
||||
as_root=as_root)
|
||||
if strip_colors:
|
||||
output = strip_bash_colors(output)
|
||||
return output
|
||||
|
||||
def background(self, command, stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE, as_root=False):
|
||||
# First check if the connection is set up to interact with gem5
|
||||
self._check_ready()
|
||||
|
||||
# Create the logfile for stderr/stdout redirection
|
||||
command_name = command.split(' ')[0].split('/')[-1]
|
||||
redirection_file = 'BACKGROUND_{}.log'.format(command_name)
|
||||
trial = 0
|
||||
while os.path.isfile(redirection_file):
|
||||
# Log file already exists so add to name
|
||||
redirection_file = 'BACKGROUND_{}{}.log'.format(command_name, trial)
|
||||
trial += 1
|
||||
|
||||
# Create the command to pass on to gem5 shell
|
||||
complete_command = '{} >> {} 2>&1 &'.format(command, redirection_file)
|
||||
output = self._gem5_shell(complete_command, as_root=as_root)
|
||||
output = strip_bash_colors(output)
|
||||
gem5_logger.info('STDERR/STDOUT of background command will be '
|
||||
'redirected to {}. Use target.pull() to '
|
||||
'get this file'.format(redirection_file))
|
||||
return output
|
||||
|
||||
def close(self):
|
||||
"""
|
||||
Close and disconnect from the gem5 simulation. Additionally, we remove
|
||||
the temporary directory used to pass files into the simulation.
|
||||
"""
|
||||
gem5_logger.info("Gracefully terminating the gem5 simulation.")
|
||||
try:
|
||||
# Unmount the virtio device BEFORE we kill the
|
||||
# simulation. This is done to simplify checkpointing at
|
||||
# the end of a simulation!
|
||||
self._unmount_virtio()
|
||||
self._gem5_util("exit")
|
||||
self.gem5simulation.wait()
|
||||
except EOF:
|
||||
pass
|
||||
gem5_logger.info("Removing the temporary directory")
|
||||
try:
|
||||
shutil.rmtree(self.gem5_interact_dir)
|
||||
except OSError:
|
||||
gem5_logger.warn("Failed to remove the temporary directory!")
|
||||
|
||||
# Delete the lock file
|
||||
os.remove(self.lock_file_name)
|
||||
|
||||
# Functions only to be called by the Gem5 connection itself
|
||||
def _connect_gem5_platform(self, platform):
|
||||
port = platform.gem5_port
|
||||
gem5_simulation = platform.gem5
|
||||
gem5_interact_dir = platform.gem5_interact_dir
|
||||
gem5_out_dir = platform.gem5_out_dir
|
||||
|
||||
self.connect_gem5(port, gem5_simulation, gem5_interact_dir, gem5_out_dir)
|
||||
|
||||
# Handle the EOF exception raised by pexpect
|
||||
def _gem5_EOF_handler(self, gem5_simulation, gem5_out_dir, err):
|
||||
# If we have reached the "EOF", it typically means
|
||||
# that gem5 crashed and closed the connection. Let's
|
||||
# check and actually tell the user what happened here,
|
||||
# rather than spewing out pexpect errors.
|
||||
if gem5_simulation.poll():
|
||||
message = "The gem5 process has crashed with error code {}!\n\tPlease see {} for details."
|
||||
raise TargetError(message.format(gem5_simulation.poll(), gem5_out_dir))
|
||||
else:
|
||||
# Let's re-throw the exception in this case.
|
||||
raise err
|
||||
|
||||
# This function connects to the gem5 simulation
|
||||
def connect_gem5(self, port, gem5_simulation, gem5_interact_dir,
|
||||
gem5_out_dir):
|
||||
"""
|
||||
Connect to the telnet port of the gem5 simulation.
|
||||
|
||||
We connect, and wait for the prompt to be found. We do not use a timeout
|
||||
for this, and wait for the prompt in a while loop as the gem5 simulation
|
||||
can take many hours to reach a prompt when booting the system. We also
|
||||
inject some newlines periodically to try and force gem5 to show a
|
||||
prompt. Once the prompt has been found, we replace it with a unique
|
||||
prompt to ensure that we are able to match it properly. We also disable
|
||||
the echo as this simplifies parsing the output when executing commands
|
||||
on the device.
|
||||
"""
|
||||
host = socket.gethostname()
|
||||
gem5_logger.info("Connecting to the gem5 simulation on port {}".format(port))
|
||||
|
||||
# Check if there is no on-going connection yet
|
||||
lock_file_name = '{}{}_{}.LOCK'.format(self.lock_directory, host, port)
|
||||
if os.path.isfile(lock_file_name):
|
||||
# There is already a connection to this gem5 simulation
|
||||
raise TargetError('There is already a connection to the gem5 '
|
||||
'simulation using port {} on {}!'
|
||||
.format(port, host))
|
||||
|
||||
# Connect to the gem5 telnet port. Use a short timeout here.
|
||||
attempts = 0
|
||||
while attempts < 10:
|
||||
attempts += 1
|
||||
try:
|
||||
self.conn = TelnetPxssh(original_prompt=None)
|
||||
self.conn.login(host, self.username, port=port,
|
||||
login_timeout=10, auto_prompt_reset=False)
|
||||
break
|
||||
except pxssh.ExceptionPxssh:
|
||||
pass
|
||||
except EOF as err:
|
||||
self._gem5_EOF_handler(gem5_simulation, gem5_out_dir, err)
|
||||
else:
|
||||
gem5_simulation.kill()
|
||||
raise TargetError("Failed to connect to the gem5 telnet session.")
|
||||
|
||||
gem5_logger.info("Connected! Waiting for prompt...")
|
||||
|
||||
# Create the lock file
|
||||
self.lock_file_name = lock_file_name
|
||||
open(self.lock_file_name, 'w').close() # Similar to touch
|
||||
gem5_logger.info("Created lock file {} to prevent reconnecting to "
|
||||
"same simulation".format(self.lock_file_name))
|
||||
|
||||
# We need to find the prompt. It might be different if we are resuming
|
||||
# from a checkpoint. Therefore, we test multiple options here.
|
||||
prompt_found = False
|
||||
while not prompt_found:
|
||||
try:
|
||||
self._login_to_device()
|
||||
except TIMEOUT:
|
||||
pass
|
||||
except EOF as err:
|
||||
self._gem5_EOF_handler(gem5_simulation, gem5_out_dir, err)
|
||||
|
||||
try:
|
||||
# Try and force a prompt to be shown
|
||||
self.conn.send('\n')
|
||||
self.conn.expect([r'# ', r'\$ ', self.conn.UNIQUE_PROMPT, r'\[PEXPECT\][\\\$\#]+ '], timeout=60)
|
||||
prompt_found = True
|
||||
except TIMEOUT:
|
||||
pass
|
||||
except EOF as err:
|
||||
self._gem5_EOF_handler(gem5_simulation, gem5_out_dir, err)
|
||||
|
||||
gem5_logger.info("Successfully logged in")
|
||||
gem5_logger.info("Setting unique prompt...")
|
||||
|
||||
self.conn.set_unique_prompt()
|
||||
self.conn.prompt()
|
||||
gem5_logger.info("Prompt found and replaced with a unique string")
|
||||
|
||||
# We check that the prompt is what we think it should be. If not, we
|
||||
# need to update the regex we use to match.
|
||||
self._find_prompt()
|
||||
|
||||
self.conn.setecho(False)
|
||||
self._sync_gem5_shell()
|
||||
|
||||
# Fully connected to gem5 simulation
|
||||
self.gem5_interact_dir = gem5_interact_dir
|
||||
self.gem5_out_dir = gem5_out_dir
|
||||
self.gem5simulation = gem5_simulation
|
||||
|
||||
# Ready for interaction now
|
||||
self.ready = True
|
||||
|
||||
def _login_to_device(self):
|
||||
"""
|
||||
Log in to the device; overridden by subclasses when an actual login is needed
|
||||
"""
|
||||
pass
|
||||
|
||||
def _find_prompt(self):
|
||||
prompt = r'\[PEXPECT\][\\\$\#]+ '
|
||||
synced = False
|
||||
while not synced:
|
||||
self.conn.send('\n')
|
||||
i = self.conn.expect([prompt, self.conn.UNIQUE_PROMPT, r'[\$\#] '], timeout=self.default_timeout)
|
||||
if i == 0:
|
||||
synced = True
|
||||
elif i == 1:
|
||||
prompt = self.conn.UNIQUE_PROMPT
|
||||
synced = True
|
||||
else:
|
||||
prompt = re.sub(r'\$', r'\\\$', self.conn.before.strip() + self.conn.after.strip())
|
||||
prompt = re.sub(r'\#', r'\\\#', prompt)
|
||||
prompt = re.sub(r'\[', r'\[', prompt)
|
||||
prompt = re.sub(r'\]', r'\]', prompt)
|
||||
|
||||
self.conn.PROMPT = prompt
|
||||
|
||||
def _sync_gem5_shell(self):
|
||||
"""
|
||||
Synchronise with the gem5 shell.
|
||||
|
||||
Write some unique text to the gem5 device to allow us to synchronise
|
||||
with the shell output. We actually get two prompts so we need to match
|
||||
both of these.
|
||||
"""
|
||||
gem5_logger.debug("Sending Sync")
|
||||
self.conn.send("echo \*\*sync\*\*\n")
|
||||
self.conn.expect(r"\*\*sync\*\*", timeout=self.default_timeout)
|
||||
self.conn.expect([self.conn.UNIQUE_PROMPT, self.conn.PROMPT], timeout=self.default_timeout)
|
||||
self.conn.expect([self.conn.UNIQUE_PROMPT, self.conn.PROMPT], timeout=self.default_timeout)
|
||||
|
||||
def _gem5_util(self, command):
|
||||
""" Execute a gem5 utility command using the m5 binary on the device """
|
||||
if self.m5_path is None:
|
||||
raise TargetError('Path to m5 binary on simulated system is not set!')
|
||||
self._gem5_shell('{} {}'.format(self.m5_path, command))
|
||||
|
||||
def _gem5_shell(self, command, as_root=False, timeout=None, check_exit_code=True, sync=True): # pylint: disable=R0912
|
||||
"""
|
||||
Execute a command in the gem5 shell
|
||||
|
||||
This wraps the telnet connection to gem5 and processes the raw output.
|
||||
|
||||
This method waits for the shell to return, and then will try and
|
||||
separate the command's output from the echoed command itself. If this
|
||||
fails, warn, but continue with the potentially wrong output.
|
||||
|
||||
The exit code is also checked by default, and non-zero exit codes will
|
||||
raise a TargetError.
|
||||
"""
|
||||
if sync:
|
||||
self._sync_gem5_shell()
|
||||
|
||||
gem5_logger.debug("gem5_shell command: {}".format(command))
|
||||
|
||||
if as_root:
|
||||
command = 'echo "{}" | su'.format(escape_double_quotes(command))
|
||||
|
||||
# Send the actual command
|
||||
self.conn.send("{}\n".format(command))
|
||||
|
||||
# Wait for the response. We just sit here and wait for the prompt to
|
||||
# appear, as gem5 might take a long time to provide the output. This
|
||||
# avoids timeout issues.
|
||||
command_index = -1
|
||||
while command_index == -1:
|
||||
if self.conn.prompt():
|
||||
output = re.sub(r' \r([^\n])', r'\1', self.conn.before)
|
||||
output = re.sub(r'[\b]', r'', output)
|
||||
# Deal with line wrapping
|
||||
output = re.sub(r'[\r].+?<', r'', output)
|
||||
command_index = output.find(command)
|
||||
|
||||
# If we have -1, then we cannot match the command, but the
|
||||
# prompt has returned. Hence, we have a bit of an issue. We
|
||||
# warn, and return the whole output.
|
||||
if command_index == -1:
|
||||
gem5_logger.warn("gem5_shell: Unable to match command in "
|
||||
"command output. Expect parsing errors!")
|
||||
command_index = 0
|
||||
|
||||
output = output[command_index + len(command):].strip()
|
||||
|
||||
# If the gem5 system echoes the executed command, we need to remove that too!
|
||||
if self.strip_echoed_commands:
|
||||
command_index = output.find(command)
|
||||
if command_index != -1:
|
||||
output = output[command_index + len(command):].strip()
|
||||
|
||||
gem5_logger.debug("gem5_shell output: {}".format(output))
|
||||
|
||||
# We get a second prompt. Hence, we need to eat one to make sure that we
|
||||
# stay in sync. If we do not do this, we risk getting out of sync for
|
||||
# slower simulations.
|
||||
self.conn.expect([self.conn.UNIQUE_PROMPT, self.conn.PROMPT], timeout=self.default_timeout)
|
||||
|
||||
if check_exit_code:
|
||||
exit_code_text = self._gem5_shell('echo $?', as_root=as_root,
|
||||
timeout=timeout, check_exit_code=False,
|
||||
sync=False)
|
||||
try:
|
||||
exit_code = int(exit_code_text.split()[0])
|
||||
if exit_code:
|
||||
message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'
|
||||
raise TargetError(message.format(exit_code, command, output))
|
||||
except (ValueError, IndexError):
|
||||
gem5_logger.warning('Could not get exit code for "{}",\ngot: "{}"'.format(command, exit_code_text))
|
||||
|
||||
return output
|
||||
|
||||
def _mount_virtio(self):
|
||||
"""
|
||||
Mount the VirtIO device in the simulated system.
|
||||
"""
|
||||
gem5_logger.info("Mounting VirtIO device in simulated system")
|
||||
|
||||
self._gem5_shell('mkdir -p {}'.format(self.gem5_input_dir), as_root=True)
|
||||
mount_command = "mount -t 9p -o trans=virtio,version=9p2000.L,aname={} gem5 {}".format(self.gem5_interact_dir, self.gem5_input_dir)
|
||||
self._gem5_shell(mount_command, as_root=True)
|
||||
|
||||
def _unmount_virtio(self):
|
||||
"""
|
||||
Unmount the VirtIO device in the simulated system.
|
||||
"""
|
||||
gem5_logger.info("Unmounting VirtIO device in simulated system")
|
||||
|
||||
unmount_command = "umount {}".format(self.gem5_input_dir)
|
||||
self._gem5_shell(unmount_command, as_root=True)
|
||||
|
||||
def take_checkpoint(self):
|
||||
"""
|
||||
Take a checkpoint of the simulated system.
|
||||
|
||||
In order to take a checkpoint we first unmount the virtio
|
||||
device, then take the checkpoint, and then remount the device to
|
||||
allow us to continue the current run. This needs to be done to
|
||||
ensure that future gem5 simulations are able to utilise the
|
||||
virtio device (i.e., we need to drop the current state
|
||||
information that the device has).
|
||||
"""
|
||||
self._unmount_virtio()
|
||||
self._gem5_util("checkpoint")
|
||||
self._mount_virtio()
|
||||
|
||||
def _move_to_temp_dir(self, source):
|
||||
"""
|
||||
Move a file to the temporary directory on the host for copying to the
|
||||
gem5 device
|
||||
"""
|
||||
command = "cp {} {}".format(source, self.gem5_interact_dir)
|
||||
gem5_logger.debug("Local copy command: {}".format(command))
|
||||
subprocess.call(command.split())
|
||||
subprocess.call("sync".split())
|
||||
|
||||
def _check_ready(self):
|
||||
"""
|
||||
Check if the gem5 platform is ready
|
||||
"""
|
||||
if not self.ready:
|
||||
raise TargetError('Gem5 is not ready to interact yet')
|
||||
|
||||
def _wait_for_boot(self):
|
||||
pass
|
||||
|
||||
def _probe_file(self, filepath):
|
||||
"""
|
||||
Internal method to check if the target has a certain file
|
||||
"""
|
||||
command = 'if [ -e \'{}\' ]; then echo 1; else echo 0; fi'
|
||||
output = self.execute(command.format(filepath), as_root=self.is_rooted)
|
||||
return boolean(output.strip())
|
||||
|
||||
|
||||
class LinuxGem5Connection(Gem5Connection):
|
||||
|
||||
def _login_to_device(self):
|
||||
gem5_logger.info("Trying to log in to gem5 device")
|
||||
login_prompt = ['login:', 'AEL login:', 'username:', 'aarch64-gem5 login:']
|
||||
login_password_prompt = ['password:']
|
||||
# Wait for the login prompt
|
||||
prompt = login_prompt + [self.conn.UNIQUE_PROMPT]
|
||||
i = self.conn.expect(prompt, timeout=10)
|
||||
# Check if we are already at a prompt, or if we need to log in.
|
||||
if i < len(prompt) - 1:
|
||||
self.conn.sendline("{}".format(self.username))
|
||||
password_prompt = login_password_prompt + [r'# ', self.conn.UNIQUE_PROMPT]
|
||||
j = self.conn.expect(password_prompt, timeout=self.default_timeout)
|
||||
if j < len(password_prompt) - 2:
|
||||
self.conn.sendline("{}".format(self.password))
|
||||
self.conn.expect([r'# ', self.conn.UNIQUE_PROMPT], timeout=self.default_timeout)
|
||||
|
||||
|
||||
|
||||
class AndroidGem5Connection(Gem5Connection):
|
||||
|
||||
def _wait_for_boot(self):
|
||||
"""
|
||||
Wait for the system to boot
|
||||
|
||||
We monitor the sys.boot_completed and service.bootanim.exit system
|
||||
properties to determine when the system has finished booting. In the
|
||||
event that we cannot coerce the result of service.bootanim.exit to an
|
||||
integer, we assume that the boot animation was disabled and do not wait
|
||||
for it to finish.
|
||||
|
||||
"""
|
||||
gem5_logger.info("Waiting for Android to boot...")
|
||||
while True:
|
||||
booted = False
|
||||
anim_finished = True # Assume boot animation was disabled on except
|
||||
try:
|
||||
booted = (int('0' + self._gem5_shell('getprop sys.boot_completed', check_exit_code=False).strip()) == 1)
|
||||
anim_finished = (int(self._gem5_shell('getprop service.bootanim.exit', check_exit_code=False).strip()) == 1)
|
||||
except ValueError:
|
||||
pass
|
||||
if booted and anim_finished:
|
||||
break
|
||||
time.sleep(60)
|
||||
|
||||
gem5_logger.info("Android booted")
|
||||
|
||||
def _give_password(password, command):
|
||||
if not sshpass:
|
||||
raise HostError('Must have sshpass installed on the host in order to use password-based auth.')
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
# Copyright 2014-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -26,6 +26,11 @@ is not the best language to use for configuration.
|
||||
|
||||
"""
|
||||
import math
|
||||
import re
|
||||
import sys
|
||||
from functools import total_ordering
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
from devlib.utils.misc import isiterable, to_identifier, ranges_to_list, list_to_mask
|
||||
|
||||
@@ -68,6 +73,15 @@ def numeric(value):
|
||||
"""
|
||||
if isinstance(value, int):
|
||||
return value
|
||||
|
||||
if isinstance(value, basestring):
|
||||
value = value.strip()
|
||||
if value.endswith('%'):
|
||||
try:
|
||||
return float(value.rstrip('%')) / 100
|
||||
except ValueError:
|
||||
raise ValueError('Not numeric: {}'.format(value))
|
||||
|
||||
try:
|
||||
fvalue = float(value)
|
||||
except ValueError:
|
||||
@@ -79,6 +93,7 @@ def numeric(value):
|
||||
return fvalue
|
||||
|
||||
|
||||
@total_ordering
|
||||
class caseless_string(str):
|
||||
"""
|
||||
Just like built-in Python string except case-insensitive on comparisons. However, the
|
||||
@@ -92,12 +107,17 @@ class caseless_string(str):
|
||||
return self.lower() == other
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
||||
def __cmp__(self, other):
|
||||
if isinstance(basestring, other):
|
||||
if isinstance(other, basestring):
|
||||
other = other.lower()
|
||||
return cmp(self.lower(), other)
|
||||
return self.lower() != other
|
||||
|
||||
def __lt__(self, other):
|
||||
if isinstance(other, basestring):
|
||||
other = other.lower()
|
||||
return self.lower() < other
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.lower())
|
||||
|
||||
def format(self, *args, **kwargs):
|
||||
return caseless_string(super(caseless_string, self).format(*args, **kwargs))
|
||||
@@ -111,3 +131,40 @@ def bitmask(value):
|
||||
if not isinstance(value, int):
|
||||
raise ValueError(value)
|
||||
return value
|
||||
|
||||
|
||||
regex_type = type(re.compile(''))
|
||||
|
||||
|
||||
if sys.version_info[0] == 3:
|
||||
def regex(value):
|
||||
if isinstance(value, regex_type):
|
||||
if isinstance(value.pattern, str):
|
||||
return value
|
||||
return re.compile(value.pattern.decode(),
|
||||
value.flags | re.UNICODE)
|
||||
else:
|
||||
if isinstance(value, bytes):
|
||||
value = value.decode()
|
||||
return re.compile(value)
|
||||
|
||||
|
||||
def bytes_regex(value):
|
||||
if isinstance(value, regex_type):
|
||||
if isinstance(value.pattern, bytes):
|
||||
return value
|
||||
return re.compile(value.pattern.encode(sys.stdout.encoding),
|
||||
value.flags & ~re.UNICODE)
|
||||
else:
|
||||
if isinstance(value, str):
|
||||
value = value.encode(sys.stdout.encoding)
|
||||
return re.compile(value)
|
||||
else:
|
||||
def regex(value):
|
||||
if isinstance(value, regex_type):
|
||||
return value
|
||||
else:
|
||||
return re.compile(value)
|
||||
|
||||
|
||||
bytes_regex = regex
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
# Copyright 2014-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -19,6 +19,8 @@ import time
|
||||
import logging
|
||||
from copy import copy
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
from devlib.utils.serial_port import write_characters, TIMEOUT
|
||||
from devlib.utils.types import boolean
|
||||
|
||||
@@ -193,14 +195,14 @@ class UefiMenu(object):
|
||||
is not in the current menu, ``LookupError`` will be raised."""
|
||||
if not self.prompt:
|
||||
self.read_menu(timeout)
|
||||
return self.options.items()
|
||||
return list(self.options.items())
|
||||
|
||||
def get_option_index(self, text, timeout=default_timeout):
|
||||
"""Returns the menu index of the specified option text (uses regex matching). If the option
|
||||
is not in the current menu, ``LookupError`` will be raised."""
|
||||
if not self.prompt:
|
||||
self.read_menu(timeout)
|
||||
for k, v in self.options.iteritems():
|
||||
for k, v in self.options.items():
|
||||
if re.search(text, v):
|
||||
return k
|
||||
raise LookupError(text)
|
||||
|
devlib/utils/version.py (Normal file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
def get_commit():
|
||||
p = Popen(['git', 'rev-parse', 'HEAD'], cwd=os.path.dirname(__file__),
|
||||
stdout=PIPE, stderr=PIPE)
|
||||
std, _ = p.communicate()
|
||||
p.wait()
|
||||
if p.returncode:
|
||||
return None
|
||||
if sys.version_info[0] == 3:
|
||||
return std[:8].decode(sys.stdout.encoding, 'replace')
|
||||
else:
|
||||
return std[:8]
|
doc/connection.rst (Normal file, 240 lines)
@@ -0,0 +1,240 @@
|
||||
Connection
|
||||
==========
|
||||
|
||||
A :class:`Connection` abstracts an actual physical connection to a device. The
|
||||
first connection is created when :func:`Target.connect` method is called. If a
|
||||
:class:`Target` is used in a multi-threaded environment, it will maintain a
|
||||
connection for each thread in which it is invoked. This allows the same target
|
||||
object to be used in parallel in multiple threads.
|
||||
|
||||
:class:`Connection`\ s will be automatically created and managed by
|
||||
:class:`Target`\ s, so there is usually no reason to create one manually.
|
||||
Instead, configuration for a :class:`Connection` is passed as
|
||||
`connection_settings` parameter when creating a :class:`Target`. The connection
|
||||
to be used by the target is also specified on instantiation by the `conn_cls` parameter,
|
||||
though all concrete :class:`Target` implementations will set an appropriate
|
||||
default, so there is typically no need to specify this explicitly.
|
||||
|
||||
:class:`Connection` classes are not a part of an inheritance hierarchy, i.e.
|
||||
they do not derive from a common base. Instead, a :class:`Connection` is any
|
||||
class that implements the following methods.
|
||||
|
||||
|
||||
.. method:: push(self, source, dest, timeout=None)
|
||||
|
||||
Transfer a file from the host machine to the connected device.
|
||||
|
||||
:param source: path to the file on the host
|
||||
:param dest: path to the file on the connected device.
|
||||
:param timeout: timeout (in seconds) for the transfer; if the transfer does
|
||||
not complete within this period, an exception will be raised.
|
||||
|
||||
.. method:: pull(self, source, dest, timeout=None)
|
||||
|
||||
Transfer a file, or files matching a glob pattern, from the connected device
|
||||
to the host machine.
|
||||
|
||||
:param source: path to the file on the connected device. If ``dest`` is a
|
||||
directory, may be a glob pattern.
|
||||
:param dest: path to the file on the host
|
||||
:param timeout: timeout (in seconds) for the transfer; if the transfer does
|
||||
not complete within this period, an exception will be raised.
|
||||
|
||||
.. method:: execute(self, command, timeout=None, check_exit_code=False, as_root=False)
|
||||
|
||||
Execute the specified command on the connected device and return its output.
|
||||
|
||||
:param command: The command to be executed.
|
||||
:param timeout: Timeout (in seconds) for the execution of the command. If
|
||||
specified, an exception will be raised if execution does not complete
|
||||
within the specified period.
|
||||
:param check_exit_code: If ``True`` the exit code (on connected device)
|
||||
from execution of the command will be checked, and an exception will be
|
||||
raised if it is not ``0``.
|
||||
:param as_root: The command will be executed as root. This will fail on
|
||||
unrooted connected devices.
|
||||
|
||||
.. method:: background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False)
|
||||
|
||||
Execute the command on the connected device, invoking it via subprocess on the host.
|
||||
This will return a :class:`subprocess.Popen` instance for the command.
|
||||
|
||||
:param command: The command to be executed.
|
||||
:param stdout: By default, standard output will be piped from the subprocess;
|
||||
this may be used to redirect it to an alternative file handle.
|
||||
:param stderr: By default, standard error will be piped from the subprocess;
|
||||
this may be used to redirect it to an alternative file handle.
|
||||
:param as_root: The command will be executed as root. This will fail on
|
||||
unrooted connected devices.
|
||||
|
||||
.. note:: This **will block the connection** until the command completes.
|
||||
|
||||
.. note:: The above methods are directly wrapped by :class:`Target` methods,
|
||||
however note that some of the defaults are different.
|
||||
|
||||
.. method:: cancel_running_command(self)
|
||||
|
||||
Cancel a running command (previously started with :func:`background`) and free up the connection.
|
||||
It is valid to call this if the command has already terminated (or if no
|
||||
command was issued), in which case this is a no-op.
|
||||
|
||||
.. method:: close(self)
|
||||
|
||||
Close the connection to the device. The :class:`Connection` object should not
|
||||
be used after this method is called. There is no way to reopen a previously
|
||||
closed connection, a new connection object should be created instead.
|
||||
|
||||
.. note:: There is no :func:`open` method, as the connection is assumed to be
|
||||
opened on instantiation.
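
As an illustration of this duck-typed interface, below is a minimal sketch of a
connection-like class that simply runs everything on the local host. The class
name and its reliance on the standard ``subprocess``/``shutil``/``shlex``
modules (Python 3) are assumptions made for this example; it is not an existing
devlib implementation.

.. code:: python

    import shlex
    import shutil
    import subprocess

    class LocalShellConnection(object):
        """Hypothetical connection that treats the host itself as the device."""

        def push(self, source, dest, timeout=None):
            shutil.copy(source, dest)

        def pull(self, source, dest, timeout=None):
            shutil.copy(source, dest)

        def execute(self, command, timeout=None, check_exit_code=False, as_root=False):
            if as_root:
                command = 'sudo -- sh -c {}'.format(shlex.quote(command))
            proc = subprocess.Popen(command, shell=True,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            output, _ = proc.communicate(timeout=timeout)
            if check_exit_code and proc.returncode:
                raise RuntimeError('Got exit code {} from: {}'.format(proc.returncode, command))
            return output.decode('utf-8', 'replace')

        def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
            return subprocess.Popen(command, shell=True, stdout=stdout, stderr=stderr)

        def cancel_running_command(self):
            pass  # nothing to cancel in this simplified sketch

        def close(self):
            pass  # no persistent resources to release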
|
||||
|
||||
|
||||
.. _connection-types:
|
||||
|
||||
Connection Types
|
||||
----------------
|
||||
|
||||
.. class:: AdbConnection(device=None, timeout=None)
|
||||
|
||||
A connection to an android device via ``adb`` (Android Debug Bridge).
|
||||
``adb`` is part of the Android SDK (though stand-alone versions are also
|
||||
available).
|
||||
|
||||
:param device: The name of the adb device. This is usually a unique hex
|
||||
string for USB-connected devices, or an ip address/port
|
||||
combination. To see connected devices, you can run ``adb
|
||||
devices`` on the host.
|
||||
:param timeout: Connection timeout in seconds. If a connection to the device
|
||||
is not established within this period, :class:`HostError`
|
||||
is raised.
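
For example, an ``AdbConnection`` is normally created for you by an
:class:`AndroidTarget`; a minimal sketch (the serial number is a placeholder
for whatever ``adb devices`` reports on your host):

.. code:: python

    from devlib import AndroidTarget

    t = AndroidTarget(connection_settings={'device': '0123456789ABCDEF'})
    print(t.execute('echo hello'))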
|
||||
|
||||
|
||||
.. class:: SshConnection(host, username, password=None, keyfile=None, port=None,\
|
||||
timeout=None, password_prompt=None)
|
||||
|
||||
A connection to a device on the network over SSH.
|
||||
|
||||
:param host: SSH host to which to connect
|
||||
:param username: username for SSH login
|
||||
:param password: password for the SSH connection
|
||||
|
||||
.. note:: In order to use password-based authentication,
|
||||
``sshpass`` utility must be installed on the
|
||||
system.
|
||||
|
||||
:param keyfile: Path to the SSH private key to be used for the connection.
|
||||
|
||||
.. note:: ``keyfile`` and ``password`` can't be specified
|
||||
at the same time.
|
||||
|
||||
:param port: TCP port on which SSH server is listening on the remote device.
|
||||
Omit to use the default port.
|
||||
:param timeout: Timeout for the connection in seconds. If a connection
|
||||
cannot be established within this time, an error will be
|
||||
raised.
|
||||
:param password_prompt: A string with the password prompt used by
|
||||
``sshpass``. Set this if your version of ``sshpass``
|
||||
uses something other than ``"[sudo] password"``.
|
||||
|
||||
|
||||
.. class:: TelnetConnection(host, username, password=None, port=None,\
|
||||
timeout=None, password_prompt=None,\
|
||||
original_prompt=None)
|
||||
|
||||
A connection to a device on the network over Telnet.
|
||||
|
||||
.. note:: Since the Telnet protocol does not support file transfer, scp is
|
||||
used for that purpose.
|
||||
|
||||
:param host: SSH host to which to connect
|
||||
:param username: username for SSH login
|
||||
:param password: password for the SSH connection
|
||||
|
||||
.. note:: In order to use password-based authentication,
|
||||
``sshpass`` utility must be installed on the
|
||||
system.
|
||||
|
||||
:param port: TCP port on which SSH server is listening on the remote device.
|
||||
Omit to use the default port.
|
||||
:param timeout: Timeout for the connection in seconds. If a connection
|
||||
cannot be established within this time, an error will be
|
||||
raised.
|
||||
:param password_prompt: A string with the password prompt used by
|
||||
``sshpass``. Set this if your version of ``sshpass``
|
||||
uses something other than ``"[sudo] password"``.
|
||||
:param original_prompt: A regex for the shell prompt presented in the Telnet
|
||||
connection (the prompt will be reset to a
|
||||
randomly-generated pattern for the duration of the
|
||||
connection to reduce the possibility of clashes).
|
||||
This parameter is ignored for SSH connections.
|
||||
|
||||
|
||||
.. class:: LocalConnection(keep_password=True, unrooted=False, password=None)
|
||||
|
||||
A connection to the local host allowing it to be treated as a Target.
|
||||
|
||||
|
||||
:param keep_password: If this is ``True`` (the default) user's password will
|
||||
be cached in memory after it is first requested.
|
||||
:param unrooted: If set to ``True``, the platform will be assumed to be
|
||||
unrooted without testing for root. This is useful to avoid
|
||||
blocking on password request in scripts.
|
||||
:param password: Specify password on connection creation rather than
|
||||
prompting for it.
|
||||
|
||||
|
||||
.. class:: Gem5Connection(platform, host=None, username=None, password=None,\
|
||||
timeout=None, password_prompt=None,\
|
||||
original_prompt=None)
|
||||
|
||||
A connection to a gem5 simulation using a local Telnet connection.
|
||||
|
||||
.. note:: Some of the following input parameters are optional and will be ignored during
|
||||
initialisation. They were kept to maintain the analogy with a :class:`TelnetConnection`
(i.e. ``host``, ``username``, ``password``, ``port``,
``password_prompt`` and ``original_prompt``)
|
||||
|
||||
|
||||
:param host: Host on which the gem5 simulation is running
|
||||
|
||||
.. note:: Even though the ``host`` input parameter
will be ignored, the gem5 simulation needs to run on
the same host that the user is
|
||||
currently on, so if the host given as input
|
||||
parameter is not the same as the actual host, a
|
||||
``TargetError`` will be raised to prevent
|
||||
confusion.
|
||||
|
||||
:param username: Username in the simulated system
|
||||
:param password: No password is required for gem5, so this does not need to be set
|
||||
:param port: Telnet port to connect to gem5. This does not need to be set
|
||||
at initialisation as this will either be determined by the
|
||||
:class:`Gem5SimulationPlatform` or can be set using the
|
||||
:func:`connect_gem5` method
|
||||
:param timeout: Timeout for the connection in seconds. Gem5 has high
|
||||
latencies so unless the timeout given by the user via
|
||||
this input parameter is higher than the default one
|
||||
(3600 seconds), this input parameter will be ignored.
|
||||
:param password_prompt: A string with password prompt
|
||||
:param original_prompt: A regex for the shell prompt
|
||||
|
||||
There are two classes that inherit from :class:`Gem5Connection`:
|
||||
:class:`AndroidGem5Connection` and :class:`LinuxGem5Connection`.
|
||||
They inherit *almost* all methods from the parent class, without altering them.
|
||||
The only methods discussed below are those that are overridden by the
|
||||
:class:`LinuxGem5Connection` and :class:`AndroidGem5Connection` respectively.
|
||||
|
||||
.. class:: LinuxGem5Connection
|
||||
|
||||
A connection to a gem5 simulation that emulates a Linux system.
|
||||
|
||||
.. method:: _login_to_device(self)
|
||||
|
||||
Login to the gem5 simulated system.
|
||||
|
||||
.. class:: AndroidGem5Connection
|
||||
|
||||
A connection to a gem5 simulation that emulates an Android system.
|
||||
|
||||
.. method:: _wait_for_boot(self)
|
||||
|
||||
Wait for the gem5 simulated system to have booted and finished the booting animation.
|
doc/derived_measurements.rst (Normal file, 221 lines)
@@ -0,0 +1,221 @@
|
||||
Derived Measurements
|
||||
=====================
|
||||
|
||||
|
||||
The ``DerivedMeasurements`` API provides a consistent way of performing post
|
||||
processing on a provided :class:`MeasurementCsv` file.
|
||||
|
||||
Example
|
||||
-------
|
||||
|
||||
The following example shows how to use an implementation of a
|
||||
:class:`DerivedMeasurement` to obtain a list of calculated ``DerivedMetric``'s.
|
||||
|
||||
.. code-block:: ipython
|
||||
|
||||
# Import the relevant derived measurement module
|
||||
# in this example the derived energy module is used.
|
||||
In [1]: from devlib import DerivedEnergyMeasurements
|
||||
|
||||
# Obtain a MeasurementCsv file from an instrument or create from
|
||||
# existing .csv file. In this example an existing csv file is used which was
|
||||
# created with a sampling rate of 100Hz
|
||||
In [2]: from devlib import MeasurementsCsv
|
||||
In [3]: measurement_csv = MeasurementsCsv('/example/measurements.csv', sample_rate_hz=100)
|
||||
|
||||
# Process the file and obtain a list of the derived measurements
|
||||
In [4]: derived_measurements = DerivedEnergyMeasurements.process(measurement_csv)
|
||||
|
||||
In [5]: derived_measurements
|
||||
Out[5]: [device_energy: 239.1854075 joules, device_power: 5.5494089227 watts]
|
||||
|
||||
API
|
||||
---
|
||||
|
||||
Derived Measurements
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. class:: DerivedMeasurements
|
||||
|
||||
The ``DerivedMeasurements`` class provides an API for post-processing
|
||||
instrument output offline (i.e. without a connection to the target device) to
|
||||
generate additional metrics.
|
||||
|
||||
.. method:: DerivedMeasurements.process(measurement_csv)
|
||||
|
||||
Process a :class:`MeasurementsCsv`, returning a list of
|
||||
:class:`DerivedMetric` and/or :class:`MeasurementsCsv` objects that have been
|
||||
derived from the input. The exact nature and ordering of the list members
|
||||
is specific to individual :class:`DerivedMeasurements` implementations.
|
||||
|
||||
.. method:: DerivedMeasurements.process_raw(\*args)
|
||||
|
||||
Process raw output from an instrument, returning a list of :class:`DerivedMetric`
|
||||
and/or :class:`MeasurementsCsv` objects that have been derived from the
|
||||
input. The exact nature and ordering of the list members is specific to
|
||||
individual :class:`DerivedMeasurements` implementations.
|
||||
|
||||
The arguments to this method should be paths to raw output files generated by
|
||||
an instrument. The number and order of expected arguments is specific to
|
||||
particular implementations.
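
For instance, a sketch of post-processing raw output from the gfxinfo frames
instrument described below (assuming, as with ``DerivedEnergyMeasurements``
above, that the class is importable from the top-level ``devlib`` package; the
file path is a placeholder for a file previously obtained via the instrument's
``get_raw()``):

.. code-block:: ipython

    In [1]: from devlib import DerivedGfxInfoStats

    In [2]: stats = DerivedGfxInfoStats().process_raw('/example/gfxinfo_raw.txt')

    In [3]: [(m.name, m.value, m.units) for m in stats]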
|
||||
|
||||
|
||||
Derived Metric
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
.. class:: DerivedMetric
|
||||
|
||||
Represents a metric derived from previously collected ``Measurement``s.
|
||||
Unlike a ``Measurement``, this was not measured directly from the target.
|
||||
|
||||
|
||||
.. attribute:: DerivedMetric.name
|
||||
|
||||
The name of the derived metric. This uniquely defines a metric -- two
|
||||
``DerivedMetric`` objects with the same ``name`` represent two instances of
|
||||
the same metric (e.g. computed from two different inputs).
|
||||
|
||||
.. attribute:: DerivedMetric.value
|
||||
|
||||
The ``numeric`` value of the metric that has been computed for a particular
|
||||
input.
|
||||
|
||||
.. attribute:: DerivedMetric.measurement_type
|
||||
|
||||
The ``MeasurementType`` of the metric. This indicates which conceptual
|
||||
category the metric falls into, its units, and conversions to other
|
||||
measurement types.
|
||||
|
||||
.. attribute:: DerivedMetric.units
|
||||
|
||||
The units in which the metric's value is expressed.
|
||||
|
||||
|
||||
Available Derived Measurements
|
||||
-------------------------------
|
||||
|
||||
.. note:: If a method of the API is not documented for a particular
|
||||
implementation, that means that it is not overridden by that
|
||||
implementation. It is still safe to call it -- an empty list will be
|
||||
returned.
|
||||
|
||||
Energy
|
||||
~~~~~~
|
||||
|
||||
.. class:: DerivedEnergyMeasurements
|
||||
|
||||
The ``DerivedEnergyMeasurements`` class is used to calculate average power and
|
||||
cumulative energy for each site if the required data is present.
|
||||
|
||||
The calculation of cumulative energy can occur in 3 ways. If a
|
||||
``site`` contains ``energy`` results, the first and last measurements are extracted
|
||||
and the delta calculated. If not, a ``timestamp`` channel will be used to calculate
|
||||
the energy from the power channel, falling back to using the sample rate attribute
|
||||
of the :class:`MeasurementCsv` file if timestamps are not available. If neither
|
||||
timestamps nor a sample rate are available then an error will be raised.
|
||||
|
||||
|
||||
.. method:: DerivedEnergyMeasurements.process(measurement_csv)
|
||||
|
||||
This will return total cumulative energy for each energy channel, and the
|
||||
average power for each power channel in the input CSV. The output will contain
|
||||
all energy metrics followed by power metrics. The ordering of both will match
|
||||
the ordering of channels in the input. The metrics will be named based on the
sites of the corresponding channels according to the following patterns:
|
||||
``"<site>_total_energy"`` and ``"<site>_average_power"``.
|
||||
|
||||
|
||||
FPS / Rendering
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
.. class:: DerivedGfxInfoStats(drop_threshold=5, suffix='-fps', filename=None, outdir=None)
|
||||
|
||||
Produces FPS (frames-per-second) and other derived statistics from
|
||||
:class:`GfxInfoFramesInstrument` output. This takes several optional
|
||||
parameters in creation:
|
||||
|
||||
:param drop_threshold: FPS in an application, such as a game, which this
processor is primarily targeted at, cannot reasonably
drop to a very low value; this parameter specifies that
threshold. If the FPS computed for a frame is
lower than this threshold, it will be dropped on the
assumption that frame rendering was suspended by the
system (e.g. when idling), or that there was some sort of
error, and therefore it should not be used in
performance calculations. Defaults to ``5``.
|
||||
:param suffix: The name of the generated per-frame FPS csv file will be
|
||||
derived from the input frames csv file by appending this
|
||||
suffix. This cannot be specified at the same time as
|
||||
a ``filename``.
|
||||
:param filename: As an alternative to the suffix, a complete file name for
|
||||
FPS csv can be specified. This cannot be used at the same
|
||||
time as the ``suffix``.
|
||||
:param outdir: By default, the FPS csv file will be placed in the same
|
||||
directory as the input frames csv file. This can be changed
|
||||
by specifying an alternate directory here
|
||||
|
||||
.. warning:: Specifying both ``filename`` and ``outdir`` will mean that exactly
|
||||
the same file will be used for FPS output on each invocation of
|
||||
``process()`` (even for different inputs) resulting in previous
|
||||
results being overwritten.
|
||||
|
||||
.. method:: DerivedGfxInfoStats.process(measurement_csv)
|
||||
|
||||
Process the frames csv generated by :class:`GfxInfoFramesInstrument` and
return a list containing exactly three entries: :class:`DerivedMetric`\ s
``fps`` and ``total_frames``, followed by a :class:`MeasurementCsv` containing
per-frame FPS values.
|
||||
|
||||
.. method:: DerivedGfxInfoStats.process_raw(gfxinfo_frame_raw_file)
|
||||
|
||||
As input, this takes a single argument, which should be the path to the raw
|
||||
output file of :class:`GfxInfoFramesInstrument`. This returns the stats
|
||||
accumulated by gfxinfo. At the time of writing, the stats (in order) are:
|
||||
``janks``, ``janks_pc`` (percentage of all frames),
|
||||
``render_time_50th_ptile`` (50th percentile, or median, for time to render a
|
||||
frame), ``render_time_90th_ptile``, ``render_time_95th_ptile``,
|
||||
``render_time_99th_ptile``, ``missed_vsync``, ``hight_input_latency``,
|
||||
``slow_ui_thread``, ``slow_bitmap_uploads``, ``slow_issue_draw_commands``.
|
||||
Please see the `gfxinfo documentation`_ for details.
|
||||
|
||||
.. _gfxinfo documentation: https://developer.android.com/training/testing/performance.html
|
||||
|
||||
|
||||
.. class:: DerivedSurfaceFlingerStats(drop_threshold=5, suffix='-fps', filename=None, outdir=None)
|
||||
|
||||
Produces FPS (frames-per-second) and other derived statistics from
|
||||
:class:`SurfaceFlingerFramesInstrument` output. This takes several optional
|
||||
parameters in creation:
|
||||
|
||||
:param drop_threshold: FPS in an application, such as a game, which this
processor is primarily targeted at, cannot reasonably
drop to a very low value; this parameter specifies that
threshold. If the FPS computed for a frame is
lower than this threshold, it will be dropped on the
assumption that frame rendering was suspended by the
system (e.g. when idling), or that there was some sort of
error, and therefore it should not be used in
performance calculations. Defaults to ``5``.
|
||||
:param suffix: The name of the generated per-frame FPS csv file will be
|
||||
derived from the input frames csv file by appending this
|
||||
suffix. This cannot be specified at the same time as
|
||||
a ``filename``.
|
||||
:param filename: As an alternative to the suffix, a complete file name for
|
||||
FPS csv can be specified. This cannot be used at the same
|
||||
time as the ``suffix``.
|
||||
:param outdir: By default, the FPS csv file will be placed in the same
|
||||
directory as the input frames csv file. This can be changed
|
||||
by specifying an alternate directory here
|
||||
|
||||
.. warning:: Specifying both ``filename`` and ``outdir`` will mean that exactly
|
||||
the same file will be used for FPS output on each invocation of
|
||||
``process()`` (even for different inputs) resulting in previous
|
||||
results being overwritten.
|
||||
|
||||
.. method:: DerivedSurfaceFlingerStats.process(measurement_csv)
|
||||
|
||||
Process the frames csv generated by :class:`SurfaceFlingerFramesInstrument` and
return a list containing :class:`DerivedMetric`\ s
``fps`` and ``total_frames``, followed by a :class:`MeasurementCsv` containing
per-frame FPS values, followed by ``janks``, ``janks_pc``, and
|
||||
``missed_vsync`` metrics.
|
@@ -19,8 +19,9 @@ Contents:
|
||||
target
|
||||
modules
|
||||
instrumentation
|
||||
|
||||
|
||||
derived_measurements
|
||||
platform
|
||||
connection
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
@@ -28,7 +28,7 @@ Android target.
|
||||
# a no-op, but is included here for completeness.
|
||||
In [4]: i.setup()
|
||||
|
||||
# Find out what the instrument is capable collecting from the
|
||||
# Find out what the instrument is capable collecting from the
|
||||
# target.
|
||||
In [5]: i.list_channels()
|
||||
Out[5]:
|
||||
@@ -40,7 +40,7 @@ Android target.
|
||||
In [6]: i.reset(sites=['exynos-therm'])
|
||||
|
||||
# HWMON instrument supports INSTANTANEOUS collection, so invoking
|
||||
# take_measurement() will return a list of measurements take from
|
||||
# take_measurement() will return a list of measurements take from
|
||||
# each of the channels configured during reset()
|
||||
In [7]: i.take_measurement()
|
||||
Out[7]: [exynos-therm_temperature: 36.0 degrees]
|
||||
@@ -65,10 +65,10 @@ Instrument
|
||||
:INSTANTANEOUS: The instrument supports taking a single sample via
|
||||
``take_measurement()``.
|
||||
:CONTINUOUS: The instrument supports collecting measurements over a
|
||||
period of time via ``start()``, ``stop()``, and
|
||||
``get_data()`` methods.
|
||||
period of time via ``start()``, ``stop()``, ``get_data()``,
|
||||
and (optionally) ``get_raw`` methods.
|
||||
|
||||
.. note:: It's possible for one instrument to support more than a single
|
||||
.. note:: It's possible for one instrument to support more than a single
|
||||
mode.
|
||||
|
||||
.. attribute:: Instrument.active_channels
|
||||
@@ -99,14 +99,21 @@ Instrument
|
||||
``teardown()`` has been called), but see documentation for the instrument
|
||||
you're interested in.
|
||||
|
||||
.. method:: Instrument.reset([sites, [kinds]])
|
||||
.. method:: Instrument.reset(sites=None, kinds=None, channels=None)
|
||||
|
||||
This is used to configure an instrument for collection. This must be invoked
|
||||
before ``start()`` is called to begin collection. ``sites`` and ``kinds``
|
||||
parameters may be used to specify which channels measurements should be
|
||||
collected from (if omitted, then measurements will be collected for all
|
||||
available sites/kinds). This methods sets the ``active_channels`` attribute
|
||||
of the ``Instrument``.
|
||||
before ``start()`` is called to begin collection. This methods sets the
|
||||
``active_channels`` attribute of the ``Instrument``.
|
||||
|
||||
If ``channels`` is provided, it is a list of names of channels to enable and
|
||||
``sites`` and ``kinds`` must both be ``None``.
|
||||
|
||||
Otherwise, if one of ``sites`` or ``kinds`` is provided, all channels
|
||||
matching the given sites or kinds are enabled. If both are provided then all
|
||||
channels of the given kinds at the given sites are enabled.
|
||||
|
||||
If none of ``sites``, ``kinds`` or ``channels`` are provided then all
|
||||
available channels are enabled.
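
For illustration, reusing the ``i`` instrument instance from the example earlier
in this document (the channel and site names below are just examples), these
three cases correspond to calls such as:

.. code:: python

    # enable specific channels by name; sites and kinds are left unset
    i.reset(channels=['exynos-therm_temperature'])

    # enable every channel measured at the given site(s)
    i.reset(sites=['exynos-therm'])

    # enable only channels of the given kind(s) at the given site(s)
    i.reset(sites=['exynos-therm'], kinds=['temperature'])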
|
||||
|
||||
.. method:: Instrument.take_measurment()
|
||||
|
||||
@@ -114,14 +121,14 @@ Instrument
|
||||
:class:`Measurement` objects (one for each active channel).
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
support ``INSTANTANEOUS`` measurment.
|
||||
support ``INSTANTANEOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.start()
|
||||
|
||||
Starts collecting measurements from ``active_channels``.
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurment.
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.stop()
|
||||
|
||||
@@ -129,23 +136,44 @@ Instrument
|
||||
:func:`start()`.
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurment.
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.get_data(outfile)
|
||||
|
||||
Write collected data into ``outfile``. Must be called after :func:`stop()`.
|
||||
Write collected data into ``outfile``. Must be called after :func:`stop()`.
|
||||
Data will be written in CSV format with a column for each channel and a row
|
||||
for each sample. Column heading will be channel, labels in the form
|
||||
``<site>_<kind>`` (see :class:`InstrumentChannel`). The order of the coluns
|
||||
for each sample. Column heading will be channel, labels in the form
|
||||
``<site>_<kind>`` (see :class:`InstrumentChannel`). The order of the columns
|
||||
will be the same as the order of channels in ``Instrument.active_channels``.
|
||||
|
||||
If reporting timestamps, one channel must have a ``site`` named ``"timestamp"``
|
||||
and a ``kind`` of a :class:`MeasurmentType` of an appropriate time unit which will
|
||||
be used, if appropriate, during any post processing.
|
||||
|
||||
.. note:: Currently supported time units are seconds, milliseconds and
|
||||
microseconds, other units can also be used if an appropriate
|
||||
conversion is provided.
|
||||
|
||||
This returns a :class:`MeasurementCsv` instance associated with the outfile
|
||||
that can be used to stream :class:`Measurement`\ s lists (similar to what is
|
||||
returned by ``take_measurement()``.
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurment.
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.get_raw()
|
||||
|
||||
Returns a list of paths to files containing raw output from the underlying
|
||||
source(s) that is used to produce the data CSV. If no raw output is
|
||||
generated or saved, an empty list will be returned. The format of the
|
||||
contents of the raw files is entirely source-dependent.
|
||||
|
||||
.. attribute:: Instrument.sample_rate_hz
|
||||
|
||||
Sample rate of the instrument in Hz. Assumed to be the same for all channels.
|
||||
|
||||
.. note:: This attribute is only provided by :class:`Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
Instrument Channel
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
@@ -157,16 +185,16 @@ Instrument Channel
|
||||
``site`` and a ``measurement_type``.
|
||||
|
||||
A ``site`` indicates where on the target a measurement is collected from
|
||||
(e.g. a volage rail or location of a sensor).
|
||||
(e.g. a voltage rail or location of a sensor).
|
||||
|
||||
A ``measurement_type`` is an instance of :class:`MeasurmentType` that
|
||||
describes what sort of measurment this is (power, temperature, etc). Each
|
||||
mesurement type has a standard unit it is reported in, regardless of an
|
||||
describes what sort of measurement this is (power, temperature, etc). Each
|
||||
measurement type has a standard unit it is reported in, regardless of an
|
||||
instrument used to collect it.
|
||||
|
||||
A channel (i.e. site/measurement_type combination) is unique per instrument,
|
||||
however there may be more than one channel associated with one site (e.g. for
|
||||
both volatage and power).
|
||||
both voltage and power).
|
||||
|
||||
It should not be assumed that any site/measurement_type combination is valid.
|
||||
The list of available channels can queried with
|
||||
@@ -174,22 +202,22 @@ Instrument Channel
|
||||
|
||||
.. attribute:: InstrumentChannel.site
|
||||
|
||||
The name of the "site" from which the measurments are collected (e.g. voltage
|
||||
The name of the "site" from which the measurements are collected (e.g. voltage
|
||||
rail, sensor, etc).
|
||||
|
||||
.. attribute:: InstrumentChannel.kind
|
||||
|
||||
A string indingcating the type of measrument that will be collted. This is
|
||||
A string indicating the type of measurement that will be collected. This is
|
||||
the ``name`` of the :class:`MeasurmentType` associated with this channel.
|
||||
|
||||
.. attribute:: InstrumentChannel.units
|
||||
|
||||
Units in which measurment will be reported. this is determined by the
|
||||
Units in which measurement will be reported. this is determined by the
|
||||
underlying :class:`MeasurmentType`.
|
||||
|
||||
.. attribute:: InstrumentChannel.label
|
||||
|
||||
A label that can be attached to measurments associated with with channel.
|
||||
A label that can be attached to measurements associated with with channel.
|
||||
This is constructed with ::
|
||||
|
||||
'{}_{}'.format(self.site, self.kind)
|
||||
@@ -205,27 +233,33 @@ be reported as "power" in Watts, and never as "pwr" in milliWatts. Currently
|
||||
defined measurement types are
|
||||
|
||||
|
||||
+-------------+---------+---------------+
|
||||
| name | units | category |
|
||||
+=============+=========+===============+
|
||||
| time | seconds | |
|
||||
+-------------+---------+---------------+
|
||||
| temperature | degrees | |
|
||||
+-------------+---------+---------------+
|
||||
| power | watts | power/energy |
|
||||
+-------------+---------+---------------+
|
||||
| voltage | volts | power/energy |
|
||||
+-------------+---------+---------------+
|
||||
| current | amps | power/energy |
|
||||
+-------------+---------+---------------+
|
||||
| energy | joules | power/energy |
|
||||
+-------------+---------+---------------+
|
||||
| tx | bytes | data transfer |
|
||||
+-------------+---------+---------------+
|
||||
| rx | bytes | data transfer |
|
||||
+-------------+---------+---------------+
|
||||
| tx/rx | bytes | data transfer |
|
||||
+-------------+---------+---------------+
|
||||
+-------------+-------------+---------------+
|
||||
| name | units | category |
|
||||
+=============+=============+===============+
|
||||
| count | count | |
|
||||
+-------------+-------------+---------------+
|
||||
| percent | percent | |
|
||||
+-------------+-------------+---------------+
|
||||
| time_us | microseconds| time |
|
||||
+-------------+-------------+---------------+
|
||||
| time_ms | milliseconds| time |
|
||||
+-------------+-------------+---------------+
|
||||
| temperature | degrees | thermal |
|
||||
+-------------+-------------+---------------+
|
||||
| power | watts | power/energy |
|
||||
+-------------+-------------+---------------+
|
||||
| voltage | volts | power/energy |
|
||||
+-------------+-------------+---------------+
|
||||
| current | amps | power/energy |
|
||||
+-------------+-------------+---------------+
|
||||
| energy | joules | power/energy |
|
||||
+-------------+-------------+---------------+
|
||||
| tx | bytes | data transfer |
|
||||
+-------------+-------------+---------------+
|
||||
| rx | bytes | data transfer |
|
||||
+-------------+-------------+---------------+
|
||||
| tx/rx | bytes | data transfer |
|
||||
+-------------+-------------+---------------+
|
||||
|
||||
|
||||
.. instruments:
|
||||
|
doc/modules.rst (231 lines)
@@ -1,3 +1,5 @@
|
||||
.. _modules:
|
||||
|
||||
Modules
|
||||
=======
|
||||
|
||||
@@ -9,7 +11,7 @@ hotplug
|
||||
-------
|
||||
|
||||
Kernel ``hotplug`` subsystem allows offlining ("removing") cores from the
|
||||
system, and onlining them back int. The ``devlib`` module exposes a simple
|
||||
system, and onlining them back in. The ``devlib`` module exposes a simple
|
||||
interface to this subsystem
|
||||
|
||||
.. code:: python
|
||||
@@ -35,10 +37,10 @@ policies (governors). The ``devlib`` module exposes the following interface
|
||||
|
||||
.. note:: On ARM big.LITTLE systems, all cores on a cluster (usually all cores
|
||||
of the same type) are in the same frequency domain, so setting
|
||||
``cpufreq`` state on one core on a cluter will affect all cores on
|
||||
``cpufreq`` state on one core on a cluster will affect all cores on
|
||||
that cluster. Because of this, some devices only expose cpufreq sysfs
|
||||
interface (which is what is used by the ``devlib`` module) on the
|
||||
first cpu in a cluster. So to keep your scripts proable, always use
|
||||
first cpu in a cluster. So to keep your scripts portable, always use
|
||||
the fist (online) CPU in a cluster to set ``cpufreq`` state.
|
||||
|
||||
.. method:: target.cpufreq.list_governors(cpu)
|
||||
@@ -64,26 +66,26 @@ policies (governors). The ``devlib`` module exposes the following interface
|
||||
:param cpu: The cpu; could be a numeric or the corresponding string (e.g.
|
||||
``1`` or ``"cpu1"``).
|
||||
|
||||
.. method:: target.cpufreq.set_governor(cpu, governor, **kwargs)
|
||||
.. method:: target.cpufreq.set_governor(cpu, governor, \*\*kwargs)
|
||||
|
||||
Sets the governor for the specified cpu.
|
||||
|
||||
:param cpu: The cpu; could be a numeric or the corresponding string (e.g.
|
||||
``1`` or ``"cpu1"``).
|
||||
:param governor: The name of the governor. This must be one of the governors
|
||||
supported by the CPU (as retrunted by ``list_governors()``.
|
||||
:param governor: The name of the governor. This must be one of the governors
|
||||
supported by the CPU (as returned by ``list_governors()``.
|
||||
|
||||
Keyword arguments may be used to specify governor tunable values.
|
||||
|
||||
|
||||
.. method:: target.cpufreq.get_governor_tunables(cpu)
|
||||
|
||||
Return a dict with the values of the specfied CPU's current governor.
|
||||
Return a dict with the values of the specified CPU's current governor.
|
||||
|
||||
:param cpu: The cpu; could be a numeric or the corresponding string (e.g.
|
||||
``1`` or ``"cpu1"``).
|
||||
|
||||
.. method:: target.cpufreq.set_governor_tunables(cpu, **kwargs)
|
||||
.. method:: target.cpufreq.set_governor_tunables(cpu, \*\*kwargs)
|
||||
|
||||
Set the tunables for the current governor on the specified CPU.
|
||||
|
||||
@@ -92,7 +94,7 @@ policies (governors). The ``devlib`` module exposes the following interface
|
||||
|
||||
Keyword arguments should be used to specify tunable values.
|
||||
|
||||
.. method:: target.cpufreq.list_frequencie(cpu)
|
||||
.. method:: target.cpufreq.list_frequencies(cpu)
|
||||
|
||||
List DVFS frequencies supported by the specified CPU. Returns a list of ints.
|
||||
|
||||
@@ -104,11 +106,20 @@ policies (governors). The ``devlib`` module exposes the following interface
|
||||
target.cpufreq.set_min_frequency(cpu, frequency[, exact=True])
|
||||
target.cpufreq.set_max_frequency(cpu, frequency[, exact=True])
|
||||
|
||||
Get and set min and max frequencies on the specfied CPU. "set" functions are
|
||||
avialable with all governors other than ``userspace``.
|
||||
Get the currently set, or set new min and max frequencies for the specified
|
||||
CPU. "set" functions are available with all governors other than
|
||||
``userspace``.
|
||||
|
||||
:param cpu: The cpu; could be a numeric or the corresponding string (e.g.
|
||||
``1`` or ``"cpu1"``).
|
||||
|
||||
.. method:: target.cpufreq.get_min_available_frequency(cpu)
|
||||
target.cpufreq.get_max_available_frequency(cpu)
|
||||
|
||||
Retrieve the min or max DVFS frequency that is supported (as opposed to
|
||||
currently enforced) for a given CPU. Returns an int or None if could not be
|
||||
determined.
|
||||
|
||||
:param frequency: Frequency to set.
|
||||
|
||||
.. method:: target.cpufreq.get_frequency(cpu)
|
||||
@@ -124,13 +135,13 @@ policies (governors). The ``devlib`` module exposes the following interface
|
||||
cpuidle
|
||||
-------
|
||||
|
||||
``cpufreq`` is the kernel subsystem for managing CPU low power (idle) states.
|
||||
``cpuidle`` is the kernel subsystem for managing CPU low power (idle) states.
|
||||
|
||||
.. method:: taget.cpuidle.get_driver()
|
||||
.. method:: target.cpuidle.get_driver()
|
||||
|
||||
Return the name of the current cpuidle driver.
|
||||
|
||||
.. method:: taget.cpuidle.get_governor()
|
||||
.. method:: target.cpuidle.get_governor()
|
||||
|
||||
Return the name of the current cpuidle governor (policy).
|
||||
|
||||
@@ -153,7 +164,7 @@ cpuidle
|
||||
Enable or disable the specified or all states (optionally on the specified
|
||||
CPU.
|
||||
|
||||
You can also call ``enable()`` or ``disable()`` on :class:`CpuidleState` objects
|
||||
You can also call ``enable()`` or ``disable()`` on :class:`CpuidleState` objects
|
||||
returned by get_state(s).
|
||||
|
||||
cgroups
|
||||
@@ -169,4 +180,192 @@ TODO
|
||||
API
|
||||
---
|
||||
|
||||
TODO
|
||||
Generic Module API Description
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Modules implement discrete, optional pieces of functionality ("optional" in the
|
||||
sense that the functionality may or may not be present on the target device, or
|
||||
that it may or may not be necessary for a particular application).
|
||||
|
||||
Every module (ultimately) derives from :class:`Module` class. A module must
|
||||
define the following class attributes:
|
||||
|
||||
:name: A unique name for the module. This cannot clash with any of the existing
|
||||
names and must be a valid Python identifier, but is otherwise free-form.
|
||||
:kind: This identifies the type of functionality a module implements, which in
|
||||
turn determines the interface implemented by the module (all modules of
|
||||
the same kind must expose a consistent interface). This must be a valid
|
||||
Python identifier, but is otherwise free-form, though, where possible,
|
||||
one should try to stick to an already-defined kind/interface, lest we end
|
||||
up with a bunch of modules implementing similar functionality but
|
||||
exposing slightly different interfaces.
|
||||
|
||||
.. note:: It is possible to omit ``kind`` when defining a module, in
|
||||
which case the module's ``name`` will be treated as its
|
||||
``kind`` as well.
|
||||
|
||||
:stage: This defines when the module will be installed into a :class:`Target`.
|
||||
Currently, the following values are allowed:
|
||||
|
||||
:connected: The module is installed after a connection to the target has
|
||||
been established. This is the default.
|
||||
:early: The module will be installed when a :class:`Target` is first
|
||||
created. This should be used for modules that do not rely on a
|
||||
live connection to the target.
|
||||
|
||||
Additionally, a module must implement a static (or class) method :func:`probe`:
|
||||
|
||||
.. method:: Module.probe(target)
|
||||
|
||||
This method takes a :class:`Target` instance and returns ``True`` if this
|
||||
module is supported by that target, or ``False`` otherwise.
|
||||
|
||||
.. note:: If the module ``stage`` is ``"early"``, this method cannot assume
|
||||
that a connection has been established (i.e. it can only access
|
||||
attributes of the Target that do not rely on a connection).
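
To make this concrete, here is a minimal sketch of a module definition. The
class name, ``kind`` and the sysfs path being probed are assumptions invented
for this example rather than an existing devlib module:

.. code:: python

    from devlib.module import Module


    class ExampleThermalModule(Module):

        name = 'example_thermal'
        kind = 'thermal_example'
        stage = 'connected'   # install only once a connection is established

        @staticmethod
        def probe(target):
            # Only report support if the (assumed) sysfs path exists on the target.
            return target.file_exists('/sys/class/thermal')

        def read_zone_temp(self, zone=0):
            # Illustrative helper, exposed as target.thermal_example.read_zone_temp()
            path = '/sys/class/thermal/thermal_zone{}/temp'.format(zone)
            return int(self.target.read_value(path))

With the default installation procedure described in the next section, an
instance of this class would become available as ``target.thermal_example`` on
any target for which ``probe()`` returns ``True``.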
|
||||
|
||||
Installation and invocation
|
||||
***************************
|
||||
|
||||
The default installation method will create an instance of a module (the
|
||||
:class:`Target` instance being the sole argument) and assign it to the target
|
||||
instance attribute named after the module's ``kind`` (or ``name`` if ``kind`` is
|
||||
``None``).
|
||||
|
||||
It is possible to change the installation procedure for a module by overriding
|
||||
the default :func:`install` method. The method must have the following
|
||||
signature:
|
||||
|
||||
.. method:: Module.install(cls, target, **kwargs)
|
||||
|
||||
Install the module into the target instance.
|
||||
|
||||
|
||||
Implementation and Usage Patterns
|
||||
*********************************
|
||||
|
||||
There are two common ways to implement the above API, corresponding to the two
|
||||
common uses for modules:
|
||||
|
||||
- If a module provides an interface to a particular set of functionality (e.g.
|
||||
an OS subsystem), that module would typically derive directly from
:class:`Module` and would leave ``kind`` unassigned, so that it is accessed
by its name. Its instance's methods and attributes provide the interface for
|
||||
interacting with its functionality. For examples of this type of module, see
|
||||
the subsystem modules listed above (e.g. ``cpufreq``).
|
||||
- If a module provides a platform- or infrastructure-specific implementation of
|
||||
a common function, the module would derive from one of :class:`Module`
|
||||
subclasses that define the interface for that function. In that case the
|
||||
module would be accessible via the common ``kind`` defined by its superclass. The
|
||||
module would typically implement :func:`__call__` and be invoked directly. For
|
||||
examples of this type of module, see common function interface definitions
|
||||
below.
|
||||
|
||||
|
||||
Common Function Interfaces
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This section documents :class:`Module` classes defining interface for common
|
||||
functions. Classes derived from them provide concrete implementations for
|
||||
specific platforms.
|
||||
|
||||
|
||||
HardResetModule
|
||||
***************
|
||||
|
||||
.. attribute:: HardResetModule.kind
|
||||
|
||||
"hard_reset"
|
||||
|
||||
.. method:: HardResetModule.__call__()
|
||||
|
||||
Must be implemented by derived classes.
|
||||
|
||||
Implements hard reset for a target device. This is the equivalent of physically
|
||||
power cycling the device. This may be used by client code in situations
|
||||
where the target becomes unresponsive and/or a regular reboot is not
|
||||
possible.
|
||||
|
||||
|
||||
BootModule
|
||||
**********
|
||||
|
||||
.. attribute:: BootModule.kind
|
||||
|
||||
"hard_reset"
|
||||
|
||||
.. method:: BootModule.__call__()
|
||||
|
||||
Must be implemented by derived classes.
|
||||
|
||||
Implements a boot procedure. This takes the device from (hard or soft)
|
||||
reset to a booted state where the device is ready to accept connections. For
|
||||
a lot of commercial devices the process is entirely automatic, however some
|
||||
devices (e.g. development boards), may require additional steps, such as
|
||||
interactions with the bootloader, in order to boot into the OS.
|
||||
|
||||
.. method:: Bootmodule.update(\*\*kwargs)
|
||||
|
||||
Update the boot settings. Some boot sequences allow specifying settings
|
||||
that will be utilized during boot (e.g. linux kernel boot command line). The
|
||||
default implementation will set each setting in ``kwargs`` as an attribute of
|
||||
the boot module (or update the existing attribute).
|
||||
|
||||
|
||||
FlashModule
|
||||
***********
|
||||
|
||||
.. attribute:: FlashModule.kind
|
||||
|
||||
"flash"
|
||||
|
||||
.. method:: __call__(image_bundle=None, images=None, boot_config=None)
|
||||
|
||||
Must be implemented by derived classes.
|
||||
|
||||
Flash the target platform with the specified images.
|
||||
|
||||
:param image_bundle: A compressed bundle of image files with any associated
|
||||
metadata. The format of the bundle is specific to a
|
||||
particular implementation.
|
||||
:param images: A dict mapping image names/identifiers to the path on the
|
||||
host file system of the corresponding image file. If both
|
||||
this and ``image_bundle`` are specified, individual images
|
||||
will override those in the bundle.
|
||||
:param boot_config: Some platforms require specifying boot arguments at the
|
||||
time of flashing the images, rather than during each
|
||||
reboot. For other platforms, this will be ignored.
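
Putting this together, a hypothetical invocation through a target that has a
flash module installed might look as follows (the paths, image name and boot
arguments are placeholders):

.. code:: python

    # flash a complete image bundle
    target.flash(image_bundle='/path/to/images.tar.gz')

    # or override an individual image from the bundle
    target.flash(images={'kernel': '/path/to/Image'},
                 boot_config='console=ttyAMA0,115200')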
|
||||
|
||||
|
||||
Module Registration
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Modules are specified on :class:`Target` or :class:`Platform` creation by name.
|
||||
In order to find the class associated with the name, the module needs to be
|
||||
registered with ``devlib``. This is accomplished by passing the module class
|
||||
into :func:`register_module` method once it is defined.
|
||||
|
||||
.. note:: If you're writing a module to be included as part of the ``devlib`` code
|
||||
base, you can place the file with the module class under
|
||||
``devlib/modules/`` in the source and it will be automatically
|
||||
enumerated. There is no need to explicitly register it in that case.
|
||||
|
||||
The code snippet below illustrates an implementation of a hard reset function
|
||||
for an "Acme" device.
|
||||
|
||||
.. code:: python
|
||||
|
||||
import os
|
||||
from devlib import HardResetModule, register_module
|
||||
|
||||
|
||||
class AcmeHardReset(HardResetModule):
|
||||
|
||||
name = 'acme_hard_reset'
|
||||
|
||||
def __call__(self):
|
||||
# Assuming Acme board comes with a "reset-acme-board" utility
|
||||
os.system('reset-acme-board {}'.format(self.target.name))
|
||||
|
||||
register_module(AcmeHardReset)
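
Once registered, the module can be requested by name when creating a target.
The sketch below assumes the module attaches itself to the target under its
``hard_reset`` kind; the connection settings are placeholders:

.. code:: python

    from devlib import LinuxTarget

    t = LinuxTarget(connection_settings={'host': '192.168.0.5',
                                         'username': 'root',
                                         'password': 'sekrit'},
                    modules=['acme_hard_reset'])
    t.hard_reset()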


@@ -1,7 +1,7 @@

Overview
========

A :class:`Target` instance serves as the main interface to the target device.
There are currently three target interfaces:

- :class:`LinuxTarget` for interacting with Linux devices over SSH.

@@ -20,7 +20,7 @@ Acquiring a Target

To create an interface to your device, you just need to instantiate one of the
:class:`Target` derivatives listed above, and pass it the right
``connection_settings``. The code snippet below gives a typical example of
instantiating each of the three target types.

.. code:: python

@@ -32,7 +32,7 @@ instantiating each of the three target types.

    # For a Linux device, you will need to provide the normal SSH credentials.
    # Both password-based and key-based authentication are supported (password
    # authentication requires sshpass to be installed on your host machine).
    t2 = LinuxTarget(connection_settings={'host': '192.168.0.5',
                                          'username': 'root',
                                          'password': 'sekrit',
                                          # or

@@ -57,7 +57,7 @@ Target Interface
----------------

This is a quick overview of the basic interface to the device. See the
:class:`Target` API documentation for the full list of supported methods and
more detailed documentation.

One-time Setup

@@ -74,13 +74,13 @@ This sets up the target for ``devlib`` interaction. This includes creating
working directories, deploying busybox, etc. It's usually enough to do this once
for a new device, as the changes this makes will persist across reboots.
However, there is no issue with calling this multiple times, so, to be on the
safe side, it's a good idea to call this once at the beginning of your scripts.

Command Execution
~~~~~~~~~~~~~~~~~

There are several ways to execute a command on the target. In each case, a
:class:`TargetError` will be raised if something goes wrong. In each case, it is
also possible to specify ``as_root=True`` if the specified command should be
executed as root.

@@ -89,7 +89,7 @@ executed as root.

    from devlib import LocalLinuxTarget
    t = LocalLinuxTarget()

    # Execute a command
    output = t.execute('echo $PWD')

    # Execute command via a subprocess and return the corresponding Popen object.

@@ -100,7 +100,7 @@ executed as root.

    # Run the command in the background on the device and return immediately.
    # This will not block the connection, allowing another command to be
    # executed immediately.
    t.kick_off('echo $PWD')

    # This is used to invoke an executable binary on the device. This allows some

@@ -125,7 +125,7 @@ File Transfer

    t.pull('/path/to/target/file.txt', '/path/to/local/file.txt')

    # Install the specified binary on the target. This will deploy the file and
    # ensure it's executable. This will *not* guarantee that the binary will be
    # in PATH. Instead the path to the binary will be returned; this should be
    # used to call the binary henceforth.
    target_bin = t.install('/path/to/local/bin.exe')

@@ -133,7 +133,7 @@ File Transfer

    output = t.execute('{} --some-option'.format(target_bin))

The usual access permission constraints on the user account (both on the target
and the host) apply.

Process Control
~~~~~~~~~~~~~~~

@@ -154,7 +154,7 @@ Process Control

    # kill all running instances of a process.
    t.killall('badexe', signal=signal.SIGKILL)

    # List processes running on the target. This returns a list of parsed
    # PsEntry records.
    entries = t.ps()
    # e.g. print virtual memory sizes of all running sshd processes:

@@ -173,7 +173,7 @@ Super User Privileges

It is not necessary for the account logged in on the target to have super user
privileges, however the functionality will obviously be diminished, if that is
not the case. ``devlib`` will determine if the logged in user has root
privileges and the correct way to invoke it. You should avoid including "sudo"
directly in your commands; instead, specify ``as_root=True`` where needed. This
will make your scripts portable across multiple devices and OS's.

@@ -193,7 +193,7 @@ working_directory
    by your script on the device and as the destination for all
    host-to-target file transfers. It may or may not permit execution so
    executables should not be run directly from here.

executables_directory
    This directory allows execution. This will be used by ``install()``.

@@ -249,7 +249,7 @@ You can collect traces (currently, just ftrace) using

    from devlib import LocalLinuxTarget, FtraceCollector
    t = LocalLinuxTarget()

    # Initialize a collector specifying the events you want to collect and
    # the buffer size to be used.
    trace = FtraceCollector(t, events=['power*'], buffer_size=40000)
doc/platform.rst (new file, 171 lines)

@@ -0,0 +1,171 @@

.. _platform:

Platform
========

:class:`Platform`\ s describe the system underlying the OS. They encapsulate
hardware- and firmware-specific details. In most cases, the generic
:class:`Platform` class, which gets used if a platform is not explicitly
specified on :class:`Target` creation, will be sufficient. It will automatically
query as much platform information (such as CPU topology, hardware model, etc.)
as it can, if it was not specified explicitly by the user.


.. class:: Platform(name=None, core_names=None, core_clusters=None,\
                    big_core=None, model=None, modules=None)

    :param name: A user-friendly identifier for the platform.
    :param core_names: A list of CPU core names in the order in which they are
                       registered with the OS. If they are not specified,
                       they will be queried at run time.
    :param core_clusters: A list with the cluster id of each core (starting with
                          0). If this is not specified, clusters will be
                          inferred from core names (cores with the same name are
                          assumed to be in the same cluster).
    :param big_core: The name of the big core in a big.LITTLE system. If this is
                     not specified it will be inferred (on systems with exactly
                     two clusters).
    :param model: Model name of the hardware system. If this is not specified it
                  will be queried at run time.
    :param modules: Modules with additional functionality supported by the
                    platform (e.g. for handling flashing, rebooting, etc.). These
                    would be added to the Target's modules. (See :ref:`modules`\ ).
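
    A minimal sketch of describing a board explicitly instead of relying on
    run-time probing (the ``Platform`` import location is assumed; core names,
    addresses and credentials below are made up):

    .. code:: python

        from devlib import Platform, LinuxTarget

        platform = Platform(name='example-board',
                            core_names=['a53', 'a53', 'a57', 'a57'],
                            core_clusters=[0, 0, 1, 1],
                            big_core='a57',
                            model='Example SoC')

        target = LinuxTarget(connection_settings={'host': '192.168.0.5',
                                                  'username': 'root',
                                                  'password': 'sekrit'},
                             platform=platform)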

Versatile Express
-----------------

The generic platform may be extended to support hardware- or
infrastructure-specific functionality. Platforms exist for the ARM
VersatileExpress-based :class:`Juno` and :class:`TC2` development boards. In
addition to the standard :class:`Platform` parameters above, these platforms
support additional configuration:


.. class:: VersatileExpressPlatform

    Normally, this would be instantiated via one of its derived classes
    (:class:`Juno` or :class:`TC2`) that set appropriate defaults for some of
    the parameters.

    :param serial_port: Identifies the serial port (usually a /dev node) on which the
                        device is connected.
    :param baudrate: Baud rate for the serial connection. This defaults to
                     ``115200`` for :class:`Juno` and ``38400`` for
                     :class:`TC2`.
    :param vemsd_mount: Mount point for the VEMSD (Versatile Express MicroSD card
                        that is used for board configuration files and firmware
                        images). This defaults to ``"/media/JUNO"`` for
                        :class:`Juno` and ``"/media/VEMSD"`` for :class:`TC2`,
                        though you would most likely need to change this for
                        your setup as it would depend both on the file system
                        label on the MicroSD card, and on how the card was
                        mounted on the host system.
    :param hard_reset_method: Specifies the method for hard-resetting the device
                              (e.g. if it becomes unresponsive and the normal
                              reboot method does not work). Currently supported
                              methods are:

                              :dtr: reboot by toggling the DTR line on the serial
                                    connection (this is enabled via a DIP switch
                                    on the board).
                              :reboottxt: reboot by writing a file called
                                          ``reboot.txt`` to the root of the VEMSD
                                          mount (this is enabled via the board
                                          configuration file).

                              This defaults to ``dtr`` for :class:`Juno` and
                              ``reboottxt`` for :class:`TC2`.
    :param bootloader: Specifies the bootloader configuration used by the board.
                       The following values are currently supported:

                       :uefi: Boot via the UEFI menu, by selecting the entry
                              specified by the ``uefi_entry`` parameter. If this
                              entry does not exist, it will be automatically
                              created based on values provided for the ``image``,
                              ``initrd``, ``fdt``, and ``bootargs`` parameters.
                       :uefi-shell: Boot by going via the UEFI shell.
                       :u-boot: Boot using Das U-Boot.
                       :bootmon: Boot directly via Versatile Express Bootmon
                                 using the values provided for the ``image``,
                                 ``initrd``, ``fdt``, and ``bootargs``
                                 parameters.

                       This defaults to ``u-boot`` for :class:`Juno` and
                       ``bootmon`` for :class:`TC2`.
    :param flash_method: Specifies how the device is flashed. Currently, only the
                         ``"vemsd"`` method is supported, which flashes by
                         writing firmware images to an appropriate location on
                         the VEMSD.
    :param image: Specifies the kernel image name for ``uefi`` or ``bootmon`` boot.
    :param fdt: Specifies the device tree blob for ``uefi`` or ``bootmon`` boot.
    :param initrd: Specifies the ramdisk image for ``uefi`` or ``bootmon`` boot.
    :param bootargs: Specifies the boot arguments that will be passed to the
                     kernel by the bootloader.
    :param uefi_entry: The name of the UEFI entry to be used/created by the
                       ``uefi`` bootloader.
    :param ready_timeout: Timeout, in seconds, for the time it takes the
                          platform to become ready to accept connections. Note:
                          this does not mean that the system is fully booted;
                          just that the services needed to establish a
                          connection (e.g. sshd or adbd) are up.
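
    As a sketch (with made-up paths and credentials, and assuming ``Juno`` can
    be imported from the ``devlib`` top-level package), a Juno board might be
    described like this:

    .. code:: python

        from devlib import LinuxTarget, Juno

        platform = Juno(vemsd_mount='/media/JUNO',
                        serial_port='/dev/ttyUSB0',
                        bootargs='console=ttyAMA0,115200 root=/dev/sda1')

        target = LinuxTarget(connection_settings={'host': '10.1.0.2',
                                                  'username': 'root'},
                             platform=platform)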

.. _gem5-platform:

Gem5 Simulation Platform
------------------------

By initialising a Gem5SimulationPlatform, devlib will start a gem5 simulation (based upon the
arguments the user provided) and then connect to it using :class:`Gem5Connection`.
Some methods of the :class:`Target` will be altered slightly, as described below,
to better suit gem5.

.. class:: Gem5SimulationPlatform(name, host_output_dir, gem5_bin, gem5_args, gem5_virtio, gem5_telnet_port=None)

    During initialisation the gem5 simulation will be kicked off (based upon the arguments
    provided by the user) and the telnet port used by the gem5 simulation will be intercepted
    and stored for use by the :class:`Gem5Connection`.

    :param name: Platform name

    :param host_output_dir: Path on the host where the gem5 outputs will be placed (e.g. the stats file)

    :param gem5_bin: gem5 binary

    :param gem5_args: Arguments to be passed onto gem5, such as the config file etc.

    :param gem5_virtio: Arguments to be passed onto gem5 in terms of the virtIO device used
                        to transfer files between the host and the gem5 simulated system.

    :param gem5_telnet_port: Not yet in use; it would be used in future implementations
                             of devlib in which the user could use the platform to pick
                             up an existing and running simulation.


.. method:: Gem5SimulationPlatform.init_target_connection([target])

    Based upon the OS defined in the :class:`Target`, the type of :class:`Gem5Connection`
    will be set (:class:`AndroidGem5Connection` or :class:`LinuxGem5Connection`).

.. method:: Gem5SimulationPlatform.update_from_target([target])

    This method provides specific setup procedures for a gem5 simulation. First of all, the m5
    binary will be installed on the guest (if it is not present). Secondly, three methods
    in the :class:`Target` will be monkey-patched:

    - **reboot**: this is not supported in gem5
    - **reset**: this is not supported in gem5
    - **capture_screen**: gem5 might already have screencaps so the
      monkey-patched method will first try to transfer the existing screencaps.
      In case that does not work, it will fall back to the original
      :class:`Target` implementation of :func:`capture_screen`.

    Finally, it will call the parent implementation of :func:`update_from_target`.

.. method:: Gem5SimulationPlatform.setup([target])

    The m5 binary will be installed, if it is not yet present on the gem5 simulated system.
    It will also resize the gem5 shell, to avoid line wrapping issues.
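
A rough sketch of kicking off a simulation; the import path, binary location
and gem5 arguments below are hypothetical and depend entirely on your gem5
setup:

.. code:: python

    from devlib.platform.gem5 import Gem5SimulationPlatform  # import path assumed

    platform = Gem5SimulationPlatform(name='gem5',
                                      host_output_dir='/tmp/gem5-out',
                                      gem5_bin='/opt/gem5/build/ARM/gem5.fast',
                                      # The config script and virtIO arguments depend on your setup.
                                      gem5_args='/opt/gem5/configs/example/fs.py',
                                      gem5_virtio='/opt/gem5/images/vio')

    # The resulting platform would then be passed to a Target via its
    # ``platform`` parameter.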

doc/target.rst (276 lines)

@@ -2,28 +2,28 @@ Target
======


.. class:: Target(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=None)

    :class:`Target` is the primary interface to the remote device. All interactions
    with the device are performed via a :class:`Target` instance, either
    directly, or via its modules or a wrapper interface (such as an
    :class:`Instrument`).

    :param connection_settings: A ``dict`` that specifies how to connect to the remote
        device. Its contents depend on the specific :class:`Target` type used (see
        :ref:`connection-types`\ ).

    :param platform: A :class:`Target` defines interactions at Operating System level. A
        :class:`Platform` describes the underlying hardware (such as CPUs
        available). If a :class:`Platform` instance is not specified on
        :class:`Target` creation, one will be created automatically and it will
        dynamically probe the device to discover as much about the underlying
        hardware as it can. See also :ref:`platform`\ .

    :param working_directory: This is the primary location for on-target file system
        interactions performed by ``devlib``. This location *must* be readable and
        writable directly (i.e. without sudo) by the connection's user account.
        It may or may not allow execution. This location will be created,
        if necessary, during ``setup()``.

        If not explicitly specified, this will be set to a default value

@@ -35,10 +35,10 @@ Target
        (obviously). It should also be possible to write to this location,
        possibly with elevated privileges (i.e. on a rooted Linux target, it
        should be possible to write here with sudo, but not necessarily directly
        by the connection's account). This location will be created,
        if necessary, during ``setup()``.

        This location does *not* need to be the same as the system's executables
        location. In fact, to prevent devlib from overwriting the system's defaults,
        it is better if this is a separate location, if possible.

@@ -52,8 +52,8 @@ Target

    :param modules: a list of additional modules to be installed. Some modules will
        try to install by default (if supported by the underlying target).
        Current default modules are ``hotplug``, ``cpufreq``, ``cpuidle``,
        ``cgroups``, and ``hwmon`` (See :ref:`modules`\ ).

        See the modules documentation for more detail.

@@ -68,6 +68,9 @@ Target
        prompted on the target. This may be used by some modules that establish
        auxiliary connections to a target over UART.

    :param conn_cls: This is the type of connection that will be used to communicate
        with the device.

.. attribute:: Target.core_names

    This is a list containing names of CPU cores on the target, in the order in

@@ -83,18 +86,18 @@ Target

.. attribute:: Target.big_core

    This is the name of the cores that are the "big"s in an ARM big.LITTLE
    configuration. This is obtained via the underlying :class:`Platform`.

.. attribute:: Target.little_core

    This is the name of the cores that are the "little"s in an ARM big.LITTLE
    configuration. This is obtained via the underlying :class:`Platform`.

.. attribute:: Target.is_connected

    A boolean value that indicates whether an active connection exists to the
    target device.

.. attribute:: Target.connected_as_root

@@ -146,7 +149,7 @@ Target
    thread.

.. method:: Target.connect([timeout])

    Establish a connection to the target. It is usually not necessary to call
    this explicitly, as a connection gets automatically established on
    instantiation.

@@ -199,21 +202,23 @@ Target
    operations during the reboot process to detect if the reboot has failed and
    the device has hung.

.. method:: Target.push(source, dest [, as_root, timeout])

    Transfer a file from the host machine to the target device.

    :param source: path to the file on the host
    :param dest: path to the file on the target
    :param as_root: whether root is required. Defaults to false.
    :param timeout: timeout (in seconds) for the transfer; if the transfer does
        not complete within this period, an exception will be raised.

.. method:: Target.pull(source, dest [, as_root, timeout])

    Transfer a file from the target device to the host machine.

    :param source: path to the file on the target
    :param dest: path to the file on the host
    :param as_root: whether root is required. Defaults to false.
    :param timeout: timeout (in seconds) for the transfer; if the transfer does
        not complete within this period, an exception will be raised.

@@ -225,7 +230,7 @@ Target
    :param timeout: Timeout (in seconds) for the execution of the command. If
        specified, an exception will be raised if execution does not complete
        within the specified period.
    :param check_exit_code: If ``True`` (the default) the exit code (on target)
        from execution of the command will be checked, and an exception will be
        raised if it is not ``0``.
    :param as_root: The command will be executed as root. This will fail on

@@ -262,9 +267,27 @@ Target
        will be interpreted as a comma-separated list of cpu ranges, e.g.
        ``"0,4-7"``.
    :param as_root: Specify whether the command should be run as root
    :param timeout: If this is specified and invocation does not terminate within this number
        of seconds, an exception will be raised.

.. method:: Target.background_invoke(binary [, args [, in_directory [, on_cpus [, as_root ]]]])

    Execute the specified binary on target (must already be installed) as a background
    task, under the specified conditions and return the :class:`subprocess.Popen`
    instance for the command.

    :param binary: binary to execute. Must be present and executable on the device.
    :param args: arguments to be passed to the binary. These can be either a list or
        a string.
    :param in_directory: execute the binary in the specified directory. This must
        be an absolute path.
    :param on_cpus: taskset the binary to these CPUs. This may be a single ``int`` (in which
        case, it will be interpreted as the mask), a list of ``ints``, in which
        case this will be interpreted as the list of cpus, or a string, which
        will be interpreted as a comma-separated list of cpu ranges, e.g.
        ``"0,4-7"``.
    :param as_root: Specify whether the command should be run as root
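
    A short usage sketch (the binary path below is hypothetical, and ``t`` is
    assumed to be a connected :class:`Target`); the returned object is a
    regular :class:`subprocess.Popen`:

    .. code:: python

        proc = t.background_invoke('/data/local/tmp/busybox',
                                   args='sleep 10',
                                   on_cpus='0-3')
        proc.wait()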

.. method:: Target.kick_off(command [, as_root])

    Kick off the specified command on the target and return immediately. Unlike

@@ -288,24 +311,50 @@ Target

.. method:: Target.read_int(path)

    Equivalent to ``Target.read_value(path, kind=devlib.utils.types.integer)``

.. method:: Target.read_bool(path)

    Equivalent to ``Target.read_value(path, kind=devlib.utils.types.boolean)``

.. method:: Target.write_value(path, value [, verify])

    Write the value to the specified path on the target. This is primarily
    intended for sysfs/procfs/debugfs etc.

    :param path: file to write into
    :param value: value to be written
    :param verify: If ``True`` (the default) the value will be read back after
        it is written to make sure it has been written successfully. This is due to
        some sysfs entries silently failing to set the written value without
        returning an error code.

.. method:: Target.read_tree_values(path, depth=1, dictcls=dict)

    Read values of all sysfs (or similar) file nodes under ``path``, traversing
    up to the maximum depth ``depth``.

    Returns a nested structure of dict-like objects (``dict``\ s by default) that
    follows the structure of the scanned sub-directory tree. The top-level entry
    has a single item whose key is ``path``. If ``path`` points to a single file,
    the value of the entry is the value read from that file node. Otherwise, the
    value is a dict-like object with a key for every entry under ``path``
    mapping onto its value or further dict-like objects as appropriate.

    :param path: sysfs path to scan
    :param depth: maximum depth to descend
    :param dictcls: a dict-like type to be used for each level of the hierarchy.

.. method:: Target.read_tree_values_flat(path, depth=1)

    Read values of all sysfs (or similar) file nodes under ``path``, traversing
    up to the maximum depth ``depth``.

    Returns a dict mapping paths of file nodes to corresponding values.

    :param path: sysfs path to scan
    :param depth: maximum depth to descend

.. method:: Target.reset()

    Soft reset the target. Typically, this means executing ``reboot`` on the

@@ -392,7 +441,9 @@ Target
.. method:: Target.capture_screen(filepath)

    Take a screenshot on the device and save it to the specified file on the
    host. This may not be supported by the target. You can optionally insert a
    ``{ts}`` tag into the file name, in which case it will be substituted with
    the on-target timestamp of the screen shot in ISO8601 format.

.. method:: Target.install(filepath[, timeout[, with_name]])

@@ -402,6 +453,17 @@ Target
    :param timeout: Optional timeout (in seconds) for the installation
    :param with_name: This may be used to rename the executable on the target


.. method:: Target.install_if_needed(host_path, search_system_binaries=True)

    Check to see if the binary is already installed on the device and if not,
    install it.

    :param host_path: path to the executable on the host
    :param search_system_binaries: Specify whether to search the device's PATH
        when checking to see if the executable is installed, otherwise only check
        user-installed binaries.

.. method:: Target.uninstall(name)

    Uninstall the specified executable from the target

@@ -420,3 +482,165 @@ Target
    Returns ``True`` if an executable with the specified name is installed on the
    target and ``False`` otherwise.

.. method:: Target.extract(path, dest=None)

    Extracts the specified archive/file and returns the path to the extracted
    contents. The extraction method is determined based on the file extension.
    ``zip``, ``tar``, ``gzip``, and ``bzip2`` are supported.

    :param dest: Specifies an on-target destination directory (which must exist)
        for the extracted contents.

    Returns the path to the extracted contents. In case of files (gzip and
    bzip2), the path to the decompressed file is returned; for archives, the
    path to the directory with the archive's contents is returned.

.. method:: Target.is_network_connected()

    Checks for internet connectivity on the device. This doesn't actually
    guarantee that the internet connection is "working" (which is rather
    nebulous); it's intended just for failing early when the device is
    definitively *not* connected to the internet.

    :returns: ``True`` if internet seems available, ``False`` otherwise.

Android Target
---------------

.. class:: AndroidTarget(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=AdbConnection, package_data_directory="/data/data")

    :class:`AndroidTarget` is a subclass of :class:`Target` with additional features specific to a device running Android.

    :param package_data_directory: This is the location of the data stored
        for installed Android packages on the device.

.. method:: AndroidTarget.set_rotation(rotation)

    Specify an integer representing the desired screen rotation with the
    following mappings: Natural: ``0``, Rotated Left: ``1``, Inverted: ``2``
    and Rotated Right: ``3``.

.. method:: AndroidTarget.get_rotation()

    Returns an integer value representing the orientation of the device's
    screen. ``0``: Natural, ``1``: Rotated Left, ``2``: Inverted
    and ``3``: Rotated Right.

.. method:: AndroidTarget.set_natural_rotation()

    Sets the screen orientation of the device to its natural (0 degrees)
    orientation.

.. method:: AndroidTarget.set_left_rotation()

    Sets the screen orientation of the device to 90 degrees.

.. method:: AndroidTarget.set_inverted_rotation()

    Sets the screen orientation of the device to its inverted (180 degrees)
    orientation.

.. method:: AndroidTarget.set_right_rotation()

    Sets the screen orientation of the device to 270 degrees.

.. method:: AndroidTarget.set_auto_rotation(autorotate)

    Specify a boolean value for whether the device's auto-rotation should
    be enabled.

.. method:: AndroidTarget.get_auto_rotation()

    Returns ``True`` if the target's auto rotation is currently enabled and
    ``False`` otherwise.

.. method:: AndroidTarget.set_airplane_mode(mode)

    Specify a boolean value for whether the device should be in airplane mode.

    .. note:: Requires the device to be rooted if the device is running Android 7+.

.. method:: AndroidTarget.get_airplane_mode()

    Returns ``True`` if the target is currently in airplane mode and
    ``False`` otherwise.

.. method:: AndroidTarget.set_brightness(value)

    Sets the device's screen brightness to a specified integer between ``0`` and
    ``255``.

.. method:: AndroidTarget.get_brightness()

    Returns an integer between ``0`` and ``255`` representing the device's
    current screen brightness.

.. method:: AndroidTarget.set_auto_brightness(auto_brightness)

    Specify a boolean value for whether the device's auto brightness
    should be enabled.

.. method:: AndroidTarget.get_auto_brightness()

    Returns ``True`` if the target's auto brightness is currently
    enabled and ``False`` otherwise.

.. method:: AndroidTarget.ensure_screen_is_off()

    Checks if the device's screen is on and if so turns it off.

.. method:: AndroidTarget.ensure_screen_is_on()

    Checks if the device's screen is off and if so turns it on.

.. method:: AndroidTarget.is_screen_on()

    Returns ``True`` if the target's screen is currently on and ``False``
    otherwise.

.. method:: AndroidTarget.homescreen()

    Returns the device to its home screen.

.. method:: AndroidTarget.swipe_to_unlock(direction="diagonal")

    Performs a swipe input on the device to try and unlock the device.
    A direction of ``"horizontal"``, ``"vertical"`` or ``"diagonal"``
    can be supplied to specify in which direction the swipe should be
    performed. By default ``"diagonal"`` will be used to try and
    support the majority of newer devices.
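
    For instance, a typical sketch of preparing the screen before a UI-driven
    workload, assuming ``t`` is a connected :class:`AndroidTarget`:

    .. code:: python

        t.ensure_screen_is_on()
        t.swipe_to_unlock(direction='horizontal')
        t.set_natural_rotation()
        t.set_auto_rotation(False)
        t.set_brightness(255)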


ChromeOS Target
---------------

.. class:: ChromeOsTarget(connection_settings=None, platform=None, working_directory=None, executables_directory=None, android_working_directory=None, android_executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, package_data_directory="/data/data")

    :class:`ChromeOsTarget` is a subclass of :class:`LinuxTarget` with
    additional features specific to a device running ChromeOS, for example,
    if supported, its own Android container, which can be accessed via the
    ``android_container`` attribute. When making calls to, or accessing
    properties and attributes of, the ChromeOS target, they will by default
    be applied to the Linux target, as this is where the majority of device
    configuration is performed; if not available there, they will fall back
    to the Android container, if one is available. This means that all the
    methods available from :class:`LinuxTarget` and :class:`AndroidTarget`
    are available for :class:`ChromeOsTarget` if the device supports Android;
    otherwise only the :class:`LinuxTarget` methods will be available.

    :param working_directory: This is the location of the working
        directory to be used for the Linux target container. If not specified,
        it will default to ``"/mnt/stateful_partition/devlib-target"``.

    :param android_working_directory: This is the location of the working
        directory to be used for the Android container. If not specified, it will
        use the working directory default for :class:`AndroidTarget`.

    :param android_executables_directory: This is the location of the
        executables directory to be used for the Android container. If not
        specified, it will default to a ``bin`` subdirectory of the
        ``android_working_directory``.

    :param package_data_directory: This is the location of the data stored
        for installed Android packages on the device.
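
    A brief sketch of connecting to a ChromeOS device over SSH; the address and
    credentials are placeholders, and the top-level ``ChromeOsTarget`` import
    is assumed:

    .. code:: python

        from devlib import ChromeOsTarget

        t = ChromeOsTarget(connection_settings={'host': '192.168.0.100',
                                                'username': 'root',
                                                'password': 'test0000'},
                           working_directory='/mnt/stateful_partition/devlib-target')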

setup.py (53 lines)

@@ -13,6 +13,7 @@
# limitations under the License.
#

import imp
import os
import sys
import warnings

@@ -20,8 +21,10 @@ from itertools import chain

try:
    from setuptools import setup
    from setuptools.command.sdist import sdist as orig_sdist
except ImportError:
    from distutils.core import setup
    from distutils.command.sdist import sdist as orig_sdist


devlib_dir = os.path.join(os.path.dirname(__file__), 'devlib')

@@ -37,6 +40,26 @@ try:
except OSError:
    pass


with open(os.path.join(devlib_dir, '__init__.py')) as fh:
    # Extract the version by parsing the text of the file,
    # as we may not be able to load it as a module yet.
    for line in fh:
        if '__version__' in line:
            parts = line.split("'")
            __version__ = parts[1]
            break
    else:
        raise RuntimeError('Did not see __version__')

vh_path = os.path.join(devlib_dir, 'utils', 'version.py')
# We can load this, as it does not have any devlib imports.
version_helper = imp.load_source('version_helper', vh_path)
commit = version_helper.get_commit()
if commit:
    __version__ = '{}+{}'.format(__version__, commit)


packages = []
data_files = {}
source_dir = os.path.dirname(__file__)

@@ -59,20 +82,23 @@ for root, dirs, files in os.walk(devlib_dir):
params = dict(
    name='devlib',
    description='A framework for automating workload execution and measurement collection on ARM devices.',
    version=__version__,
    packages=packages,
    package_data=data_files,
    url='https://github.com/ARM-software/devlib',
    license='Apache v2',
    maintainer='ARM Ltd.',
    install_requires=[
        'python-dateutil',  # converting between UTC and local time.
        'pexpect>=3.3',     # Send/receive to/from device
        'pyserial',         # Serial port interface
        'wrapt',            # Basis for construction of decorator functions
        'future',           # Python 2-3 compatibility
    ],
    extras_require={
        'daq': ['daqpower'],
        'doc': ['sphinx'],
        'monsoon': ['python-gflags'],
    },
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[

@@ -83,7 +109,28 @@ params = dict(
    ],
)

all_extras = list(chain(iter(params['extras_require'].values())))
params['extras_require']['full'] = all_extras


class sdist(orig_sdist):

    user_options = orig_sdist.user_options + [
        ('strip-commit', 's',
         "Strip git commit hash from package version ")
    ]

    def initialize_options(self):
        orig_sdist.initialize_options(self)
        self.strip_commit = False

    def run(self):
        if self.strip_commit:
            self.distribution.get_version = lambda: __version__.split('+')[0]
        orig_sdist.run(self)


params['cmdclass'] = {'sdist': sdist}

setup(**params)
@@ -4,7 +4,7 @@
#
CROSS_COMPILE?=aarch64-linux-gnu-
CC=$(CROSS_COMPILE)gcc
CFLAGS=-static -lc

readenergy: readenergy.c
	$(CC) $(CFLAGS) readenergy.c -o readenergy
@@ -89,6 +89,9 @@
// Default counter poll period (in milliseconds).
#define DEFAULT_PERIOD 100

// Default duration for the instrument execution (in seconds); 0 means 'forever'
#define DEFAULT_DURATION 0

// A single reading from the energy meter. The values are the proper readings converted
// to appropriate units (e.g. Watts for power); they are *not* raw counter values.
struct reading

@@ -111,7 +114,7 @@ struct reading
    double sys_enm_ch0_gpu;
};

static inline uint64_t join_64bit_register(uint32_t *buffer, int index)
{
    uint64_t result = 0;
    result |= buffer[index];

@@ -141,12 +144,17 @@ int nsleep(const struct timespec *req, struct timespec *rem)

void print_help()
{
    fprintf(stderr, "Usage: readenergy [-t PERIOD] [-o OUTFILE]\n\n"
            "Read Juno energy counters every PERIOD milliseconds, writing them\n"
            "to OUTFILE in CSV format either until SIGTERM is received OR\n"
            "till the specified duration elapsed.\n"
            "If OUTFILE is not specified, stdout will be used.\n\n"
            "Parameters:\n"
            "   PERIOD is the counter poll period in milliseconds.\n"
            "   (Defaults to 100 milliseconds.)\n"
            "   DURATION is the duration before execution terminates.\n"
            "   (Defaults to 0 seconds, meaning run till user\n"
            "   terminates execution.\n"
            "   OUTFILE is the output file path\n");
}

@@ -163,6 +171,7 @@ struct config
{
    struct timespec period;
    char *output_file;
    long duration_in_sec;
};

void config_init_period_from_millis(struct config *this, long millis)

@@ -175,9 +184,10 @@ void config_init(struct config *this, int argc, char *argv[])
{
    this->output_file = NULL;
    config_init_period_from_millis(this, DEFAULT_PERIOD);
    this->duration_in_sec = DEFAULT_DURATION;

    int opt;
    while ((opt = getopt(argc, argv, "ht:o:d:")) != -1)
    {
        switch(opt)
        {

@@ -187,6 +197,9 @@ void config_init(struct config *this, int argc, char *argv[])
            case 'o':
                this->output_file = optarg;
                break;
            case 'd':
                this->duration_in_sec = atol(optarg);
                break;
            case 'h':
                print_help();
                exit(EXIT_SUCCESS);

@@ -197,13 +210,6 @@ void config_init(struct config *this, int argc, char *argv[])
                exit(EXIT_FAILURE);
        }
    }
}

// -------------------------------------- /config ---------------------------------------------------

@@ -219,13 +225,17 @@ struct emeter

void emeter_init(struct emeter *this, char *outfile)
{
    if(outfile)
    {
        this->out = fopen(outfile, "w");
        if (this->out == NULL)
        {
            fprintf(stderr, "ERROR: Could not open output file %s; got %s\n", outfile, strerror(errno));
            exit(EXIT_FAILURE);
        }
    } else {
        this->out = stdout;
    }

    this->fd = open("/dev/mem", O_RDONLY);
    if(this->fd < 0)
    {

@@ -243,10 +253,12 @@ void emeter_init(struct emeter *this, char *outfile)
        exit(EXIT_FAILURE);
    }

    if(this->out) {
        fprintf(this->out, "sys_current,a57_current,a53_current,gpu_current,"
                "sys_voltage,a57_voltage,a53_voltage,gpu_voltage,"
                "sys_power,a57_power,a53_power,gpu_power,"
                "sys_energy,a57_energy,a53_energy,gpu_energy\n");
    }
}

void emeter_read_measurements(struct emeter *this, struct reading *reading)

@@ -314,13 +326,19 @@ void emeter_finalize(struct emeter *this)

// -------------------------------------- /emeter ----------------------------------------------------

volatile int done = 0;

void term_handler(int signum)
{
    done = 1;
}

void sigalrm_handler(int signum)
{
    done = 1;
}


int main(int argc, char *argv[])
{
    struct sigaction action;

@@ -333,11 +351,27 @@ int main(int argc, char *argv[])
    config_init(&config, argc, argv);
    emeter_init(&emeter, config.output_file);

    if (0 != config.duration_in_sec)
    {
        /* Set the alarm with the duration from the user only if a non-zero value
           is specified, else run forever until a SIGTERM signal is received. */
        /* Set the signal handler first. */
        signal(SIGALRM, sigalrm_handler);
        /* Now set the alarm for the duration specified by the user. */
        alarm(config.duration_in_sec);

    }

    struct timespec remaining;
    if(config.output_file)
    {
        while (!done)
        {
            emeter_take_reading(&emeter);
            nsleep(&config.period, &remaining);
        }
    } else {
        emeter_take_reading(&emeter);
        nsleep(&config.period, &remaining);
    }

    emeter_finalize(&emeter);