Python - parsing (fio) JSON output

2017-10-15 820 views

0

I have JSON output from the Linux fio command, shown below. I want to parse it like a dictionary and pull specific values out of specific keys, but the nesting in this JSON output lumps everything together into one huge "value" per key/value pair. Any tips on how to parse these nested data structures more cleanly?

{ 
    "disk_util": [ 
    { 
     "aggr_util": 96.278308, 
     "in_queue": 247376, 
     "write_ticks": 185440, 
     "read_ticks": 61924, 
     "write_merges": 0, 
     "read_merges": 0, 
     "write_ios": 240866, 
     "read_ios": 18257, 
     "name": "dm-0", 
     "util": 97.257058, 
     "aggr_read_ios": 18465, 
     "aggr_write_ios": 243642, 
     "aggr_read_merges": 1, 
     "aggr_write_merge": 72, 
     "aggr_read_ticks": 62420, 
     "aggr_write_ticks": 185796, 
     "aggr_in_queue": 245504 
    }, 
    { 
     "util": 96.278308, 
     "name": "sda", 
     "read_ios": 18465, 
     "write_ios": 243642, 
     "read_merges": 1, 
     "write_merges": 72, 
     "read_ticks": 62420, 
     "write_ticks": 185796, 
     "in_queue": 245504 
    } 
    ], 
    "jobs": [ 
    { 
     "latency_window": 0, 
     "latency_percentile": 100, 
     "latency_target": 0, 
     "latency_depth": 64, 
     "latency_ms": { 
     ">=2000": 0, 
     "2000": 0, 
     "1000": 0, 
     "750": 0, 
     "2": 0, 
     "4": 0, 
     "10": 0, 
     "20": 0, 
     "50": 0, 
     "100": 0, 
     "250": 0, 
     "500": 0 
     }, 
     "latency_us": { 
     "1000": 0, 
     "750": 0, 
     "2": 0, 
     "4": 0, 
     "10": 0, 
     "20": 0, 
     "50": 0, 
     "100": 0, 
     "250": 0, 
     "500": 0 
     }, 
     "write": { 
     "iops_samples": 35, 
     "iops_stddev": 1608.115728, 
     "iops_mean": 13835.571429, 
     "iops_max": 16612, 
     "iops_min": 9754, 
     "bw_samples": 35, 
     "drop_ios": 0, 
     "short_ios": 0, 
     "total_ios": 243678, 
     "runtime": 17611, 
     "iops": 13836.692976, 
     "bw": 55346, 
     "io_kbytes": 974712, 
     "io_bytes": 998105088, 
     "slat_ns": { 
      "stddev": 0, 
      "mean": 0, 
      "max": 0, 
      "min": 0 
     }, 
     "clat_ns": { 
      "percentile": { 
      "0.00": 0 
      }, 
      "stddev": 0, 
      "mean": 0, 
      "max": 0, 
      "min": 0 
     }, 
     "lat_ns": { 
      "stddev": 0, 
      "mean": 0, 
      "max": 0, 
      "min": 0 
     }, 
     "bw_min": 39016, 
     "bw_max": 66448, 
     "bw_agg": 99.994218, 
     "bw_mean": 55342.8, 
     "bw_dev": 6432.427333 
     }, 
     "read": { 
     "iops_samples": 35, 
     "iops_stddev": 126.732776, 
     "iops_mean": 1048.257143, 
     "iops_max": 1336, 
     "iops_min": 772, 
     "bw_samples": 35, 
     "drop_ios": 0, 
     "short_ios": 0, 
     "total_ios": 18466, 
     "runtime": 17611, 
     "iops": 1048.549202, 
     "bw": 4194, 
     "io_kbytes": 73864, 
     "io_bytes": 75636736, 
     "slat_ns": { 
      "stddev": 0, 
      "mean": 0, 
      "max": 0, 
      "min": 0 
     }, 
     "clat_ns": { 
      "percentile": { 
      "0.00": 0 
      }, 
      "stddev": 0, 
      "mean": 0, 
      "max": 0, 
      "min": 0 
     }, 
     "lat_ns": { 
      "stddev": 0, 
      "mean": 0, 
      "max": 0, 
      "min": 0 
     }, 
     "bw_min": 3088, 
     "bw_max": 5344, 
     "bw_agg": 99.993188, 
     "bw_mean": 4193.714286, 
     "bw_dev": 506.844597 
     }, 
     "job options": { 
     "rwmixread": "7", 
     "rw": "randrw", 
     "size": "1G", 
     "iodepth": "64", 
     "bs": "4k", 
     "filename": "test", 
     "name": "test" 
     }, 
     "elapsed": 18, 
     "eta": 0, 
     "error": 0, 
     "groupid": 0, 
     "jobname": "test", 
     "trim": { 
     "iops_samples": 0, 
     "iops_stddev": 0, 
     "iops_mean": 0, 
     "iops_max": 0, 
     "iops_min": 0, 
     "bw_samples": 0, 
     "drop_ios": 0, 
     "short_ios": 0, 
     "total_ios": 0, 
     "runtime": 0, 
     "iops": 0, 
     "bw": 0, 
     "io_kbytes": 0, 
     "io_bytes": 0, 
     "slat_ns": { 
      "stddev": 0, 
      "mean": 0, 
      "max": 0, 
      "min": 0 
     }, 
     "clat_ns": { 
      "percentile": { 
      "0.00": 0 
      }, 
      "stddev": 0, 
      "mean": 0, 
      "max": 0, 
      "min": 0 
     }, 
     "lat_ns": { 
      "stddev": 0, 
      "mean": 0, 
      "max": 0, 
      "min": 0 
     }, 
     "bw_min": 0, 
     "bw_max": 0, 
     "bw_agg": 0, 
     "bw_mean": 0, 
     "bw_dev": 0 
     }, 
     "usr_cpu": 11.447391, 
     "sys_cpu": 74.680597, 
     "ctx": 28972, 
     "majf": 0, 
     "minf": 31, 
     "iodepth_level": { 
     ">=64": 99.975967, 
     "32": 0.1, 
     "16": 0.1, 
     "8": 0.1, 
     "4": 0.1, 
     "2": 0.1, 
     "1": 0.1 
     }, 
     "latency_ns": { 
     "1000": 0, 
     "750": 0, 
     "2": 0, 
     "4": 0, 
     "10": 0, 
     "20": 0, 
     "50": 0, 
     "100": 0, 
     "250": 0, 
     "500": 0 
     } 
    } 
    ], 
    "global options": { 
    "gtod_reduce": "1", 
    "direct": "1", 
    "ioengine": "libaio", 
    "randrepeat": "1" 
    }, 
    "time": "Sat Oct 14 23:18:28 2017", 
    "timestamp_ms": 1508023108010, 
    "timestamp": 1508023108, 
    "fio version": "fio-3.1" 
} 

I load it from the file very simply:

import json 

my_file = open('fio.json', 'r') 
my_dict = json.load(my_file) 

for k, v in my_dict.items(): 
    print("Key: {0}, value: {1}").format(k, v) 

But when iterating, all the nested lists and dicts come back as one unreadable blob of output per top-level key, like:

Key: disk_util, value: [{u'aggr_write_ticks': 185796, u'write_merges': 0, u'write_ticks': 185440, u'write_ios': 240866, u'aggr_write_ios': 243642, u'aggr_read_ticks': 62420, u'read_ios': 18257, u'util': 97.257058, u'read_ticks': 61924, u'aggr_write_merge': 72, u'read_merges': 0, u'aggr_in_queue': 245504, u'aggr_read_ios': 18465, u'aggr_util': 96.278308, u'aggr_read_merges': 1, u'in_queue': 247376, u'name': u'dm-0'}, {u'read_merges': 1, u'name': u'sda', u'write_ios': 243642, u'read_ios': 18465, u'util': 96.278308, u'read_ticks': 62420, u'write_merges': 72, u'in_queue': 245504, u'write_ticks': 185796}] 
+0

Well, that is just how nested dictionaries work. –

+0

It looks like the JSON is being parsed correctly - if the output is your problem, try the 'pprint' module. Or, if you want to write your own output routine, you will need to handle the nesting/indentation recursively. – PaulMcG
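
As a minimal sketch of that pprint suggestion (reusing the fio.json file from the question):

import json 
from pprint import pprint 

# Load the fio JSON report 
with open('fio.json', 'r') as f: 
    my_dict = json.load(f) 

# pprint indents nested dicts/lists instead of dumping them on one line 
pprint(my_dict, width=100) 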

Answer

0

json.load() preserves the structure and types from the JSON file - objects come back as dicts and arrays as lists.

You also appear to have a misplaced parenthesis.

The closing `)` of print() should come after the .format(...) call, not before it:

import json 


my_file = open('fio.json', 'r') 
my_dict = json.load(my_file) 

for index, key in enumerate(my_dict): 
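    # .format() is applied to the string itself, inside the print() call 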
    print("Key: {0}, value: {1}".format(key, my_dict[key]))