replace common qcom sources with samsung ones

This commit is contained in:
SaschaNes
2025-08-12 22:13:00 +02:00
parent ba24dcded9
commit 6f7753de11
5682 changed files with 2450203 additions and 103634 deletions

View File

@@ -0,0 +1,409 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
# Copyright (c) 2015, 2017, 2019-2021 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import datetime
import logging
import logging.handlers
import os
import struct
import sys
import re
from optparse import OptionParser
# Shared parser state, filled in as the SRAM configuration is walked.
count = 0            # number of entries described by the parsed config
address = []         # register addresses recovered via add_addr()
data = []            # register values recovered by read_data()/read_data_atb()
dcc_sink = []        # sink per linked list ('SRAM' or 'ATB'), from --dcc_sink
next_ll_offset = []  # file offset reached after each list's data (for XML dump)
MAX_LOOP_COUNT = 4096  # sanity bound on loop-descriptor repeat counts
def bm(msb, lsb):
    """Return a bitmask with bits msb..lsb (inclusive) set.

    Computed arithmetically instead of building a binary string and
    parsing it back with int(..., 2) -- same result, no throwaway
    string allocation.
    """
    return ((1 << (msb - lsb + 1)) - 1) << lsb

def bvalsel(msb, lsb, val):
    """Return bits msb..lsb of val, shifted down so lsb becomes bit 0."""
    return (val & bm(msb, lsb)) >> lsb
def log_init(name, path, filename):
    """Create and return an INFO-level logger named *name*.

    If *filename* is given, records are written to that file inside
    *path* (truncating any existing file); otherwise they go to stdout.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # Add the log message handler to the logger
    if filename is not None:
        # os.path.join is portable; manual '/' concatenation is not.
        handler = logging.FileHandler(os.path.join(path, filename), mode='w')
    else:
        handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(handler)
    return logger
def add_addr(base, offset, length):
    """Append `length` consecutive 32-bit register addresses, starting
    at base + offset, to the global address list."""
    start = base + offset
    address.extend(start + 4 * idx for idx in range(length))
def add_loop_addr(loop_nr, loop_count):
    # Replicate the most recent `loop_nr` entries of the global address
    # list `loop_count` times, expanding a DCC loop descriptor.
    # NOTE(review): the inner range is re-evaluated on each outer pass,
    # so every pass copies the *current* last loop_nr entries -- which by
    # then are the copies appended on the previous pass. Net effect is
    # loop_count identical copies of the original tail.
    for i in range(0, loop_count):
        for j in range(len(address) - loop_nr, len(address)):
            address.append(address[j])
def read_data(data_pt):
    # Read `count` (module global, set by read_config) little-endian
    # 32-bit words from data_pt into the global data list.
    # Returns the number of words left unread: non-zero means the file
    # ended early. The offset reached is recorded in next_ll_offset so
    # the XML dump can report where the next linked list would start.
    nr = count
    while nr > 0:
        word = data_pt.read(4)
        if len(word) != 4:
            break
        val = struct.unpack('<L', word)[0]
        data.append(val)
        nr = nr - 1
    next_ll_offset.append(hex(data_pt.tell()))
    return nr
list_nr = []  # cumulative entry counts per parsed (sub-)list; seeded with 0

def read_config(config_pt):
    """Parse one DCC linked-list configuration from config_pt.

    Walks 32-bit descriptor words (address / link / loop /
    read-modify-write) until a zero word or EOF, appending every
    register address described to the global `address` list. Returns
    the running count of parsed entries, which the caller uses as the
    number of data words to read back. Descriptor layout constants
    depend on --v2 (options.version).
    """
    offset = 0
    base = 0
    length = 1
    list_nr.append(0)
    count = 0
    if options.version is None:
        # v1 layout: bit 31 marks an address descriptor; no loop or
        # rd-mod-wr descriptors exist.
        address_descriptor = 0x1 << 31
        link_descriptor = 0
        loop_descriptor = None
        rd_mod_wr_descriptor = None
        dcc_write_ind = None
        link_second_arg = 8
        # We return zero and fail
        on_zero_link_len = 0
        track_len = 0
        empty_ind = 0x0
    else:
        # v2 layout: the top two bits select the descriptor type.
        address_descriptor = 0
        link_descriptor = 0x3 << 30
        loop_descriptor = 0x1 << 30
        rd_mod_wr_descriptor = 0x1 << 31
        dcc_write_ind = 0x1 << 28
        link_second_arg = 7
        #indicates end of list
        on_zero_link_len = -1
        #word size
        track_len = 4
        #empty SRAM is filled with 0xdededede
        empty_ind = 0xdededede
    if options.config_loopoffset is not None:
        config_loopoffset = int(options.config_loopoffset)
    else:
        config_loopoffset = 13
    while True:
        word = config_pt.read(4)
        if len(word) != 4:
            break
        val = struct.unpack('<L', word)[0]
        if val == 0:
            # Zero word terminates this linked list.
            break
        descriptor = val & (0x3 << 30)
        read_write_ind = val & (0x1 << 28)
        if val == empty_ind:
            # Unprogrammed SRAM filler -- skip.
            continue
        elif descriptor == address_descriptor:
            if read_write_ind == dcc_write_ind:
                # Write descriptor: skip its two payload words.
                config_pt.seek(8, 1)
            else:
                # New base address; low 28 bits hold addr[31:4].
                base = ((val & 0x0FFFFFFF) << 4)
                offset = 0
                length = 1
                tmp_count = 0
        elif descriptor == link_descriptor:
            total_length = 0
            # A link word packs two (offset, length) pairs.
            for i in range(0, 2):
                offset = offset + (val & 0xFF) * 4 + (length - 1) * track_len
                val = val >> 8
                length = (val & 0x7f)
                val = val >> link_second_arg
                if length != 0:
                    total_length += length
                    count = count + 1
                    tmp_count = tmp_count + 1
                    add_addr(base, offset, length)
                else:
                    if (i == 0 ):
                        # Zero length in the first slot ends the list.
                        return list_nr[on_zero_link_len]
                    else:
                        offset = 0
            if total_length > 0:
                list_nr.append(total_length + list_nr[- 1])
        elif descriptor == loop_descriptor:
            loop_offset = val & bm(config_loopoffset - 1, 0)
            loop_count = bvalsel(27, config_loopoffset, val)
            if loop_count > MAX_LOOP_COUNT:
                print ("loop offset is wrong or SRAM is corrupted\n")
                exit()
            if loop_offset == 0:
                continue
            try:
                # Replicate the addresses captured inside the loop body.
                loop_nr = list_nr[-1] - list_nr[-loop_offset//2 - 1]
                list_nr.append(loop_nr * loop_count + list_nr[-1])
                count = count + 1
                add_loop_addr(loop_nr, loop_count)
            except Exception as err:
                # Out-of-range loop_offset: ignore this loop descriptor.
                pass
        elif descriptor == rd_mod_wr_descriptor:
            '''
            Skip over mask and value of rd_mod_wr.
            There is no guarantee of this being actually written
            and we never read the value back to confirm.
            Remove address added by previous entry since
            that's for rd_mod_wr instead of read operation.
            '''
            for i in range(0, tmp_count):
                if (val & 1 == 0): # check for Keep bit
                    last_length = list_nr[-1] - list_nr[-2]
                    list_nr.pop()
                    count = count - 1
                    for j in range(0, last_length):
                        address.pop()
            config_pt.seek(8, 1)
    return count
def new_linked_list(config_pt):
    """Peek one word and report whether another DCC list follows.

    Returns True (after rewinding the stream 4 bytes) if the next word
    is non-zero; False on a zero word or a short read.
    """
    word = config_pt.read(4)
    if len(word) != 4:
        return False
    if struct.unpack('<L', word)[0] == 0:
        return False
    # Non-zero word: put it back so read_config() sees it.
    config_pt.seek(-4, 1)
    return True
def dump_regs_json(options):
    """Write the captured address/value pairs as a JSON document through
    the PARSED_DATA logger (which targets the chosen output file)."""
    log.info("Dumping regs in JSON format in '{0}' file.".format(options.outfile))
    emit = parsed_data.info
    emit("{")
    emit("\t\"version\": 1,")
    emit("\t\"timestamp\": \"{0}\",".format(datetime.date.today().strftime('%m/%d/%y')))
    emit("\t\"generator\": \"Linux DCC Parser\",")
    emit("\t\"chip\": {")
    emit("\t\t\"name\": \"{0}\",".format(options.chipname))
    emit("\t\t\"version\": \"{0}\"".format(options.chipversion))
    emit("\t},")
    emit("\t\"registers\": [")
    for reg_addr, reg_val in zip(address, data):
        emit("\t\t{{ \"address\": \"0x{0:08x}\", \"value\": \"0x{1:08x}\" }},".format(reg_addr, reg_val))
    emit("\t]")
    emit("}")
def dump_regs_xml(options):
    """Write the captured address/value pairs as an XML hwioDump
    document through the PARSED_DATA logger."""
    log.info("Dumping regs in XML format in '{0}' file.".format(options.outfile))
    emit = parsed_data.info
    emit("<?xml version=\"1.0\" encoding=\"utf-8\"?>")
    emit("<hwioDump version=\"1\">")
    emit("\t<timestamp>{0}</timestamp>".format(datetime.date.today().strftime('%m/%d/%y')))
    emit("\t<generator>Linux DCC Parser</generator>")
    emit("\t<chip name=\"{0}\" version=\"{1}\">".format(options.chipname, options.chipversion))
    for reg_addr, reg_val in zip(address, data):
        emit("\t\t<register address=\"0x{0:08x}\" value=\"0x{1:08x}\" />".format(reg_addr, reg_val))
    emit("\t</chip>")
    try:
        # next_ll_offset is empty when no data was ever read.
        emit("\t<next_ll_offset>next_ll_offset : {0} </next_ll_offset>".format(next_ll_offset[-1]))
    except:
        pass
    emit("</hwioDump>")
def dump_regs(options):
    """Dump captured registers in the requested format, bailing out if
    nothing was parsed from SRAM."""
    if not address:
        log.error('No configuration found in SRAM!!')
        sys.exit(1)
    writer = dump_regs_json if options.json is True else dump_regs_xml
    writer(options)
def read_data_atb(atb_data_pt, count):
    """Recover up to *count* DCC data words from an ATB trace dump.

    atb_data_pt is an open text file with one JSON-style record per
    line. "D8" records carry one byte each, so four consecutive lines
    form one 32-bit word (LSB first); "D32" records carry a whole word.
    Parsed words are appended to the global `data` list. Returns the
    number of words recovered, which may be less than *count* if the
    file is malformed.
    """
    atb_count = 0
    # Raw strings, compiled once instead of on every line.
    d8_re = re.compile(
        r"\{\"ATID\" : 65, \"OpCode\" : \"D8\", \"Payload\" : "
        r"\"0x([0-9A-Fa-f][0-9A-Fa-f])\"\}")
    d32_re = re.compile(
        r"\{\"ATID\" : 65, \"OpCode\" : \"D32\", \"Payload\" : "
        r"\"0x([0-9A-Fa-f]+)\"}")
    for line in atb_data_pt:
        if "ATID\" : 65, \"OpCode\" : \"D8\"" in line:
            data1 = ""
            i = 0
            while i < 4:
                data_byte_re = d8_re.match(line)
                if data_byte_re:
                    # Bytes arrive LSB first; prepend to build the word.
                    data1 = (data_byte_re.group(1)) + data1
                    i += 1
                else:
                    log.error("ATB file format wrong")
                    return atb_count
                if i < 4:
                    # BUGFIX: file objects have no .next() method in
                    # Python 3; use the next() builtin instead.
                    line = next(atb_data_pt)
            data.append(int(data1, 16))
            atb_count = atb_count + 1
            if atb_count >= count:
                break
        elif "ATID\" : 65, \"OpCode\" : \"D32\"" in line:
            data_byte_re = d32_re.match(line)
            if data_byte_re:
                data.append(int(data_byte_re.group(1), 16))
                atb_count = atb_count + 1
                if atb_count >= count:
                    break
    return atb_count
if __name__ == '__main__':
    # Command-line driver: parse options, walk each DCC linked list in
    # the SRAM image, read its data from the matching sink (SRAM or an
    # ATB trace file), then dump everything as JSON or XML.
    usage = 'usage: %prog [options to print]. Run with --help for more details'
    parser = OptionParser(usage)
    parser.add_option('-s', '--sram-file', dest='sramfile',
                      help='sram image path')
    parser.add_option('-a', '--atb-file', dest='atbfile', help='atb image path')
    parser.add_option('-j', '--json', action='store_true',
                      help='output in JSON format')
    parser.add_option('-o', '--out-dir', dest='outdir', help='output dir path')
    parser.add_option('-f', '--output-file', dest='outfile',
                      help='output filename')
    parser.add_option('-l', '--log-file', dest='logfile', help='Log filename')
    parser.add_option('', '--chip-name', dest='chipname', help='chip name')
    parser.add_option('', '--chip-version', dest='chipversion',
                      help='chip version')
    parser.add_option('--v2', dest='version', action="store_const", const='2',
                      help='DCC driver version 2')
    parser.add_option('--config-offset', dest='config_offset',
                      help='Start offset for DCC configuration')
    parser.add_option('--data-offset', dest='data_offset',
                      help='Start offset for DCC Data')
    parser.add_option('--config-loopoffset', dest='config_loopoffset',
                      help='Offset of loop value')
    parser.add_option('--dcc_sink', dest='dcc_sink',
                      help='DCC sink(SRAM/ATB).Comma seperated list if more than one list used')
    (options, args) = parser.parse_args()
    # Reassemble the raw command line (built but not otherwise used here).
    args = ''
    for arg in sys.argv:
        args = args + arg + ' '
    # Ensure the output directory exists; default to the current dir.
    if options.outdir:
        if not os.path.exists(options.outdir):
            print ('!!! Out directory does not exist. Creating...')
            try:
                os.makedirs(options.outdir)
            except:
                print ("Failed to create %s. You probably don't have permissions there. Bailing." % options.outdir)
                sys.exit(1)
    else:
        options.outdir = '.'
    if options.json:
        ext = '.json'
    else:
        ext = '.xml'
    # One sink entry per linked list; default is a single SRAM sink.
    if options.dcc_sink is None:
        dcc_sink.append('SRAM')
    else:
        dcc_sink = options.dcc_sink.split(',')
    print(dcc_sink)
    if options.outfile is None:
        options.outfile = 'dcc_captured_data{0}'.format(ext)
    log = log_init('LOG', options.outdir, options.logfile)
    log.info("Data Capture and Compare(DCC) parser.")
    if options.sramfile is None:
        log.error("No SRAM image file given! Exiting...")
        parser.print_usage()
        sys.exit(1)
    try:
        sram_file = open(options.sramfile, 'rb')
    except:
        log.error("could not open path {0}".format(options.sramfile))
        log.error("Do you have read permissions on the path?")
        sys.exit(1)
    count = 0
    # --config-offset is a hex string; seek relative to current position.
    if options.config_offset is not None:
        sram_file.seek(int(options.config_offset, 16), 1)
    parsed_data = log_init('PARSED_DATA', options.outdir, options.outfile)
    if options.atbfile is not None and os.path.exists(options.atbfile):
        atb_file = open(options.atbfile, 'r')
    # Parse each linked list's config, then read its data from its sink.
    for sink in dcc_sink:
        count = read_config(sram_file)
        print("Number of registers in list:" , count)
        print("Sink used for the list:" , sink)
        if sink == 'SRAM':
            print('Read data from SRAM')
            if options.data_offset is not None:
                sram_file.seek(int(options.data_offset, 16))
            if read_data(sram_file):
                log.error('Couldn\'t read complete data.')
                sys.exit(1)
        elif sink == 'ATB':
            print('Read data from ATB file')
            if options.atbfile is not None:
                try:
                    atb_count = read_data_atb(atb_file, count)
                    if atb_count < count:
                        # Incomplete ATB data: drop this list's entries.
                        del address[-count:]
                        del data[-atb_count:]
                        log.error("ATB file don't have complete DCC data")
                except:
                    log.error("could not read ATB data {0}".format(options.atbfile))
                    del address[-count:]
            else:
                log.error('ATB file not given')
                del address[-count:]
        if not new_linked_list(sram_file):
            log.error("Next list not available")
            break
    if options.atbfile is not None and os.path.exists(options.atbfile):
        atb_file.close()
    dump_regs(options)
    sram_file.close()
    sys.stderr.flush()

View File

@@ -0,0 +1,176 @@
# Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import datetime
import logging
import logging.handlers
import os
import struct
import sys
from optparse import OptionParser
# Shared parser state: `count` is the word count reported by the config
# scan; `address`/`data` collect alternating words from the dump.
count = 0
address = []
data = []
def log_init(name, path, filename):
    """Return an INFO-level logger writing to path/filename, or to
    stdout when no filename is given."""
    new_logger = logging.getLogger(name)
    new_logger.setLevel(logging.INFO)
    # Attach exactly one handler for this logger's output destination.
    if filename is None:
        sink = logging.StreamHandler(sys.stdout)
    else:
        sink = logging.FileHandler(path + '/' + filename, mode='w')
    new_logger.addHandler(sink)
    return new_logger
def read_data(data_pt):
    """Read `count` (global) 32-bit words from data_pt; words alternate
    address, value, address, value... into the global lists.

    Returns the number of words left unread (non-zero on short file).
    """
    remaining = count
    is_address = True
    while remaining > 0:
        raw = data_pt.read(4)
        if len(raw) != 4:
            break
        value = struct.unpack('<L', raw)[0]
        if is_address:
            address.append(value)
        else:
            data.append(value)
        is_address = not is_address
        remaining -= 1
    return remaining
def read_config(config_pt):
    """Scan 32-bit little-endian words until a zero terminator or EOF.

    Returns the last word read: 0 when a zero terminator (or an empty
    file) is hit, otherwise the final word before EOF. The caller
    treats a zero return as "no configuration present".
    """
    val = 0  # BUGFIX: avoid UnboundLocalError when the file is empty.
    while True:
        word = config_pt.read(4)
        if len(word) != 4:
            break
        val = struct.unpack('<L', word)[0]
        if val == 0:
            break
    return val
def dump_regs_json(options):
    # Emit the captured address/value pairs as a JSON document through
    # the PARSED_DATA logger (which writes to the chosen output file).
    log.info("Dumping regs in JSON format in \'{0}\' file.".format(options.outfile))
    parsed_data.info("{")
    parsed_data.info("\t\"version\": 1,")
    parsed_data.info("\t\"timestamp\": \"{0}\",".format(datetime.date.today().strftime('%m/%d/%y')))
    parsed_data.info("\t\"generator\": \"Linux DCC Parser\",")
    parsed_data.info("\t\"chip\": {")
    parsed_data.info("\t\t\"name\": \"{0}\",".format(options.chipname))
    parsed_data.info("\t\t\"version\": \"{0}\"".format(options.chipversion))
    parsed_data.info("\t},")
    parsed_data.info("\t\"registers\": [")
    for addr, val in zip(address, data):
        parsed_data.info("\t\t{{ \"address\": \"0x{0:08x}\", \"value\": \"0x{1:08x}\" }},".format(addr, val))
    parsed_data.info("\t]")
    parsed_data.info("}")
    return
def dump_regs_xml(options):
    """Emit the captured address/value pairs as an XML hwioDump
    document through the PARSED_DATA logger."""
    # print() with a single argument parses on Python 2 and 3 alike;
    # the original Python-2-only print statement does not.
    print("dumping xml file")
    log.info("Dumping regs in XML format in \'{0}\' file.".format(options.outfile))
    parsed_data.info("<?xml version=\"1.0\" encoding=\"utf-8\"?>")
    # BUGFIX: the opening tag was missing its closing '>', producing
    # malformed XML (compare the v2 parser's dump_regs_xml).
    parsed_data.info("<hwioDump version=\"1\">")
    parsed_data.info("\t<timestamp>{0}</timestamp>".format(datetime.date.today().strftime('%m/%d/%y')))
    parsed_data.info("\t<generator>Linux DCC Parser</generator>")
    parsed_data.info("\t<chip name=\"{0}\" version=\"{1}\">".format(options.chipname, options.chipversion))
    for addr, val in zip(address, data):
        parsed_data.info("\t\t<register address=\"0x{0:08x}\" value=\"0x{1:08x}\" />".format(addr, val))
    parsed_data.info("\t</chip>")
    parsed_data.info("</hwioDump>")
    return
def dump_regs(options):
    """Dispatch to the JSON or XML dumper based on the --json flag."""
    dumper = dump_regs_json if options.json is True else dump_regs_xml
    dumper(options)
if __name__ == '__main__':
    # Command-line driver: read the sysdbg register dump, then emit the
    # address/value pairs as JSON or XML.
    usage = 'usage: %prog [options to print]. Run with --help for more details'
    parser = OptionParser(usage)
    parser.add_option('-s', '--sysreg-file', dest='sysregfile',
                      help='sram image path')
    parser.add_option('-j', '--json', action='store_true',
                      help='output in JSON format')
    parser.add_option('-o', '--out-dir', dest='outdir', help='output dir path')
    parser.add_option('-f', '--output-file', dest='outfile',
                      help='output filename')
    parser.add_option('-l', '--log-file', dest='logfile', help='Log filename')
    parser.add_option('', '--chip-name', dest='chipname', help='chip name')
    parser.add_option('', '--chip-version', dest='chipversion',
                      help='chip version')
    (options, args) = parser.parse_args()
    # NOTE(review): Python 2 print statement -- this script will not
    # parse under Python 3 as-is.
    print "sysregs_parser_invoked"
    # Reassemble the raw command line (built but not otherwise used).
    args = ''
    for arg in sys.argv:
        args = args + arg + ' '
    # Ensure the output directory exists; default to the current dir.
    if options.outdir:
        if not os.path.exists(options.outdir):
            print ('!!! Out directory does not exist. Creating...')
            try:
                os.makedirs(options.outdir)
            except:
                print ("Failed to create %s. You probably don't have permissions there. Bailing." % options.outdir)
                sys.exit(1)
    else:
        options.outdir = '.'
    if options.json:
        ext = '.json'
    else:
        ext = '.xml'
    if options.outfile is None:
        options.outfile = 'dcc_captured_data_minidump{0}'.format(ext)
    log = log_init('LOG', options.outdir, options.logfile)
    log.info("Data Capture and Compare(sysregs) parser.")
    if options.sysregfile is None:
        log.error("No SRAM image file given! Exiting...")
        parser.print_usage()
        sys.exit(1)
    try:
        sysreg_file = open(options.sysregfile, 'rb')
    except:
        log.error("could not open path {0}".format(options.sysregfile))
        log.error("Do you have read permissions on the path?")
        sys.exit(1)
    count = 0
    count = read_config(sysreg_file)
    if count == 0:
        log.error('No configuration found in sysdbg_regs!!')
        sys.exit(1)
    if read_data(sysreg_file):
        log.error('Couldn\'t read complete data.')
    else:
        parsed_data = log_init('PARSED_DATA', options.outdir, options.outfile)
        dump_regs(options)
    sysreg_file.close()
    sys.stderr.flush()

View File

@@ -0,0 +1,177 @@
# Sphinx documentation Makefile: one target per Sphinx builder.
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
clean:
	rm -rf $(BUILDDIR)/*
html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
# Rebuild the HTML whenever any source file changes (requires inotifywait).
watch_html:
	while :; do inotifywait -e modify -e create -e delete -e move -r .. && make html; done
dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."
json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."
htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/LinuxRamdumpParserv2.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/LinuxRamdumpParserv2.qhc"
devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/LinuxRamdumpParserv2"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/LinuxRamdumpParserv2"
	@echo "# devhelp"
epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	"(use \`make latexpdf' here to do that automatically)."
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	"(use \`make info' here to do that automatically)."
info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	"results in $(BUILDDIR)/doctest/output.txt."
xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

View File

@@ -0,0 +1,263 @@
# Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""Sphinx configuration for the Linux Ramdump Parser v2 documentation."""
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../linux-ramdump-parser-v2'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Linux Ramdump Parser v2'
copyright = u'The Linux Foundation. All rights reserved.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'LinuxRamdumpParserv2doc'
# -- Options for LaTeX output ---------------------------------------------
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
latex_elements = {
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'LinuxRamdumpParserv2.tex',
     u'Linux Ramdump Parser v2 Documentation',
     u'Qualcomm Innovation Center', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'linuxramdumpparserv2', u'Linux Ramdump Parser v2 Documentation',
     [u'Qualcomm Innovation Center'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'LinuxRamdumpParserv2', u'Linux Ramdump Parser v2 Documentation',
     u'Qualcomm Innovation Center', 'LinuxRamdumpParserv2',
     'Linux Ramdump Parser.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Include both class and __init__ docstrings in autodoc class entries.
autoclass_content = 'both'

View File

@@ -0,0 +1,9 @@
GDBMI
=====
The ``gdbmi`` module provides the low-level interface to gdb's
`gdbmi <https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI.html>`_.
.. automodule:: gdbmi
:members:
:undoc-members:

View File

@@ -0,0 +1,15 @@
Hacking
=======
This section covers the LRDP's internal API. It is intended for developers
looking to add their own parser plugin to the LRDP.
.. toctree::
:maxdepth: 2
writing_parsers
ramdump
gdbmi
register
sizes
parser_util

View File

@@ -0,0 +1,18 @@
Welcome to Linux Ramdump Parser v2's documentation!
===================================================
Contents:
.. toctree::
:maxdepth: 2
usage
hacking
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@@ -0,0 +1,5 @@
parser_util
===========
.. automodule:: parser_util
:members:

View File

@@ -0,0 +1,7 @@
.. _`RamDump`:
RamDump
=======
.. automodule:: ramdump
:members:

View File

@@ -0,0 +1,7 @@
.. _`register`:
register
========
.. automodule:: register
:members:

View File

@@ -0,0 +1,8 @@
.. _`Sizes`:
Sizes
=====
.. automodule:: sizes
:members:
:undoc-members:

View File

@@ -0,0 +1,6 @@
Usage
=====
::
ramparse.py -o . -a /path/to/dumps

View File

@@ -0,0 +1,7 @@
.. _`Writing Parsers`:
Writing Parsers
===============
.. autofunction:: parser_util.register_parser
:noindex:

View File

@@ -0,0 +1,119 @@
Python Linux Ramdump Parser
What does this tool do?
----------------------------------
This tool takes as its input a vmlinux symbol file, and files representing
memory from devices that run Linux. It proceeds to dump useful information
such as process stacks, IRQ and workqueue information.
What does this tool need?
----------------------------------
1) Python. This tool has been tested with Python 2.6.5 on both Linux and Windows
2) a set of RAM dumps. Ideally, the load location of each dump as well.
3) The corresponding vmlinux file
How is this tool invoked?
----------------------------------
python ramparse.py invokes the parser. Options:
--ram-file <file path> <start> <end> : Add this ram file to be parsed.
At least one of --ram-file and --auto-dump required
--vmlinux <path> : path for vmlinux to use. This is required
--auto-dump <path to folder> : Automatically find files for a RAM dump and
detect useful information.
--gdb-path <path> : path for the GNU gdb debugger. If no path is given, the
path will be used from local_settings.py
--nm-path <path> : path for the nm tool. If no path is given, the
path will be used from local_settings.py
--outdir <path> : Output directory to store any files written. If no path is
given, the ramfile directory will be used if given, else the current directory
will be used.
--out-file <path> : File to write all output to. If no path is given,
linux-parser-output.txt is used
--stdout : Write to stdout instead of the out-file. This overrides any
--out-file given.
--qtf : Use QTF tool to parse and save QDSS trace data
--qtf-path <path> : QTF tool executable
--timeout <timeout value in secs>: each parser will be terminated within the given time limit
The list of features parsed is constantly growing. Please use --help option
to see the full list of features that can be parsed.
System requirements
-------------------------------
Python 2.7 is required to run this tool. It can be downloaded and
installed from https://www.python.org/ or through your system's
software package manager.
If you already have python2.7 installed but it's not the default
python2 interpreter on your system (e.g. if python2 points to
python2.6) then you'll need to invoke the script with python2.7
explicitly, for example:
$ python2.7 $(which ramparse.py) ...
instead of:
$ ramparse.py ...
Setting up the toolchains
-------------------------------
The parser relies on having access to gdb and nm to work. You will need to
specify the paths to these tools. This can be done in three ways
1) Using --gdb-path and --nm-path to specify the absolute path
2) Using CROSS_COMPILE to specify the prefix
3) Using local_settings.py as described below
4) Install this library from https://github.com/eliben/pyelftools
- Download the zip file from the above GitHub link.
- After download the zip file, you will find a folder pyelftools-master.
- Inside this folder you will find another folder named "elftools"
- copy that entire folder and paste it in below directory <installed Python path>\Lib\site-packages
5) for --timeout option please do 'pip install func_timeout'
https://pypi.org/project/func-timeout/
Just having gdb/nm on the path is not supported as there are too many
variations on names to invoke.
local_settings.py
-------------------------------
The parser attempts to figure out most of the settings automatically but there
are some settings that are unique to the environment of the running system.
These must be specified in local_settings.py. The current format for the file
is
<setting name> = <string identifying the feature>
Currently supported features:
gdb_path - absolute path to the gdb tool for the ramdumps
nm_path - absolute path to the nm tool for the ramdumps
gdb64_path - absolute path to the 64-bit gdb tool for the ramdumps
nm64_path - absolute path to the 64-bit nm tool for the ramdumps
qtf_path - absolute path to qtf tool executable
scandump_parser_path - absolute path to scandump parser for the ramdumps
crashtool - absolute path to the 'crash' tool that is able to provide
gdb like debugging and also host an extension that extracts
ftrace
trace_ext - shared library extension that allows the 'crash' tool to read
raw ftrace data from a ramdump and dump it into a binary format
tracecmdtool - absolute path to the trace-cmd tool that converts the binary
trace format created by trace_ext to human readable ftrace
Note that local_settings.py is just a python file so the file may take advantage
of python features.
make_rpm.sh
-------------------------------
This script helps packages the linux-ramdump-parser-v2 subdirectory in this repo as an RPM deliverable.
Usage: ./make_rpm.sh [-a <Architecture>]

View File

@@ -0,0 +1,504 @@
# Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved.
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from sizes import SZ_4K, SZ_64K, SZ_2M, SZ_32M, SZ_1G, SZ_256G
from sizes import get_order, order_size_strings
import re
import mm
# --- AArch64 long-descriptor (VMSAv8-64) walk parameters -------------------
NUM_PT_LEVEL = 4    # levels walked: first/second/third/last
NUM_FL_PTE = 512    # entries per first-level table (9 index bits)
NUM_SL_PTE = 512    # entries per second-level table
NUM_TL_PTE = 512    # entries per third-level table
NUM_LL_PTE = 512    # entries per last-level table
FLSL_BASE_MASK = 0xFFFFFFFFF000   # next-level table / output address bits [47:12]
FLSL_TYPE_BLOCK = (1 << 0)        # descriptor type: block mapping
FLSL_TYPE_TABLE = (3 << 0)        # descriptor type: next-level table
FLSL_PTE_TYPE_MASK = (3 << 0)     # low two bits hold the descriptor type
LL_TYPE_PAGE = (3 << 0)           # last-level page descriptor type
LL_PAGE_MASK = 0xFFFFFFFFF000     # page output-address bits
LL_AP_BITS = (0x3 << 6)           # AP[2:1] access permissions
LL_CH = (0x1 << 52)               # contiguous hint bit
LL_XN = (0x1 << 54)               # execute-never bit
LL_ATTR_INDX = (0x7 << 2)         # AttrIndx[2:0] memory-attribute index
LL_SH_BITS = (0x3 << 8)           # SH[1:0] shareability
# AttrIndx values as interpreted by this parser
ATTR_IDX_NONCACHED = 0x0
ATTR_IDX_CACHE = 0x1
ATTR_IDX_DEV = 0x2
ATTR_IDX_UPST = 0x3
ATTR_IDX_LLC_NWA = 0x4
# SH[1:0] encodings
SH_NON_SHARE = (0x0 << 8)
SH_RESERVED = (0x1 << 8)
SH_OUTER_SHARE = (0x2 << 8)
SH_INNER_SHARE = (0x3 << 8)
# AP[2:1] encodings: read-only / read-write, privileged or any
LL_AP_RO = (0x3 << 6)
LL_AP_RW = (0x1 << 6)
LL_AP_PR_RW = (0x0 << 6)
LL_AP_PR_RO = (0x2 << 6)
class FlatMapping(object):
    """One page/block-granule translation record produced by the table walk.

    virt: virtual address of the entry; phys: physical address or -1 when
    unmapped; type: display string for the access permissions; size: size in
    bytes of the mapping this record represents; the *_str fields are
    pre-formatted display strings; mapped: whether the entry is present.
    """
    def __init__(self, virt, phys=-1, type='[]', size=SZ_4K, attr_indx_str='[]',
                 shareability_str='[]', execute_never_str='[]', mapped=False):
        # Attribute names must stay in sync with __add_flat_mapping() and
        # the collapsing/report code below.
        self.virt = virt
        self.phys = phys
        self.type = type
        self.map_size = size
        self.attr_indx_str = attr_indx_str
        self.shareability_str = shareability_str
        self.execute_never_str = execute_never_str
        self.mapped = mapped
class CollapsedMapping(object):
    """A run of adjacent FlatMappings merged into one [start, end] range.

    virt_end/phys_end are stored *inclusive*: the constructor receives the
    exclusive end of the range and subtracts 1.
    """
    def __init__(self, virt_start, virt_end, phys_start=-1, phys_end=-1,
                 map_type='[]', map_size=SZ_4K, attr_indx_str='[]',
                 shareability_str='[]', execute_never_str='[]', mapped=False):
        self.virt_start = virt_start
        self.virt_end = virt_end - 1          # exclusive -> inclusive
        self.phys_start = phys_start
        self.phys_end = phys_end - 1          # exclusive -> inclusive
        self.map_type = map_type
        self.map_size = map_size              # size of each merged entry
        self.attr_indx_str = attr_indx_str
        self.shareability_str = shareability_str
        self.execute_never_str = execute_never_str
        self.mapped = mapped
def add_collapsed_mapping(mappings, first, last):
    """Create a single CollapsedMapping covering the FlatMappings between
    *first* and *last*, inclusive, and store it keyed by its start VA.

    Attributes (type, size, memory attributes) are taken from *first*;
    only the address span extends to *last*.
    """
    virt_start = first.virt
    entry = CollapsedMapping(
        virt_start=virt_start,
        virt_end=last.virt + last.map_size,
        phys_start=first.phys,
        phys_end=(last.phys & 0xFFFFFFFFF000) + last.map_size,
        map_type=first.type,
        map_size=first.map_size,
        attr_indx_str=first.attr_indx_str,
        shareability_str=first.shareability_str,
        execute_never_str=first.execute_never_str,
        mapped=first.mapped)
    if virt_start in mappings:
        # Bug fix: mark duplicates via map_type -- the attribute the report
        # writer actually prints. The original assigned a nonexistent
        # 'type' attribute, so duplicates were never flagged in the output.
        entry.map_type = 'Duplicate'
    mappings[virt_start] = entry
def create_collapsed_mapping(flat_mapping):
    """Merge adjacent *unmapped* holes of identical shape into single
    ranges; every mapped entry is kept as its own CollapsedMapping.

    flat_mapping: dict of virt -> FlatMapping. Returns a dict of
    virt_start -> CollapsedMapping.
    """
    collapsed = {}
    if not flat_mapping:
        return collapsed
    addrs = sorted(flat_mapping)
    run_start = previous = flat_mapping[addrs[0]]
    for addr in addrs[1:]:
        entry = flat_mapping[addr]
        # Entries merge only when both are holes with matching size,
        # permission string and memory attributes.
        mergeable = (not entry.mapped
                     and not previous.mapped
                     and entry.map_size == previous.map_size
                     and entry.type == previous.type
                     and entry.attr_indx_str == previous.attr_indx_str)
        if not mergeable:
            # Flush the run that just ended and open a new one.
            add_collapsed_mapping(collapsed, run_start, previous)
            run_start = entry
        previous = entry
    # Flush the final run.
    add_collapsed_mapping(collapsed, run_start, previous)
    return collapsed
def __add_flat_mapping(mappings, virt, phy_addr, map_type_str, page_size, attr_indx_str,
                       shareability_str, execute_never_str, mapped):
    """Insert one FlatMapping keyed by virt; a second insert at the same
    VA is re-labelled 'Duplicate' before overwriting the first."""
    entry = FlatMapping(virt, phy_addr, map_type_str, page_size, attr_indx_str,
                        shareability_str, execute_never_str, mapped)
    if virt in mappings:
        entry.type = 'Duplicate'
    mappings[virt] = entry
def add_flat_mapping(ramdump, mappings, fl_idx, sl_idx, tl_idx, ll_idx, phy_addr,
                     map_type, page_size, attr_indx, shareability,
                     xn_bit, mapped):
    """Convert raw descriptor fields into display strings and record the
    entry (via __add_flat_mapping) keyed by its virtual address.

    fl_idx/sl_idx/tl_idx/ll_idx are the per-level table indices; a value
    of -1 in map_type/attr_indx/shareability/xn_bit means "not
    applicable". Returns the (mutated) mappings dict.
    """
    # Reassemble the VA from the four 9-bit indices (4KB granule:
    # level shifts 39/30/21/12).
    virt = (fl_idx << 39) | (sl_idx << 30) | (tl_idx << 21) | (ll_idx << 12)
    map_type_str = '[R/W]'
    if map_type == LL_AP_RO:
        map_type_str = '[RO]'
    elif map_type == LL_AP_PR_RW:
        map_type_str = '[P R/W]'
    elif map_type == LL_AP_PR_RO:
        map_type_str = '[P RO]'
    # shareability == -1 (not applicable) matches no encoding -> 'N/A'.
    shareability_str = 'N/A'
    if shareability == SH_NON_SHARE:
        shareability_str = 'Non-Shareable'
    elif shareability == SH_RESERVED:
        shareability_str = 'Reserved'
    elif shareability == SH_OUTER_SHARE:
        shareability_str = 'Outer-Shareable'
    elif shareability == SH_INNER_SHARE:
        shareability_str = 'Inner-Shareable'
    # attr_indx == -1 likewise falls through to 'N/A'.
    attr_indx_str = 'N/A'
    if attr_indx == ATTR_IDX_NONCACHED:
        attr_indx_str = 'Non-Cached'
    elif attr_indx == ATTR_IDX_CACHE:
        attr_indx_str = 'Cached'
    elif attr_indx == ATTR_IDX_DEV:
        attr_indx_str = 'Device'
    elif attr_indx == ATTR_IDX_UPST:
        attr_indx_str = 'UPSTREAM'
    elif attr_indx == ATTR_IDX_LLC_NWA:
        attr_indx_str = 'LLC_NWA'
    # Bug fix: default to 'N/A' so an unexpected xn_bit value can no
    # longer leave execute_never_str unbound (the original assigned it
    # only for xn_bit in (1, 0, -1) and raised UnboundLocalError
    # otherwise).
    if xn_bit == 1:
        execute_never_str = 'True'
    elif xn_bit == 0:
        execute_never_str = 'False'
    else:
        execute_never_str = 'N/A'
    # For IPA->PA translations (stage-2 walk, fastrpc table format),
    # detection of block mappings is not currently supported: store the
    # region as individual 4KB pages, translating each IPA through the
    # EL2 tables. If an IPA is not valid, fall back to the unmapped
    # representation for that page. Combining of S1 and S2 attributes is
    # not supported; only S1 attributes are saved.
    if (ramdump.s2_walk and ramdump.iommu_pg_table_format == "fastrpc"
            and mapped):
        ipa = phy_addr
        for offset in range(0, page_size, 4096):
            s2_mapped = True
            phy_addr = ramdump.mmu.page_table_walkel2(ipa + offset)
            if not phy_addr:
                phy_addr = -1
                s2_mapped = False
            __add_flat_mapping(mappings, virt + offset, phy_addr, map_type_str, 4096,
                               attr_indx_str, shareability_str, execute_never_str, s2_mapped)
    else:
        __add_flat_mapping(mappings, virt, phy_addr, map_type_str, page_size, attr_indx_str,
                           shareability_str, execute_never_str, mapped)
    return mappings
def get_super_section_mapping_info(ramdump, pg_table, index):
    """Decode a 1GB second-level block descriptor.

    Returns (phys_addr, page_size, map_type, status); phys_addr is -1 and
    status is False when the descriptor could not be read. *index* is
    unused but kept for signature parity with the other decoders.
    """
    descriptor = ramdump.read_u64(mm.phys_to_virt(ramdump, pg_table))
    if descriptor is None:
        return (-1, SZ_1G, 0, False)
    return (descriptor & 0xFFFFC0000FFF, SZ_1G, descriptor & LL_AP_BITS, True)
def get_section_mapping_info(ramdump, pg_table, index):
    """Decode a third-level block descriptor: 2MB, or 32MB when the
    contiguous hint is set.

    pg_table is the physical address of the descriptor itself (the caller
    advances the pointer; *index* is unused here). Returns
    (phys_addr, page_size, map_type, status, skip_count, attr_indx,
    sh_bits, xn_bit). skip_count tells the caller how many following
    entries belong to the same contiguous mapping.

    NOTE(review): status is never set False here even when the descriptor
    cannot be read (phys stays -1) -- confirm callers rely on the
    phys_addr != -1 check instead.
    """
    pg_table_virt = mm.phys_to_virt(ramdump, pg_table)
    phy_addr = ramdump.read_u64(pg_table_virt)
    # Defaults describe an absent/unreadable entry.
    current_phy_addr = -1
    current_page_size = SZ_2M
    current_map_type = 0
    status = True
    section_skip_count = 0
    attr_indx = 0
    sh_bits = -1
    xn_bit = 0
    if phy_addr is not None:
        current_map_type = phy_addr & LL_AP_BITS
        attr_indx = ( (phy_addr & LL_ATTR_INDX) >> 2 )
        if attr_indx == ATTR_IDX_NONCACHED or attr_indx == ATTR_IDX_CACHE:
            # Shareability info available only for Normal Memory.
            sh_bits = phy_addr & LL_SH_BITS
        if phy_addr & LL_XN:
            xn_bit = 1
        if phy_addr & LL_CH:
            # Contiguous hint: current + next 15 entries form one 32MB
            # mapping.
            current_phy_addr = phy_addr & 0xFFFFFE000FFF
            current_page_size = SZ_32M
            section_skip_count = 15
        else:
            current_phy_addr = phy_addr & 0xFFFFFFE00FFF
            current_page_size = SZ_2M
    return (current_phy_addr, current_page_size, current_map_type,
            status, section_skip_count, attr_indx, sh_bits, xn_bit)
def get_mapping_info(ramdump, pg_table, index):
    """Decode one last-level descriptor: a 4KB page, or 64KB when the
    contiguous hint is set.

    pg_table is the physical base of the last-level table; index selects
    the 8-byte descriptor. Returns (phys_addr, page_size, map_type,
    status, skip_count, attr_indx, sh_bits, xn_bit); status is False for
    a nonzero descriptor that is not a page entry.
    """
    ll_pte = pg_table + (index * 8)
    ll_pte_virt = mm.phys_to_virt(ramdump, ll_pte)
    phy_addr = ramdump.read_u64(ll_pte_virt)
    # Defaults describe an absent/unreadable entry.
    current_phy_addr = -1
    current_page_size = SZ_4K
    current_map_type = 0
    status = True
    skip_count = 0
    attr_indx = 0
    sh_bits = -1
    xn_bit = 0
    if phy_addr is not None:
        current_map_type = phy_addr & LL_AP_BITS
        # NOTE(review): LL_TYPE_PAGE is 0b11, so this tests "either low
        # type bit set" rather than "type == page"; a 0b01 entry would
        # also take this branch -- confirm that is intended.
        if phy_addr & LL_TYPE_PAGE:
            current_phy_addr = phy_addr & 0xFFFFFFFFFFFF
            attr_indx = ( (phy_addr & LL_ATTR_INDX) >> 2 )
            if attr_indx == ATTR_IDX_NONCACHED or attr_indx == ATTR_IDX_CACHE:
                # Shareability info available only for Normal Memory.
                sh_bits = phy_addr & LL_SH_BITS
            if phy_addr & LL_XN:
                xn_bit = 1
            if phy_addr & LL_CH:
                # Contiguous hint: current + next 15 entries form one
                # 64KB mapping; caller skips the next 15 descriptors.
                current_phy_addr = phy_addr & 0xFFFFFFFF0FFF
                current_page_size = SZ_64K
                skip_count = 15
        elif phy_addr != 0:
            # Error condition if at last level it is not LL_TYPE_PAGE.
            current_phy_addr = phy_addr
            status = False
    return (current_phy_addr, current_page_size, current_map_type,
            status, skip_count, attr_indx, sh_bits, xn_bit)
def fl_entry(ramdump, fl_pte, skip_fl):
    """Read one first-level descriptor at physical address fl_pte.

    Returns (descriptor, second_level_base). When skip_fl is set (3-level
    page tables) a dummy table-type descriptor is synthesized and the
    second-level base is fl_pte itself, so the caller starts walking at
    the second level. Returns (0, 0) when the read fails.
    """
    entry = ramdump.read_u64(fl_pte)
    if entry is None:
        return (0, 0)
    if skip_fl == 1:
        return (FLSL_TYPE_TABLE, fl_pte)
    return (entry, entry & FLSL_BASE_MASK)
def parse_2nd_level_table(ramdump, sl_pg_table_entry, fl_index,
                          sl_index, tmp_mapping):
    """Walk one second-level table entry: iterate its 512 third-level
    descriptors, recording 2MB/32MB blocks or descending into last-level
    tables of 4KB/64KB pages.

    Returns the updated tmp_mapping dict of virt -> FlatMapping.
    """
    tl_base = sl_pg_table_entry & FLSL_BASE_MASK
    section_skip_count = 0
    for tl_index in range(0, NUM_TL_PTE):
        # Bug fix: compute the descriptor address from the index every
        # iteration. The original advanced a running pointer with
        # 'tl_pte += 8' at the bottom of the loop, which the 'continue'
        # in the contiguous-block skip path bypassed -- leaving the
        # pointer stale for every subsequent entry.
        tl_pte = tl_base + tl_index * 8
        tl_pte_virt = mm.phys_to_virt(ramdump, tl_pte)
        tl_pg_table_entry = ramdump.read_u64(tl_pte_virt)
        if tl_pg_table_entry == 0 or tl_pg_table_entry is None:
            # Hole: record a 2MB unmapped span.
            tmp_mapping = add_flat_mapping(
                ramdump, tmp_mapping, fl_index, sl_index,
                tl_index, 0, -1,
                -1, SZ_2M, -1, -1, -1, False)
            continue
        tl_entry_type = tl_pg_table_entry & FLSL_PTE_TYPE_MASK
        if tl_entry_type == FLSL_TYPE_TABLE:
            # Descend into the last-level table.
            ll_pte = tl_pg_table_entry & FLSL_BASE_MASK
            skip_count = 0
            for ll_index in range(0, NUM_LL_PTE):
                if skip_count:
                    # Part of a contiguous 64KB run already recorded.
                    skip_count -= 1
                    continue
                (phy_addr, page_size, map_type, status,
                 skip_count, attr_indx, shareability,
                 xn_bit) = get_mapping_info(ramdump, ll_pte, ll_index)
                if status and phy_addr != -1:
                    tmp_mapping = add_flat_mapping(
                        ramdump, tmp_mapping, fl_index, sl_index,
                        tl_index, ll_index, phy_addr, map_type,
                        page_size, attr_indx, shareability, xn_bit, True)
                else:
                    tmp_mapping = add_flat_mapping(
                        ramdump, tmp_mapping, fl_index, sl_index,
                        tl_index, ll_index, -1,
                        -1, page_size, attr_indx, shareability, xn_bit, False)
        elif tl_entry_type == FLSL_TYPE_BLOCK:
            if section_skip_count:
                # Part of a contiguous 32MB run already recorded.
                section_skip_count -= 1
                continue
            (phy_addr, page_size, map_type, status,
             section_skip_count, attr_indx, shareability,
             xn_bit) = get_section_mapping_info(ramdump, tl_pte, tl_index)
            if status and phy_addr != -1:
                tmp_mapping = add_flat_mapping(
                    ramdump, tmp_mapping, fl_index, sl_index,
                    tl_index, 0, phy_addr,
                    map_type, page_size, attr_indx, shareability, xn_bit, True)
    return tmp_mapping
def create_flat_mappings(ramdump, pg_table, level):
    """Walk an entire IOMMU page table rooted at physical address pg_table
    and return a dict of virt -> FlatMapping covering the whole VA space.

    level: 3 or 4. For 3-level tables the first level is skipped by
    running the outer loop once with a dummy table entry (see fl_entry).
    """
    tmp_mapping = {}
    fl_pte = pg_table
    skip_fl = 0
    fl_range = NUM_FL_PTE
    if level == 3:
        skip_fl = 1
        fl_range = 1
        # In case we have only 3 level page table we want to skip first level
        # and just parse second, third and last level. To keep unified code
        # for 3 level and 4 level parsing just run first level loop once and
        # directly jump to start parsing from second level.
    for fl_index in range(0, fl_range):
        (fl_pg_table_entry, sl_pte) = fl_entry(ramdump, fl_pte, skip_fl)
        if fl_pg_table_entry == 0:
            # Empty first-level slot: 512GB hole.
            tmp_mapping = add_flat_mapping(
                ramdump, tmp_mapping, fl_index, 0, 0, 0,
                -1, -1, SZ_256G, -1, -1, -1, False)
            fl_pte += 8
            continue
        for sl_index in range(0, NUM_SL_PTE):
            sl_pg_table_entry = ramdump.read_u64(sl_pte)
            if sl_pg_table_entry == 0 or sl_pg_table_entry is None:
                # Empty second-level slot: 1GB hole.
                tmp_mapping = add_flat_mapping(ramdump, tmp_mapping,
                                               fl_index, sl_index, 0, 0,
                                               -1, -1, SZ_1G, -1, -1, -1, False)
                sl_pte += 8
                continue
            sl_entry_type = sl_pg_table_entry & FLSL_PTE_TYPE_MASK
            if sl_entry_type == FLSL_TYPE_TABLE:
                tmp_mapping = parse_2nd_level_table(ramdump, sl_pg_table_entry,
                                                    fl_index, sl_index,
                                                    tmp_mapping)
            elif sl_entry_type == FLSL_TYPE_BLOCK:
                # 1GB block mapping at the second level.
                (phy_addr, page_size, map_type, status) \
                    = get_super_section_mapping_info(ramdump, sl_pte, sl_index)
                if status and phy_addr != -1:
                    # TODO: Fix memory attributes for 2nd-level entry
                    tmp_mapping = add_flat_mapping(
                        ramdump, tmp_mapping, fl_index, sl_index, 0, 0,
                        phy_addr, map_type, page_size, -1, -1, -1, True)
            sl_pte += 8
        fl_pte += 8
    return tmp_mapping
def parse_aarch64_tables(ramdump, d, domain_num):
    """Dump the collapsed AArch64 IOMMU mappings for one domain to
    smmu_info/arm_iommu_domain_<nn>_<client>_<pgtable>.txt.

    d: iommu domain object carrying client_name, pg_table, level,
    redirect and ctx_list. domain_num: ordinal used in the file name.
    """
    # Sanitize the client name for use in a file name. Bug fix: the
    # original tested the *result* of re.sub() for None, but re.sub()
    # never returns None and a None client_name would already have
    # crashed on .strip() -- test the input instead.
    if d.client_name is None:
        device_name = "xxxx"
    else:
        device_name = re.sub(r'[^a-zA-Z0-9]', '_', d.client_name.strip())
    fname = 'arm_iommu_domain_%02d_%s_0x%12X.txt' % (domain_num, device_name,
                                                     d.pg_table)
    with ramdump.open_file('smmu_info/'+ fname, 'w') as outfile:
        redirect = 'OFF'
        if d.redirect is None:
            redirect = 'UNKNOWN'
        elif d.redirect > 0:
            redirect = 'ON'
        iommu_context = ' '.join('%s (%s)' % (name, num)
                                 for (name, num) in d.ctx_list)
        iommu_context = iommu_context or 'None attached'
        outfile.write(
            'IOMMU Context: %s. Domain: %s'
            '[L2 cache redirect for page tables is %s]\n' % (
                iommu_context, d.client_name, redirect))
        outfile.write(
            '[VA Start -- VA End ] [Size ] [PA Start -- PA End ] '
            '[Attributes][Page Table Entry Size] [Memory Type] '
            '[Shareability] [Non-Executable] \n')
        if d.pg_table == 0:
            outfile.write(
                'No Page Table Found. (Probably a secure domain)\n')
        else:
            # Walk the tables, then merge adjacent holes for readability.
            flat_mapping = create_flat_mappings(ramdump, d.pg_table, d.level)
            collapsed_mapping = create_collapsed_mapping(flat_mapping)
            for virt in sorted(collapsed_mapping.keys()):
                mapping = collapsed_mapping[virt]
                if mapping.mapped:
                    outfile.write(
                        '0x%x--0x%x [0x%x] A:0x%x--0x%x [0x%x] %s[%s] [%s] '
                        '[%s] [%s]\n' %
                        (mapping.virt_start, mapping.virt_end,
                         mapping.map_size, mapping.phys_start,
                         mapping.phys_end, mapping.map_size, mapping.map_type,
                         order_size_strings[get_order(mapping.map_size)],
                         mapping.attr_indx_str, mapping.shareability_str,
                         mapping.execute_never_str))
                else:
                    outfile.write(
                        '0x%x--0x%x [0x%x] [UNMAPPED]\n' %
                        (mapping.virt_start, mapping.virt_end,
                         mapping.virt_end - mapping.virt_start + 1))

View File

@@ -0,0 +1,36 @@
# Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import ctypes
def bm(msb, lsb):
    """Return a bitmask with bits msb..lsb (inclusive) set.

    Uses shift arithmetic instead of the original build-a-binary-string-
    and-int() approach, which allocated two strings per call.
    """
    return ((1 << (msb - lsb + 1)) - 1) << lsb
def bvalsel(msb, lsb, val):
    """Extract and right-align bits msb..lsb (inclusive) of val.

    Computes the mask inline with shift arithmetic rather than calling
    bm(), avoiding the helper's per-call string construction.
    """
    return (val >> lsb) & ((1 << (msb - lsb + 1)) - 1)
def is_set(val, bit):
    """Return True when bit number *bit* is set in *val*."""
    return bool(val & (1 << bit))
def align(x, a):
    """Round x up to the nearest multiple of a (a must be a power of two).

    Mirrors the ALIGN() macro in include/uapi/linux/kernel.h, truncating
    the mask to 64 bits via ctypes.
    """
    # ~(a - 1) == -a in two's complement; c_uint64 clamps it to 64 bits.
    mask = ctypes.c_uint64(-a).value
    return (x + a - 1) & mask

View File

@@ -0,0 +1,80 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
from boards import Board
class BoardQCM6490(Board):
    """Board description for the QCM6490 SoC (primary VM variant)."""
    def __init__(self, socid):
        super(BoardQCM6490, self).__init__()
        self.socid = socid
        self.board_num = "qcm6490"
        self.cpu = 'CORTEXA53'            # T32 CPU model string
        self.ram_start = 0x80000000       # DDR base
        self.smem_addr = 0x900000
        self.smem_addr_buildinfo = 0x9071c0
        self.phys_offset = 0xA0000000     # CONFIG_PHYS_OFFSET
        self.imem_start = 0x14680000
        self.kaslr_addr = 0x146aa6d0      # IMEM slot holding the KASLR offset
        self.wdog_addr = 0x146aa658
        self.imem_file_name = 'OCIMEM.BIN'
        self.arm_smmu_v12 = True
class BoardQCM6490SVM(Board):
    """Board description for the QCM6490 SoC, SVM (secondary VM) variant.

    Differs from BoardQCM6490 only in phys_offset.
    """
    def __init__(self, socid):
        super(BoardQCM6490SVM, self).__init__()
        self.socid = socid
        self.board_num = "qcm6490svm"
        self.cpu = 'CORTEXA53'            # T32 CPU model string
        self.ram_start = 0x80000000       # DDR base
        self.smem_addr = 0x900000
        self.smem_addr_buildinfo = 0x9071c0
        self.phys_offset = 0xD0780000     # SVM carve-out base
        self.imem_start = 0x14680000
        self.kaslr_addr = 0x146aa6d0      # IMEM slot holding the KASLR offset
        self.wdog_addr = 0x146aa658
        self.imem_file_name = 'OCIMEM.BIN'
        self.arm_smmu_v12 = True
class BoardQCS9100(Board):
    """Board description for the QCS9100 SoC (primary VM variant)."""
    def __init__(self, socid):
        super(BoardQCS9100, self).__init__()
        self.socid = socid
        self.board_num = "qcs9100"
        self.cpu = 'CORTEXA53'            # T32 CPU model string
        self.ram_start = 0x80000000       # DDR base
        self.smem_addr = 0x900000
        self.smem_addr_buildinfo = 0x9071c0
        self.phys_offset = 0xA0000000     # CONFIG_PHYS_OFFSET
        self.imem_start = 0x14680000
        self.kaslr_addr = 0x146aa6d0      # IMEM slot holding the KASLR offset
        self.wdog_addr = 0x146aa658
        self.imem_file_name = 'OCIMEM.BIN'
        self.arm_smmu_v12 = True
class BoardQCS9100SVM(Board):
    """Board description for the QCS9100 SoC, SVM (secondary VM) variant.

    Differs from BoardQCS9100 only in phys_offset.
    """
    def __init__(self, socid):
        super(BoardQCS9100SVM, self).__init__()
        self.socid = socid
        self.board_num = "qcs9100svm"
        self.cpu = 'CORTEXA53'            # T32 CPU model string
        self.ram_start = 0x80000000       # DDR base
        self.smem_addr = 0x900000
        self.smem_addr_buildinfo = 0x9071c0
        self.phys_offset = 0xD0780000     # SVM carve-out base
        self.imem_start = 0x14680000
        self.kaslr_addr = 0x146aa6d0      # IMEM slot holding the KASLR offset
        self.wdog_addr = 0x146aa658
        self.imem_file_name = 'OCIMEM.BIN'
        self.arm_smmu_v12 = True
# Instantiating a Board subclass registers it with the parser
# (Board.__init__ calls register_board). NOTE(review): several socids are
# registered for both the primary-VM and SVM variants; selection between
# the two presumably happens elsewhere -- confirm against the loader.
BoardQCM6490(socid=475)
BoardQCM6490SVM(socid=475)
BoardQCM6490(socid=499)
BoardQCS9100(socid=533)
BoardQCS9100SVM(socid=533)
BoardQCS9100(socid=534)
BoardQCS9100SVM(socid=534)
BoardQCS9100(socid=667)
BoardQCS9100SVM(socid=667)

View File

@@ -0,0 +1,67 @@
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
class Board(object):
    """ Class to describe a board the parser knows how to parse
    socid = shared id unique to a board type
    board_num = human readable board number indicating the board type
                (e.g. 8960, 8974)
    cpu = T32 cpu model
    ram_start = start of the DDR
    imem_start = start of location in which the watchdog address is stored
    smem_addr = start of the shared memory region
    phys_offset = physical offset of the board (CONFIG_PHYS_OFFSET)
    wdog_addr = absolute physical address to check for FIQs
    imem_file_name = file name corresponding to imem_start
    kaslr_addr = virtual address relocation offset from vmlinux to ramdump

    It is not recommended to create instances of this class directly.
    Instead, classes should derive from this class and set fields
    appropriately for each socid.
    """
    def __init__(self):
        # Sentinel defaults; subclasses are expected to override them.
        self.socid = -1
        self.board_num = "-1"
        self.cpu = 'UNKNOWN'
        self.ram_start = 0
        self.imem_start = 0
        self.smem_addr = 0
        self.phys_offset = 0
        self.wdog_addr = 0
        self.imem_file_name = None
        # Every construction self-registers in the module-wide list.
        register_board(self)
def register_board(b):
    """Append board description *b* to the module-wide registry."""
    boards.append(b)
def get_supported_boards():
    """ Called by other part of the code to get a list of boards """
    # Prefer an 'extensions/board_def.py' overlay when present; otherwise
    # fall back to a sibling 'board_config.py'. Importing either module
    # populates the registry as a side effect (each Board subclass
    # instantiation calls register_board).
    extensions_path = os.path.join(os.path.dirname(__file__), 'extensions')
    if os.path.exists(extensions_path):
        dir = os.path.join(os.path.dirname(__file__), 'extensions', 'board_def.py')
        if os.path.exists(dir):
            import extensions.board_def
    else:
        dir = os.path.join(os.path.dirname(__file__), 'board_config.py')
        if os.path.exists(dir):
            import board_config
    return boards
def get_supported_ids():
    """ Returns a list of ids to be used with --force-hardware"""
    # De-duplicate board numbers; resulting order is unspecified.
    return list({board.board_num for board in boards})
boards = list()

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,124 @@
# Copyright (c) 2015, 2017, 2020 The Linux Foundation. All rights reserved.
# Copyright (c) 2022, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import struct
import os
from print_out import print_out_str
from ramparse import VERSION
dcc_register_list = [
'DCC_HW_VERSION',
'DCC_HW_INFO',
'DCC_CGC_CFG',
'DCC_LL',
'DCC_RAM_CFG',
'DCC_CFG',
'DCC_SW_CTL',
'DCC_STATUS',
'DCC_FETCH_ADDR',
'DCC_SRAM_ADDR',
'DCC_INT_ENABLE',
'DCC_INT_STATUS',
'DCC_QSB_CFG'
]
# DCC regs hash table
dcc_regs = {}
class DccRegDump():
    """Parses the DCC register dump region and writes dcc_regs.txt.

    Parsed values land in the module-level dcc_regs dict, which
    DccSramDump consults later.
    """
    def __init__(self, start, end):
        # [start_addr, end_addr) bounds of the register region.
        self.start_addr = start
        self.end_addr = end

    def parse_all_regs(self, ram_dump):
        """Read one 32-bit value per known register into dcc_regs.

        Returns False (without reading) when the region is too small to
        hold them all.
        """
        if self.start_addr + 4 * len(dcc_register_list) > self.end_addr:
            return False
        addr = self.start_addr
        for name in dcc_register_list:
            dcc_regs[name] = ram_dump.read_u32(addr, False)
            addr += 4
        self.start_addr = addr
        return True

    def dump_all_regs(self, ram_dump):
        """Write every parsed register to dcc_regs.txt in the output dir."""
        outfile = ram_dump.open_file('dcc_regs.txt')
        outfile.write('DCC registers:\n')
        for name in dcc_register_list:
            outfile.write('{0} : 0x{1:08x}\n'.format(name, dcc_regs[name]))
        outfile.close()
class DccSramDump():
    """Dumps the DCC SRAM contents to <outdir>/sram.bin.

    The SRAM is taken either from ramdump memory (validated against the
    registers captured in dcc_regs) or, when a standalone DCC_SRAM.BIN
    file exists next to the ramdump, directly from that file.
    """
    def __init__(self, start, end, ram_dump):
        self.start_addr = start
        self.end_addr = end
        self.bin_dir = None
        if ram_dump.ram_addr is None:
            self.bin_dir = ram_dump.autodump
        else:
            # ram_addr holds (path, start, end) tuples; derive the dump
            # directory from the first path (Windows-style separators).
            self.bin_dir = ram_dump.ram_addr
            self.bin_dir = "\\".join(self.bin_dir[0][0].split('\\')[:-1])
        self.dcc_bin = os.path.join(self.bin_dir, 'DCC_SRAM.BIN')
        if os.path.isfile(self.dcc_bin):
            # Standalone SRAM image present: use its fixed 8KB window.
            self.start_addr = 0x6000
            self.end_addr = 0x8000

    def dump_sram_img(self, ram_dump):
        """Write the SRAM region to sram.bin; returns True on success."""
        if self.start_addr >= self.end_addr:
            return False
        rsz = self.end_addr - self.start_addr
        if os.path.isfile(self.dcc_bin):
            return self.dump_sram_img_bin(ram_dump, self.dcc_bin)
        if 'DCC_HW_INFO' not in dcc_regs \
                or dcc_regs['DCC_HW_INFO'] == 0:
            print_out_str('DCC HW Info missing! Skipping sram dump...')
            return False
        if dcc_regs['DCC_CFG'] & 0x1:
            print_out_str('DCC is configured in CRC mode. Skipping sram dump ...')
            return False
        if dcc_regs['DCC_RAM_CFG'] == 0:
            print_out_str('No config found in DCC SRAM. Skipping sram dump ...')
            return False
        file_path = os.path.join(ram_dump.outdir, 'sram.bin')
        # Bug fixes vs. the original: the failure path returned the
        # undefined name FALSE (a NameError); a failed open is now
        # reported instead of raising; and the file is closed even when
        # a read fails mid-loop.
        try:
            sramfile = open(file_path, 'wb')
        except OSError:
            print_out_str("sram.bin file creation failed")
            return False
        with sramfile:
            for i in range(0, rsz):
                val = ram_dump.read_byte(self.start_addr + i, False)
                sramfile.write(struct.pack('<B', val))
        return True

    def dump_sram_img_bin(self, ram_dump, dcc_bin):
        """Copy the SRAM window out of a standalone DCC_SRAM.BIN image."""
        if self.start_addr >= self.end_addr:
            return False
        # whence=1 (relative seek) equals an absolute seek here because
        # the file was just opened at position 0.
        with open(dcc_bin, 'rb') as f:
            f.seek(self.start_addr, 1)
            bin_data = f.read()
        sramfile = ram_dump.open_file('sram.bin')
        sramfile.write(bin_data)
        sramfile.close()
        return True

View File

@@ -0,0 +1,992 @@
# Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import struct
import linux_list as llist
import re
import shutil
import os
import platform
import random
import subprocess
import sys
import time
import local_settings
from scandump_reader import Scandump_v2
from collections import OrderedDict
from dcc import DccRegDump, DccSramDump
from pmic import PmicRegDump
from print_out import print_out_str, print_out_exception
from qdss import QDSSDump
from watchdog_v2 import TZRegDump_v2
from cachedumplib import lookup_cache_type
from tlbdumplib import lookup_tlb_type
from vsens import VsensData
from sysregs import SysRegDump
from fcmdump import FCM_Dump
MEMDUMPV2_MAGIC = 0x42445953
MEMDUMPV_HYP_MAGIC = 0x42444832
MAX_NUM_ENTRIES = 0x164
IMEM_OFFSET_MEM_DUMP_TABLE = 0x3f010
msm_dump_cpu_type = ['MSM_DUMP_CPU_TYPE_INVALID',
'MSM_DUMP_CPU_TYPE_AARCH32',
'MSM_DUMP_CPU_TYPE_AARCH64',
'MSM_DUMP_CPU_TYPE_HYDRA']
msm_dump_ctx_type= [
'MSM_DUMP_CTX_TYPE_PHYS_NS_CPU_CTX',
'MSM_DUMP_CTX_TYPE_PHYS_SEC_CPU_CTX',
'MSM_DUMP_CTX_TYPE_NS_VCPU_EL10_CTX',
'MSM_DUMP_CTX_TYPE_SEC_VCPU_EL10_CTX',
'MSM_DUMP_CTX_TYPE_NS_VCPU_NESTED_EL2_CTX']
msm_dump_regset_ids = {}
msm_dump_regset_ids[0] = 'MSM_DUMP_REGSET_IDS_INVALID'
msm_dump_regset_ids[16] = 'MSM_DUMP_REGSET_IDS_AARCH64_GPRS'
msm_dump_regset_ids[17] = 'MSM_DUMP_REGSET_IDS_AARCH64_NEON'
msm_dump_regset_ids[18] = 'MSM_DUMP_REGSET_IDS_AARCH64_SVE'
msm_dump_regset_ids[19] = 'MSM_DUMP_REGSET_IDS_AARCH64_SYSREGS_EL0'
msm_dump_regset_ids[20] = 'MSM_DUMP_REGSET_IDS_AARCH64_EL1'
msm_dump_regset_ids[21] = 'MSM_DUMP_REGSET_IDS_AARCH64_EL2'
msm_dump_regset_ids[22] = 'MSM_DUMP_REGSET_IDS_AARCH64_VM_EL2'
msm_dump_regset_ids[23] = 'MSM_DUMP_REGSET_IDS_AARCH64_EL3'
msm_dump_regset_ids[24] = 'MSM_DUMP_REGSET_IDS_AARCH64_DBG_EL1'
msm_dump_regset_ids[25] = 'MSM_DUMP_REGSET_IDS_AARCH64_CNTV_EL10'
msm_dump_regset_ids[26] = 'MSM_DUMP_REGSET_IDS_AARCH64_CNTP_EL10'
msm_dump_regset_ids[27] = 'MSM_DUMP_REGSET_IDS_AARCH64_CNT_EL2'
class client(object):
    """Namespace of dump-table client IDs (msm_dump_data ids).

    Per-CPU / per-bank clients (CPU_CTX, L1/L2 TLB and caches, ETM,
    SCANDUMP_PER_CPU, LLC_CACHE) use their value as a base id; the
    core or bank number is added to it when the lookup table is
    populated in parse_dump_v2().
    """
    MSM_DUMP_DATA_CPU_CTX = 0x00
    MSM_DUMP_DATA_L1_INST_TLB = 0x20
    MSM_DUMP_DATA_L1_DATA_TLB = 0x40
    MSM_DUMP_DATA_L1_INST_CACHE = 0x60
    MSM_DUMP_DATA_L1_DATA_CACHE = 0x80
    MSM_DUMP_DATA_ETM_REG = 0xA0
    MSM_DUMP_DATA_L2_CACHE = 0xC0
    MSM_DUMP_DATA_L3_CACHE = 0xD0
    MSM_DUMP_DATA_OCMEM = 0xE0
    MSM_DUMP_DATA_DBGUI_REG = 0xE5
    MSM_DUMP_DATA_MISC = 0xE8
    MSM_DUMP_DATA_VSENSE = 0xE9
    MSM_DUMP_DATA_TMC_ETF = 0xF0
    MSM_DUMP_DATA_TMC_ETF_SWAO = 0xF1
    # NOTE: TMC_REG and TMC_ETR_REG intentionally share 0x100; 0x100 is
    # the etr register block and 0x101 the etf register block (see the
    # comment in parse_dump_v2 where the lookup table is filled in).
    MSM_DUMP_DATA_TMC_REG = 0x100
    MSM_DUMP_DATA_TMC_ETR_REG = 0x100
    MSM_DUMP_DATA_TMC_ETF_REG = 0x101
    MSM_DUMP_DATA_TMC_ETR1_REG = 0x105
    MSM_DUMP_DATA_TMC_ETF_SWAO_REG = 0x102
    MSM_DUMP_DATA_LOG_BUF = 0x110
    MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111
    MSM_DUMP_DATA_L2_TLB = 0x120
    MSM_DUMP_DATA_DCC_REG = 0xE6
    MSM_DUMP_DATA_DCC_SRAM = 0xE7
    MSM_DUMP_DATA_SCANDUMP = 0xEB
    MSM_DUMP_DATA_RPMH = 0xEC
    MSM_DUMP_DATA_FCMDUMP = 0xEE
    MSM_DUMP_DATA_CPUSS = 0xEF
    MSM_DUMP_DATA_SCANDUMP_PER_CPU = 0x130
    MSM_DUMP_DATA_LLC_CACHE = 0x140
    MSM_DUMP_DATA_MHM = 0x161
    MSM_DUMP_DATA_MAX = MAX_NUM_ENTRIES
# Client functions will be executed in top-to-bottom order
# (client name -> DebugImage_v2 method used to parse regions of that type).
client_types = [
    ('MSM_DUMP_DATA_SCANDUMP', 'parse_scandump'),
    ('MSM_DUMP_DATA_SCANDUMP_PER_CPU', 'parse_scandump'),
    ('MSM_DUMP_DATA_FCMDUMP', 'parse_fcmdump'),
    ('MSM_DUMP_DATA_CPUSS', 'parse_cpuss'),
    ('MSM_DUMP_DATA_CPU_CTX', 'parse_cpu_ctx'),
    ('MSM_DUMP_DATA_L1_INST_TLB', 'parse_tlb_common'),
    ('MSM_DUMP_DATA_L1_DATA_TLB', 'parse_tlb_common'),
    ('MSM_DUMP_DATA_L1_INST_CACHE', 'parse_cache_common'),
    ('MSM_DUMP_DATA_L1_DATA_CACHE', 'parse_cache_common'),
    ('MSM_DUMP_DATA_L2_CACHE', 'parse_cache_common'),
    ('MSM_DUMP_DATA_L3_CACHE', 'parse_l3_cache'),
    ('MSM_DUMP_DATA_OCMEM', 'parse_ocmem'),
    ('MSM_DUMP_DATA_DBGUI_REG', 'parse_qdss_common'),
    ('MSM_DUMP_DATA_VSENSE', 'parse_vsens'),
    ('MSM_DUMP_DATA_PMIC', 'parse_pmic'),
    ('MSM_DUMP_DATA_DCC_REG', 'parse_dcc_reg'),
    ('MSM_DUMP_DATA_DCC_SRAM', 'parse_dcc_sram'),
    ('MSM_DUMP_DATA_TMC_ETF', 'parse_qdss_common'),
    ('MSM_DUMP_DATA_TMC_ETF_SWAO', 'parse_qdss_common'),
    ('MSM_DUMP_DATA_TMC_ETR_REG', 'parse_qdss_common'),
    ('MSM_DUMP_DATA_TMC_ETR1_REG', 'parse_qdss_common'),
    ('MSM_DUMP_DATA_TMC_ETF_SWAO_REG', 'parse_qdss_common'),
    ('MSM_DUMP_DATA_TMC_REG', 'parse_qdss_common'),
    ('MSM_DUMP_DATA_L2_TLB', 'parse_tlb_common'),
    ('MSM_DUMP_DATA_LLC_CACHE', 'parse_system_cache_common'),
    ('MSM_DUMP_DATA_MISC', 'parse_sysdbg_regs'),
    ('MSM_DUMP_DATA_MHM', 'parse_mhm_dump')
]
# NOTE(review): 'parse_l3_cache' and 'parse_ocmem' are referenced above
# but not defined in this portion of the file -- verify they exist before
# relying on those entries.

# Client name -> QDSSDump attribute that records where the component's
# data starts; consumed by parse_qdss_common().
qdss_tag_to_field_name = {
    'MSM_DUMP_DATA_TMC_ETR_REG': 'tmc_etr_start',
    'MSM_DUMP_DATA_TMC_ETR1_REG': 'tmc_etr1_start',
    'MSM_DUMP_DATA_TMC_REG': 'tmc_etr_start',
    'MSM_DUMP_DATA_TMC_ETF': 'etf_start',
    'MSM_DUMP_DATA_TMC_ETF_SWAO': 'tmc_etf_swao_start',
    'MSM_DUMP_DATA_TMC_ETF_SWAO_REG': 'tmc_etf_swao_reg_start',
    'MSM_DUMP_DATA_DBGUI_REG': 'dbgui_start',
}
# Client functions will be executed in top-to-bottom order
# (client name -> minidump section tag for that client).
minidump_dump_table_type = [
    ('MSM_DUMP_DATA_SCANDUMP', 'KSCANDUMP'),
    ('MSM_DUMP_DATA_CPU_CTX', 'KCPU_CTX'),
    ('MSM_DUMP_DATA_L1_INST_TLB', 'KCPUSS'),
    ('MSM_DUMP_DATA_L1_DATA_TLB', 'KCPUSS'),
    ('MSM_DUMP_DATA_L1_INST_CACHE', 'KCPUSS'),
    ('MSM_DUMP_DATA_L1_DATA_CACHE', 'KCPUSS'),
    ('MSM_DUMP_DATA_L2_CACHE', 'KCPUSS'),
    ('MSM_DUMP_DATA_L3_CACHE', 'KCPUSS'),
    ('MSM_DUMP_DATA_VSENSE', 'KVSENSE'),
    ('MSM_DUMP_DATA_PMIC', 'KPMIC'),
    ('MSM_DUMP_DATA_DCC_REG', 'KDCC_REG'),
    ('MSM_DUMP_DATA_DCC_SRAM', 'KDCC_SRAM'),
    ('MSM_DUMP_DATA_TMC_ETF', 'KTMC_ETF'),
    ('MSM_DUMP_DATA_TMC_ETR_REG', 'KTMC_REG'),
    ('MSM_DUMP_DATA_TMC_REG', 'KTMC_REG'),
    ('MSM_DUMP_DATA_MISC', 'KMISC')
]
class DebugImage_v2():
def __init__(self, ramdump):
    """Initialize QDSS state, the dump lookup tables, and the ftrace
    event struct names matching this kernel's version."""
    self.qdss = QDSSDump()
    self.dump_type_lookup_table = []
    self.dump_data_id_lookup_table = {}
    # Kernels newer than 3.9.9 renamed the ftrace event structs from
    # 'ftrace_event_*' to 'trace_event_*'.
    prefix = 'trace' if ramdump.kernel_version > (3, 9, 9) else 'ftrace'
    self.event_call = 'struct {0}_event_call'.format(prefix)
    self.event_class = 'struct {0}_event_class'.format(prefix)
def parse_scandump(self, version, start, end, client_id, ram_dump):
    """Extract a scandump blob from physical [start, end) into a .bin
    file and run the external scandump parser on it, then dump the
    recovered core registers via Scandump_v2."""
    scandump_file_prefix = "scandump_core"
    core_bin_prefix = "core"
    chipset = ram_dump.hw_id
    try:
        scan_wrapper_path = local_settings.scandump_parser_path
    except AttributeError:
        print_out_str('Could not find scandump_parser_path . Please define scandump_parser_path in local_settings')
        return
    if ram_dump.arm64:
        arch = "aarch64"
    else:
        arch = "aarch32"
    # Choose output/input names: one fixed pair for the combined
    # SCANDUMP client, per-core names for SCANDUMP_PER_CPU clients.
    # NOTE(review): if client_id matches neither branch, 'output'/'input'
    # are unbound below -- in practice only these two ids are routed here
    # via client_types.
    if client_id == client.MSM_DUMP_DATA_SCANDUMP:
        output = os.path.join(ram_dump.outdir, scandump_file_prefix)
        input = os.path.join(ram_dump.outdir, "core.bin")
        core_num = client_id & 0xF
    elif client_id >= client.MSM_DUMP_DATA_SCANDUMP_PER_CPU:
        core_num = client_id & 0xF
        output = '{0}_{1:x}'.format(scandump_file_prefix, core_num)
        output = os.path.join(ram_dump.outdir, output)
        input_filename = '{0}_{1:x}.bin'.format(core_bin_prefix, core_num)
        input = os.path.join(ram_dump.outdir, input_filename)
    print_out_str(
        'Parsing scandump context start {0:x} end {1:x} {2} {3}'.format(start, end, output, input))
    # Save the raw region so the external tool can consume it.
    header_bin = ram_dump.open_file(input, 'wb')
    header_bin.write(ram_dump.read_physical(start, end - start))
    header_bin.close()
    # NOTE(review): string command without shell=True only works on
    # Windows ('py -2' is the Windows launcher); confirm intended
    # platform support.
    subprocess.call('py -2 {0} -d {1} -o {2} -f {3} -c {4}'.format(scan_wrapper_path, input, output, arch, chipset))
    sv2 = Scandump_v2(core_num, ram_dump, version)
    reg_info = sv2.prepare_dict()
    if reg_info is not None:
        sv2.dump_core_pc(ram_dump)
        sv2.dump_all_regs(ram_dump)
    return
def parse_cpuss(self, version, start, end, client_id, ram_dump):
    """Locate the CPUSS register dump region inside the ramdump files
    and hand it to the external cpuss parser from local_settings.

    Fix: the original printed an error when 'start' could not be mapped
    to a file offset but then fell through and crashed on
    hex(int(None)); it now returns after reporting the failure.
    """
    client_name = self.dump_data_id_lookup_table[client_id]
    print_out_str(
        'Parsing {0} context start {1:x} end {2:x}'.format(client_name, start, end))
    try:
        cpuss_parser_path = local_settings.cpuss_parser_path
        cpuss_parser_json = local_settings.cpuss_parser_json
    except AttributeError:
        print_out_str('Could not find cpuss_parser_path . Please define cpuss_parser_path in local_settings')
        return
    # Map the physical start address to (offset, backing file).
    offset, input = ram_dump.get_read_physical_offset(start)
    if offset is None:
        print_out_str("parse_cpuss start address {0} not found".format(start))
        return  # nothing to feed to the external parser
    print_out_str("parse_cpuss offset address = {0} input = {1} cpuss_parser_json = {2}".format(hex(int(offset)),input,cpuss_parser_json))
    # NOTE(review): string command without shell=True only works on
    # Windows ('py -2' launcher).
    subprocess.call('py -2 {0} -i {1} -O {2} -o {3} -j {4}'.format(cpuss_parser_path, input, hex(int(offset)), ram_dump.outdir, cpuss_parser_json))
def parse_fcmdump(self, version, start, end, client_id, ram_dump):
    """Write the FCM dump image found at [start, end) to the output
    directory; logs a failure message if extraction fails."""
    name = self.dump_data_id_lookup_table[client_id]
    print_out_str(
        'Parsing {0} context start {1:x} end {2:x}'.format(name, start, end))
    dumper = FCM_Dump(start, end)
    if dumper.dump_fcm_img(ram_dump) is False:
        print_out_str('!!! Could not dump FCM')
    return
def get_vcpu_index(self, ram_dump, affinity):
vcpu_index = 0
if affinity:
if hasattr(ram_dump.board, 'aff_shift'):
aff_shift = ram_dump.board.aff_shift
else:
aff_shift = [0,0,0,0]
tmp_vcpu_index = affinity
for i in range(0, len(aff_shift)):
vcpu_index |= ((tmp_vcpu_index >> (i * 8)) & 0xff) << aff_shift[i]
if hasattr(ram_dump.board, 'core_map'):
vcpu_index = ram_dump.board.core_map.get(vcpu_index, vcpu_index)
else:
vcpu_index = affinity
return vcpu_index
def parse_cpu_ctx(self, version, start, end, client_id, ram_dump, dump_data_name=None):
    """Parse one saved CPU context region and dump its registers.

    Version 32 (int or string) uses the structured msm_dump_cpu_ctx
    layout with per-regset address entries; any other version falls
    back to the legacy TZ register layout.

    Fix: the version-32 path used 'except Exception as err: pass',
    hiding every parse failure; it still continues on error (the other
    clients must be parsed) but now logs the exception.
    """
    # Offset of this CPU from the base CPU_CTX client id (legacy path).
    core = client_id - client.MSM_DUMP_DATA_CPU_CTX
    if version == 32 or version == "32":
        try:
            # Each field_offset() may return None if the struct is not
            # in the debug info; fall back to the known binary layout.
            cpu_type_offset = ram_dump.field_offset(
                'struct msm_dump_cpu_ctx', 'cpu_type')
            if cpu_type_offset is None:
                cpu_type_offset = 0x0
            ctx_type_offset = ram_dump.field_offset(
                'struct msm_dump_cpu_ctx', 'ctx_type')
            if ctx_type_offset is None:
                ctx_type_offset = 0x4
            cpu_id_offset = ram_dump.field_offset(
                'struct msm_dump_cpu_ctx', 'cpu_id')
            if cpu_id_offset is None:
                cpu_id_offset = 0xC
            cpu_index_offset = ram_dump.field_offset(
                'struct msm_dump_cpu_ctx', 'affinity')
            if cpu_index_offset is None:
                cpu_index_offset = 0x10
            machine_id_offset = ram_dump.field_offset(
                'struct msm_dump_cpu_ctx', 'machine_id')
            if machine_id_offset is None:
                machine_id_offset = 0x14
            registers_offset = ram_dump.field_offset(
                'struct msm_dump_cpu_ctx', 'registers')
            if registers_offset is None:
                registers_offset = 0x20
            regset_num_register_offset = ram_dump.field_offset(
                'struct msm_dump_cpu_ctx', 'num_register_sets')
            if regset_num_register_offset is None:
                regset_num_register_offset = 0x1C
            regset_id_offset = ram_dump.field_offset(
                'struct msm_dump_cpu_register_entry', 'regset_id')
            if regset_id_offset is None:
                regset_id_offset = 0x0
            regset_addr_offset = ram_dump.field_offset(
                'struct msm_dump_cpu_register_entry', 'regset_addr')
            if regset_addr_offset is None:
                regset_addr_offset = 0x8
            affinity = ram_dump.read_u32(start + cpu_index_offset, False)
            cpu_index = self.get_vcpu_index(ram_dump, affinity)
            print_out_str(
                'Parsing CPU{2:d} affinity {5:x} context start {0:x} end {1:x} version {3} client_id-> {4:x}'.format(start, end, cpu_index, version, client_id, affinity))
            cpu_type = ram_dump.read_u32(start + cpu_type_offset, False)
            print_out_str("cpu_type = {0}".format(msm_dump_cpu_type[cpu_type]))
            ctx_type = ram_dump.read_u32(start + ctx_type_offset, False)
            print_out_str("ctx_type = {0}".format(msm_dump_ctx_type[ctx_type]))
            print_out_str("cpu_index = {0}".format(cpu_index))
            regset_num_register = ram_dump.read_u32(start + regset_num_register_offset, False)
            registers = start + regset_num_register_offset * 0 + registers_offset
            registers = start + registers_offset
            registers_size = ram_dump.sizeof('struct msm_dump_cpu_register_entry')
            if registers_size is None:
                registers_size = 0x10
            # Collect regset name -> [start, end) address ranges, in the
            # order they appear in the dump.
            regset_name_addr = OrderedDict()
            for i in range(0, regset_num_register):
                registers_addr = registers + registers_size * i
                regset_id = ram_dump.read_u32(registers_addr + regset_id_offset, False)
                if regset_id == 0:
                    # regset_id 0 terminates the register-set list
                    break
                regset_name = msm_dump_regset_ids[regset_id]
                print_out_str("regset_name = {0}".format(regset_name))
                regset_addr = ram_dump.read_u64(registers_addr + regset_addr_offset, False)
                regset_size = ram_dump.sizeof('struct msm_dump_aarch64_gprs')
                if regset_size is None:
                    regset_size = 0x110
                regset_end = regset_addr + regset_size
                regset_name_addr[regset_name] = [regset_addr, regset_end]
            regs = TZRegDump_v2()
            cpu_index_num = "{0:d}".format(cpu_index)
            # Label output per owning VM unless the entry belongs to vm_3.
            if dump_data_name and "vm_3" not in dump_data_name:
                core = "vcpu" + str(cpu_index_num) + "_" + dump_data_name.split('_vcpu_')[0]
            else:
                core = "vcpu" + str(cpu_index_num)
            regs_flag = regs.init_regs_v2(version, regset_name_addr, core, ram_dump)
            if regs_flag == False:
                print_out_str('!!! Could not get registers from TZ dump')
                return
            regs.dump_core_pc_gprs(ram_dump)
            regs.dump_all_regs_gprs(ram_dump)
        except Exception:
            # Best-effort: keep parsing the remaining clients, but leave
            # a trace of what went wrong instead of failing silently.
            print_out_str('!!! Exception while parsing CPU context')
            print_out_exception()
    else:
        regs = TZRegDump_v2()
        if regs.init_regs(version, start, end, core, ram_dump) is False:
            print_out_str('!!! Could not get registers from TZ dump')
            return
        regs.dump_core_pc(ram_dump)
        regs.dump_all_regs(ram_dump)
def parse_pmic(self, version, start, end, client_id, ram_dump):
    """Parse the PMIC register dump region and write out its registers."""
    name = self.dump_data_id_lookup_table[client_id]
    print_out_str(
        'Parsing {0} context start {1:x} end {2:x}'.format(name, start, end))
    pmic = PmicRegDump(start, end)
    if pmic.parse_all_regs(ram_dump) is False:
        print_out_str('!!! Could not get registers from PMIC dump')
        return
    pmic.dump_all_regs(ram_dump)
def parse_dcc_reg(self, version, start, end, client_id, ram_dump):
    """Parse the DCC register dump region and write out its registers."""
    name = self.dump_data_id_lookup_table[client_id]
    print_out_str(
        'Parsing {0} context start {1:x} end {2:x}'.format(name, start, end))
    dcc = DccRegDump(start, end)
    if dcc.parse_all_regs(ram_dump) is not False:
        dcc.dump_all_regs(ram_dump)
    else:
        print_out_str('!!! Could not get registers from DCC register dump')
    return
def parse_dcc_sram(self, version, start, end, client_id, ram_dump):
    """Extract the DCC SRAM image; on success set ram_dump.dcc so later
    stages know DCC data is available."""
    name = self.dump_data_id_lookup_table[client_id]
    print_out_str(
        'Parsing {0} context start {1:x} end {2:x}'.format(name, start, end))
    sram = DccSramDump(start, end, ram_dump)
    if sram.dump_sram_img(ram_dump) is False:
        print_out_str('!!! Could not dump SRAM')
    else:
        ram_dump.dcc = True
    return
def parse_sysdbg_regs(self, version, start, end, client_id, ram_dump):
    """Extract the sysdbg register image; on success set ram_dump.sysreg
    so parse_dump_v2 later runs the external sysreg parser."""
    name = self.dump_data_id_lookup_table[client_id]
    print_out_str(
        'Parsing {0} context start {1:x} end {2:x}'.format(name, start, end))
    dumper = SysRegDump(start, end)
    if dumper.dump_sysreg_img(ram_dump) is False:
        print_out_str('!!! Could not dump sysdbg_regs')
    else:
        ram_dump.sysreg = True
    return
def parse_vsens(self, version, start, end, client_id, ram_dump):
    """Decode the voltage-sensor dump region and print its registers."""
    name = self.dump_data_id_lookup_table[client_id]
    print_out_str(
        'Parsing {0} context start {1:x} end {2:x}'.format(name, start, end))
    vsens = VsensData()
    if vsens.init_dump_regs(start, end, ram_dump) is False:
        print_out_str('!!! Could not get registers from Vsens Dump')
        return
    vsens.print_vsens_regs(ram_dump)
def parse_qdss_common(self, version, start, end, client_id, ram_dump):
    """Record the start address of a QDSS component on self.qdss; the
    actual dumping happens later in qdss.dump_standard()."""
    name = self.dump_data_id_lookup_table[client_id]
    print_out_str(
        'Parsing {0} context start {1:x} end {2:x}'.format(name, start, end))
    # TMC_ETF_REG has no entry in qdss_tag_to_field_name (its id range
    # overlaps TMC_REG/TMC_ETR_REG), so it is special-cased here.
    if client_id == client.MSM_DUMP_DATA_TMC_ETF_REG:
        attr = 'tmc_etf_start'
    else:
        attr = qdss_tag_to_field_name[name]
    setattr(self.qdss, attr, start)
def parse_cache_common(self, version, start, end, client_id, ramdump):
    """Parse one per-core cache dump region into '<client>_0x<core>'.

    Fixes: the bare 'except:' (which also trapped SystemExit and
    KeyboardInterrupt) is narrowed to 'except Exception:', and the
    output file is now closed even when the exception is re-raised in
    debug mode.
    """
    if ramdump.skip_TLB_Cache_parse:
        return
    client_name = self.dump_data_id_lookup_table[client_id]
    core = client_id & 0xF  # per-core client ids encode the core in the low nibble
    filename = '{0}_0x{1:x}'.format(client_name, core)
    outfile = ramdump.open_file(filename)
    cache_type = lookup_cache_type(ramdump.hw_id, client_id, version)
    try:
        cache_type.parse(start, end, ramdump, outfile)
    except NotImplementedError:
        print_out_str('Cache dumping not supported for %s on this target'
                      % client_name)
    except Exception:
        # log exceptions and continue by default
        if not ramdump.debug:
            print_out_str('!!! Unhandled exception while running {0}'.format(client_name))
            print_out_exception()
        else:
            raise
    finally:
        outfile.close()
def parse_system_cache_common(self, version, start, end, client_id, ramdump):
    """Parse one LLC bank dump region into '<client>_0x<bank>'.

    Fixes: bare 'except:' narrowed to 'except Exception:'; output file
    closed even when the exception is re-raised in debug mode.
    """
    if ramdump.skip_TLB_Cache_parse:
        return
    client_name = self.dump_data_id_lookup_table[client_id]
    # LLC client ids are base + bank number.
    bank_number = client_id - client.MSM_DUMP_DATA_LLC_CACHE
    filename = '{0}_0x{1:x}'.format(client_name, bank_number)
    outfile = ramdump.open_file(filename)
    cache_type = lookup_cache_type(ramdump.hw_id, client_id, version)
    try:
        cache_type.parse(start, end, ramdump, outfile)
    except NotImplementedError:
        print_out_str('System cache dumping not supported %s'
                      % client_name)
    except Exception:
        # log exceptions and continue by default
        if not ramdump.debug:
            print_out_str('!!! Unhandled exception while running {0}'.format(client_name))
            print_out_exception()
        else:
            raise
    finally:
        outfile.close()
def parse_tlb_common(self, version, start, end, client_id, ramdump):
    """Parse one per-core TLB dump region into '<client>_0x<core>'.

    Fixes: bare 'except:' narrowed to 'except Exception:'; output file
    closed even when the exception is re-raised in debug mode.
    """
    if ramdump.skip_TLB_Cache_parse:
        return
    client_name = self.dump_data_id_lookup_table[client_id]
    core = client_id & 0xF  # per-core client ids encode the core in the low nibble
    filename = '{0}_0x{1:x}'.format(client_name, core)
    outfile = ramdump.open_file(filename)
    cache_type = lookup_tlb_type(ramdump.hw_id, client_id, version)
    try:
        cache_type.parse(start, end, ramdump, outfile)
    except NotImplementedError:
        print_out_str('TLB dumping not supported for %s on this target'
                      % client_name)
    except Exception:
        # log exceptions and continue by default
        if not ramdump.debug:
            print_out_str('!!! Unhandled exception while running {0}'.format(client_name))
            print_out_exception()
        else:
            raise
    finally:
        outfile.close()
def ftrace_field_func(self, common_list, ram_dump):
    """List-walker callback: write one 'field:...' line describing a
    single struct ftrace_event_field to self.formats_out."""
    name_offset = ram_dump.field_offset('struct ftrace_event_field', 'name')
    type_offset = ram_dump.field_offset('struct ftrace_event_field', 'type')
    # NOTE(review): filter_type_offset is computed but never used below.
    filter_type_offset = ram_dump.field_offset('struct ftrace_event_field', 'filter_type')
    field_offset = ram_dump.field_offset('struct ftrace_event_field', 'offset')
    size_offset = ram_dump.field_offset('struct ftrace_event_field', 'size')
    signed_offset = ram_dump.field_offset('struct ftrace_event_field', 'is_signed')
    name = ram_dump.read_word(common_list + name_offset)
    field_name = ram_dump.read_cstring(name, 256)
    type_name = ram_dump.read_word(common_list + type_offset)
    type_str = ram_dump.read_cstring(type_name, 256)
    offset = ram_dump.read_u32(common_list + field_offset)
    size = ram_dump.read_u32(common_list + size_offset)
    signed = ram_dump.read_u32(common_list + signed_offset)
    # Array types ('type name[len]') need the brackets re-attached after
    # the field name to mirror the kernel's format-file layout;
    # '__data_loc' fields keep their type string untouched.
    if re.match('(.*)\[(.*)', type_str) and not(re.match('__data_loc', type_str)):
        s = re.split('\[', type_str)
        s[1] = '[' + s[1]
        self.formats_out.write("\tfield:{0} {1}{2};\toffset:{3};\tsize:{4};\tsigned:{5};\n".format(s[0], field_name, s[1], offset, size, signed))
    else:
        self.formats_out.write("\tfield:{0} {1};\toffset:{2};\tsize:{3};\tsigned:{4};\n".format(type_str, field_name, offset, size, signed))
def ftrace_events_func(self, ftrace_list, ram_dump):
    """List-walker callback: write one ftrace event's name, ID, fields
    and print format (mirroring a tracefs 'format' file) to
    self.formats_out."""
    event_offset = ram_dump.field_offset(self.event_call, 'event')
    fmt_offset = ram_dump.field_offset(self.event_call, 'print_fmt')
    class_offset = ram_dump.field_offset(self.event_call, 'class')
    flags_offset = ram_dump.field_offset(self.event_call, 'flags')
    flags = ram_dump.read_word(ftrace_list + flags_offset)
    # The TRACE_EVENT_FL_TRACEPOINT flag bit moved across kernel versions.
    if ram_dump.kernel_version >= (4, 14):
        TRACE_EVENT_FL_TRACEPOINT = 0x10
    elif ram_dump.kernel_version >= (4, 9):
        TRACE_EVENT_FL_TRACEPOINT = 0x20
    else:
        TRACE_EVENT_FL_TRACEPOINT = 0x40
    # Tracepoint-backed events (>= 3.18) store the name behind
    # struct tracepoint; older/other events carry it directly.
    if (ram_dump.kernel_version >= (3, 18) and (flags & TRACE_EVENT_FL_TRACEPOINT)):
        tp_offset = ram_dump.field_offset(self.event_call, 'tp')
        tp_name_offset = ram_dump.field_offset('struct tracepoint', 'name')
        tp = ram_dump.read_word(ftrace_list + tp_offset)
        name = ram_dump.read_word(tp + tp_name_offset)
    else:
        name_offset = ram_dump.field_offset(self.event_call, 'name')
        name = ram_dump.read_word(ftrace_list + name_offset)
    type_offset = ram_dump.field_offset('struct trace_event', 'type')
    fields_offset = ram_dump.field_offset(self.event_class, 'fields')
    common_field_list = ram_dump.address_of('ftrace_common_fields')
    field_next_offset = ram_dump.field_offset('struct ftrace_event_field', 'link')
    name_str = ram_dump.read_cstring(name, 512)
    event_id = ram_dump.read_word(ftrace_list + event_offset + type_offset)
    fmt = ram_dump.read_word(ftrace_list + fmt_offset)
    fmt_str = ram_dump.read_cstring(fmt, 2048)
    self.formats_out.write("name: {0}\n".format(name_str))
    self.formats_out.write("ID: {0}\n".format(event_id))
    self.formats_out.write("format:\n")
    # First the common fields shared by every event...
    list_walker = llist.ListWalker(ram_dump, common_field_list, field_next_offset)
    list_walker.walk_prev(common_field_list, self.ftrace_field_func, ram_dump)
    self.formats_out.write("\n")
    # ...then the event's own fields from its trace_event_class.
    event_class = ram_dump.read_word(ftrace_list + class_offset)
    field_list = event_class + fields_offset
    list_walker = llist.ListWalker(ram_dump, field_list, field_next_offset)
    list_walker.walk_prev(field_list, self.ftrace_field_func, ram_dump)
    self.formats_out.write("\n")
    self.formats_out.write("print fmt: {0}\n".format(fmt_str))
def collect_ftrace_format(self, ram_dump):
    """Walk the kernel's ftrace_events list and write each event's
    format description to 'formats.txt' in the output directory.

    Fix: 'self.formats_out.close' was missing the call parentheses, so
    the output file was never actually closed/flushed.
    """
    self.formats_out = ram_dump.open_file('formats.txt')
    ftrace_events_list = ram_dump.address_of('ftrace_events')
    next_offset = ram_dump.field_offset(self.event_call, 'list')
    list_walker = llist.ListWalker(ram_dump, ftrace_events_list, next_offset)
    list_walker.walk_prev(ftrace_events_list, self.ftrace_events_func, ram_dump)
    self.formats_out.close()
def parse_sysreg(self, ram_dump):
    """Run the external minidump sysregs parser on sysdbg_regs.bin (if
    non-empty) and echo the parser's combined output to the log."""
    out_dir = ram_dump.outdir
    sysreg_parser_path_minidump = os.path.join(os.path.dirname(__file__), '..', 'dcc_parser',
                                               'sysregs_parser_minidump.py')
    # NOTE(review): os.path.join never returns None, so this check is
    # effectively dead; the os.path.exists check below is the real one.
    if sysreg_parser_path_minidump is None:
        print_out_str("!!! Incorrect path for SYSREG specified.")
        return
    if not os.path.exists(sysreg_parser_path_minidump):
        print_out_str("!!! sysreg_parser_path_minidump {0} does not exist! "
                      "Check your settings!"
                      .format(sysreg_parser_path_minidump))
        return
    # Only run the parser when the collected register blob is non-empty.
    if os.path.getsize(os.path.join(out_dir, 'sysdbg_regs.bin')) > 0:
        sysdbg_file = os.path.join(out_dir, 'sysdbg_regs.bin')
    else:
        return
    # Invoke with the same interpreter that runs this script; capture
    # stdout+stderr together and forward them into our log.
    p = subprocess.Popen([sys.executable, sysreg_parser_path_minidump, '-s', sysdbg_file, '--out-dir', out_dir],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         universal_newlines=True)
    print_out_str('--------')
    print_out_str(p.communicate()[0])
def sorted_dump_data_clients(self, ram_dump, table, table_num_entries):
    """ Returns a sorted list of (client_name, func, client_address) where
    client_address --
    the (struct msm_dump_entry*) which contains a client_id mapping to
    client_name
    func --
    registered function in client_types to parse entries of
    this type
    the return value is sorted in the same order as the client names
    in client_types
    """
    dump_entry_id_offset = ram_dump.field_offset(
        'struct msm_dump_entry', 'id')
    dump_entry_size = ram_dump.sizeof('struct msm_dump_entry')
    results = list()
    client_table = dict(client_types)
    # get first column of client_types
    client_names = [x[0] for x in client_types]
    for j in range(0, table_num_entries):
        # Physical address of the j-th msm_dump_entry in this table.
        client_entry = table + j * dump_entry_size
        client_id = ram_dump.read_u32(
            client_entry + dump_entry_id_offset, False)
        # Skip ids we don't know, and known ids without a parser.
        if client_id in self.dump_data_id_lookup_table:
            client_name = self.dump_data_id_lookup_table[client_id]
            if client_name not in client_table:
                print_out_str(
                    '!!! client_id = 0x{0:x} client_name = {1} Does not have an associated function. Skipping!'.format(client_id, client_name))
                continue
        else:
            print_out_str(
                '!!! Invalid dump client id found 0x{0:x}'.format(client_id))
            continue
        results.append((client_name, client_table[client_name], client_entry))
    # Execute clients in the order they are declared in client_types.
    results.sort(key=lambda x: client_names.index(x[0]))
    return results
def minidump_data_clients(self, ram_dump, client_id, entry_pa_addr, end_addr):
    """Resolve one minidump region to its parser.

    Returns [(client_name, client_id, parser_func_name, start_pa,
    end_pa)] or None when the id is unknown or has no registered
    parser function.
    """
    client_table = dict(client_types)
    name = self.dump_data_id_lookup_table.get(client_id)
    if name is None:
        print_out_str(
            '!!! {0} Unknown client id. Skipping!'.format(client_id))
        return None
    if name not in client_table:
        print_out_str(
            '!!! {0} Does not have an associated function. Skipping!'.format(name))
        return None
    return [(name, client_id, client_table[name], entry_pa_addr, end_addr)]
def parse_mhm_dump(self, version, start, end, client_id, ram_dump):
    """Save the raw mhm scandump region [start, end) to
    'mhm_scandump.bin' in the output directory.

    Fix: removed the dead arm64/arch computation -- 'arch' was assigned
    but never used in this method.
    """
    if client_id == client.MSM_DUMP_DATA_MHM:
        out_path = os.path.join(ram_dump.outdir, "mhm_scandump.bin")
        print_out_str(
            'Parsing mhm dump start {0:x} end {1:x} {2}'.format(start, end, out_path))
        header_bin = ram_dump.open_file(out_path, mode='wb')
        header_bin.write(ram_dump.read_physical(start, end - start))
        header_bin.close()
    return
class MsmDumpTable(object):
    """Simple value object describing one located msm_dump_table."""
    def __init__(self):
        self.name = "Anon"       # source label, e.g. "IMEM" or "RAM"
        self.phys_addr = 0x0     # physical address of the table
        self.version = 0x0       # packed major(>>20)/minor version
        self.num_entries = 0x0   # number of msm_dump_entry slots
""" Create an instance of MsmDumpTable, or None on error """
def validateMsmDumpTable(self, ram_dump, name, table_phys):
    """Read and sanity-check a msm_dump_table header at table_phys.

    'name' labels the table's source ("IMEM" or "RAM") for log
    messages.  Returns a populated MsmDumpTable, or None when the
    address, version or num_entries (capped at 100 here) looks bogus.
    """
    if table_phys is None:
        print_out_str('debug_image.py: Table {}: Unable to read dump table base address'.format(name))
        return None
    version = ram_dump.read_structure_field(
        table_phys, 'struct msm_dump_table', 'version',
        virtual = False)
    if version is None:
        print_out_str('Table {}: Version is bogus! Can\'t parse debug image'.format(name))
        return None
    num_entries = ram_dump.read_structure_field(
        table_phys, 'struct msm_dump_table', 'num_entries',
        virtual = False)
    if num_entries is None or num_entries > 100:
        print_out_str('Table {}: num_entries is bogus! Can\'t parse debug image'.format(name))
        return None
    table = self.MsmDumpTable()
    table.name = name
    table.phys_addr = table_phys
    table.version = version
    table.num_entries = num_entries
    return table
def parse_dump_v2(self, ram_dump):
    """Top-level driver: populate the client-id lookup table, locate the
    dump table (IMEM/RAM for full dumps and >=5.10 minidumps, the
    md_table for older minidumps), dispatch each entry to the parser
    registered in client_types, then run the QDSS / sysreg / ftrace
    post-processing steps."""
    self.dump_type_lookup_table = ram_dump.gdbmi.get_enum_lookup_table(
        'msm_dump_type', 2)
    cpus = ram_dump.get_num_cpus()
    # per cpu entries
    for i in ram_dump.iter_cpus():
        self.dump_data_id_lookup_table[
            client.MSM_DUMP_DATA_CPU_CTX + i] = 'MSM_DUMP_DATA_CPU_CTX'
        self.dump_data_id_lookup_table[
            client.MSM_DUMP_DATA_L1_INST_TLB + i] = 'MSM_DUMP_DATA_L1_INST_TLB'
        self.dump_data_id_lookup_table[
            client.MSM_DUMP_DATA_L1_DATA_TLB + i] = 'MSM_DUMP_DATA_L1_DATA_TLB'
        self.dump_data_id_lookup_table[
            client.MSM_DUMP_DATA_L1_INST_CACHE + i] = 'MSM_DUMP_DATA_L1_INST_CACHE'
        self.dump_data_id_lookup_table[
            client.MSM_DUMP_DATA_L1_DATA_CACHE + i] = 'MSM_DUMP_DATA_L1_DATA_CACHE'
        self.dump_data_id_lookup_table[
            client.MSM_DUMP_DATA_L2_CACHE + i] = 'MSM_DUMP_DATA_L2_CACHE'
        self.dump_data_id_lookup_table[
            client.MSM_DUMP_DATA_L3_CACHE + i] = 'MSM_DUMP_DATA_L3_CACHE'
        self.dump_data_id_lookup_table[
            client.MSM_DUMP_DATA_ETM_REG + i] = 'MSM_DUMP_DATA_ETM_REG'
        self.dump_data_id_lookup_table[
            client.MSM_DUMP_DATA_SCANDUMP_PER_CPU + i] = 'MSM_DUMP_DATA_SCANDUMP_PER_CPU'
    # One entry per LLC bank.
    for i in range(0, 4):
        self.dump_data_id_lookup_table[
            client.MSM_DUMP_DATA_LLC_CACHE + i] = 'MSM_DUMP_DATA_LLC_CACHE'
    # Singleton (non per-cpu) clients.
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_OCMEM] = 'MSM_DUMP_DATA_OCMEM'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_DBGUI_REG] = 'MSM_DUMP_DATA_DBGUI_REG'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_MISC] = 'MSM_DUMP_DATA_MISC'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_VSENSE] = 'MSM_DUMP_DATA_VSENSE'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_TMC_ETF] = 'MSM_DUMP_DATA_TMC_ETF'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_TMC_ETF_SWAO] = 'MSM_DUMP_DATA_TMC_ETF_SWAO'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_TMC_ETF_REG] = 'MSM_DUMP_DATA_TMC_ETF_REG'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_DCC_REG] = 'MSM_DUMP_DATA_DCC_REG'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_DCC_SRAM] = 'MSM_DUMP_DATA_DCC_SRAM'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_SCANDUMP] = 'MSM_DUMP_DATA_SCANDUMP'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_LLC_CACHE] = 'MSM_DUMP_DATA_LLC_CACHE'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_TMC_ETF_SWAO_REG] = 'MSM_DUMP_DATA_TMC_ETF_SWAO_REG'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_TMC_REG] = 'MSM_DUMP_DATA_TMC_REG'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_FCMDUMP] = 'MSM_DUMP_DATA_FCMDUMP'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_CPUSS] = 'MSM_DUMP_DATA_CPUSS'
    # 0x100 - tmc-etr registers and 0x101 - for tmc-etf registers
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_TMC_ETR_REG + 1] = 'MSM_DUMP_DATA_TMC_ETR_REG'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_TMC_ETR1_REG] = 'MSM_DUMP_DATA_TMC_ETR1_REG'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_LOG_BUF] = 'MSM_DUMP_DATA_LOG_BUF'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_LOG_BUF_FIRST_IDX] = 'MSM_DUMP_DATA_LOG_BUF_FIRST_IDX'
    self.dump_data_id_lookup_table[
        client.MSM_DUMP_DATA_MHM] = 'MSM_DUMP_DATA_MHM'
    for i in ram_dump.iter_cpus():
        self.dump_data_id_lookup_table[client.MSM_DUMP_DATA_L2_TLB + i] = 'MSM_DUMP_DATA_L2_TLB'
    if not ram_dump.minidump or (ram_dump.minidump and ram_dump.kernel_version > (5,10,0)):
        # --- Full-dump (or new-style minidump) path: walk the two-level
        # msm_dump_table structure. ---
        dump_table_ptr_offset = ram_dump.field_offset(
            'struct msm_memory_dump', 'table')
        dump_table_version_offset = ram_dump.field_offset(
            'struct msm_dump_table', 'version')
        dump_table_num_entry_offset = ram_dump.field_offset(
            'struct msm_dump_table', 'num_entries')
        dump_table_entry_offset = ram_dump.field_offset(
            'struct msm_dump_table', 'entries')
        dump_entry_id_offset = ram_dump.field_offset(
            'struct msm_dump_entry', 'id')
        dump_entry_name_offset = ram_dump.field_offset(
            'struct msm_dump_entry', 'name')
        dump_entry_type_offset = ram_dump.field_offset(
            'struct msm_dump_entry', 'type')
        dump_entry_addr_offset = ram_dump.field_offset(
            'struct msm_dump_entry', 'addr')
        dump_data_version_offset = ram_dump.field_offset(
            'struct msm_dump_data', 'version')
        dump_data_magic_offset = ram_dump.field_offset(
            'struct msm_dump_data', 'magic')
        dump_data_name_offset = ram_dump.field_offset(
            'struct msm_dump_data', 'name')
        dump_data_addr_offset = ram_dump.field_offset(
            'struct msm_dump_data', 'addr')
        dump_data_len_offset = ram_dump.field_offset(
            'struct msm_dump_data', 'len')
        dump_data_reserved_offset = ram_dump.field_offset(
            'struct msm_dump_data', 'reserved')
        dump_entry_size = ram_dump.sizeof('struct msm_dump_entry')
        dump_data_size = ram_dump.sizeof('struct msm_dump_data')
        """
        Some multi-guest hypervisor systems override the imem location
        with a table for a crashed guest. So the value from IMEM may
        not match the value saved in the linux variable 'memdump'.
        """
        if hasattr(ram_dump.board, 'imem_offset_memdump_table'):
            imem_dump_table_offset = ram_dump.board.imem_offset_memdump_table
        else:
            imem_dump_table_offset = IMEM_OFFSET_MEM_DUMP_TABLE
        if ram_dump.minidump and ram_dump.kernel_version >= (5, 10):
            # New-style minidump: the root table pointer lives at
            # offset 0x10 of the 'md_shrdimem' section, not in IMEM.
            # NOTE(review): if no ebi file matches md_shrdimem,
            # 'table_phys' stays unbound and the next call raises
            # NameError.
            for a in ram_dump.ebi_files:
                md_pattern = re.compile(r'md_shrdimem', re.IGNORECASE)
                if re.search(md_pattern, a[3]):
                    table_phys = ram_dump.read_word(a[1] + 0x10, virtual=False)
                    break
        else:
            table_phys = ram_dump.read_word(
                ram_dump.board.imem_start + imem_dump_table_offset,
                virtual = False)
        root_table = self.validateMsmDumpTable(ram_dump, "IMEM", table_phys)
        if root_table is None:
            # Fall back to the table address saved in the kernel's
            # 'memdump' variable.
            table_phys = ram_dump.read_structure_field(
                'memdump', 'struct msm_memory_dump', 'table_phys')
            root_table = self.validateMsmDumpTable(ram_dump, "RAM", table_phys)
        if root_table is None:
            return
        print_out_str('\nDebug image version: {0}.{1} Number of table entries {2}'.format(
            root_table.version >> 20, root_table.version & 0xFFFFF, root_table.num_entries))
        print_out_str('--------')
        out_dir = ram_dump.outdir
        # Human-readable summary of every dump entry encountered.
        sdi_dump_out = open(os.path.join(out_dir , 'sdi_dump_table.txt') , 'w')
        sdi_dump_out.write("DumpTable base = 0x{0:02x} \n".format(root_table.phys_addr))
        sdi_dump_out.write("DumpTable Src = {0}\n".format(root_table.name))
        for i in range(0, root_table.num_entries):
            # Each root entry points at a second-level msm_dump_table.
            this_entry = root_table.phys_addr + dump_table_entry_offset + \
                i * dump_entry_size
            entry_id = ram_dump.read_u32(this_entry + dump_entry_id_offset, virtual = False)
            entry_type = ram_dump.read_u32(this_entry + dump_entry_type_offset, virtual = False)
            entry_addr = ram_dump.read_word(this_entry + dump_entry_addr_offset, virtual = False)
            if entry_type > len(self.dump_type_lookup_table):
                print_out_str(
                    '!!! Invalid dump table entry type found {0:x}'.format(entry_type))
                continue
            table_version = ram_dump.read_u32(
                entry_addr + dump_table_version_offset, False)
            if table_version is None:
                print_out_str('Dump table entry version is bogus! Can\'t parse debug image')
                continue
            table_num_entries = ram_dump.read_u32(
                entry_addr + dump_table_num_entry_offset, False)
            if table_num_entries is None or table_num_entries > MAX_NUM_ENTRIES:
                print_out_str('Dump table entry num_entries is bogus! Can\'t parse debug image')
                continue
            print_out_str(
                'Debug image version: {0}.{1} Entry type: {2} Number of entries: {3}'.format(
                    table_version >> 20, table_version & 0xFFFFF,
                    self.dump_type_lookup_table[entry_type], table_num_entries))
            # Order clients as declared in client_types so e.g.
            # scandump runs before the CPU context parsers.
            lst = self.sorted_dump_data_clients(
                ram_dump, entry_addr + dump_table_entry_offset,
                table_num_entries)
            for (client_name, func, client_entry) in lst:
                print_out_str('--------')
                client_id = ram_dump.read_u32(
                    client_entry + dump_entry_id_offset, False)
                client_type = ram_dump.read_u32(
                    client_entry + dump_entry_type_offset, False)
                client_addr = ram_dump.read_word(
                    client_entry + dump_entry_addr_offset, False)
                if client_type > len(self.dump_type_lookup_table):
                    print_out_str(
                        '!!! Invalid dump client type found {0:x}'.format(client_type))
                    continue
                # Read the msm_dump_data header for this client.
                dump_data_magic = ram_dump.read_u32(
                    client_addr + dump_data_magic_offset, False)
                dump_data_version = ram_dump.read_u32(
                    client_addr + dump_data_version_offset, False)
                dump_data_name = ram_dump.read_cstring(
                    client_addr + dump_data_name_offset,
                    ram_dump.sizeof('((struct msm_dump_data *)0x0)->name'),
                    False)
                dump_data_addr = ram_dump.read_dword(
                    client_addr + dump_data_addr_offset, False)
                dump_data_len = ram_dump.read_dword(
                    client_addr + dump_data_len_offset, False)
                if dump_data_magic is None:
                    print_out_str("!!! Address {0:x} is bogus! Can't parse!".format(
                        client_addr + dump_data_magic_offset))
                    continue
                print_out_str('Parsing debug information for {0}. Version: {1} Magic: {2:x} Source: {3}'.format(
                    client_name, dump_data_version, dump_data_magic,
                    dump_data_name))
                sdi_dump_out.write("Id = {0} type = {1} Addr = 0x{2:02x} "
                                   "version {3} magic {4} DataAddr 0x{5:02x} DataLen {6} "
                                   "Dataname {7} \n"
                                   .format(client_id, client_type, client_addr,
                                           dump_data_version, dump_data_magic, dump_data_addr,
                                           dump_data_len, dump_data_name))
                # Only parse entries whose magic marks them as written.
                if dump_data_magic != MEMDUMPV2_MAGIC and dump_data_magic != MEMDUMPV_HYP_MAGIC:
                    print_out_str("!!! Magic {0:x} doesn't match! No context will be parsed".format(dump_data_magic))
                    continue
                # parse_cpu_ctx additionally needs the entry name to
                # label per-VM vcpu output.
                if "parse_cpu_ctx" in func:
                    getattr(DebugImage_v2, func)(
                        self, dump_data_version, dump_data_addr,
                        dump_data_addr + dump_data_len, client_id, ram_dump, dump_data_name=dump_data_name)
                else:
                    getattr(DebugImage_v2, func)(
                        self, dump_data_version, dump_data_addr,
                        dump_data_addr + dump_data_len, client_id, ram_dump)
        sdi_dump_out.close()
    else:
        # --- Legacy minidump path: walk the flat md_table regions. ---
        dump_table_num_entry_offset = ram_dump.field_offset(
            'struct md_table', 'num_regions')
        dump_table_entry_offset = ram_dump.field_offset(
            'struct md_table', 'entry')
        dump_entry_name_offset = ram_dump.field_offset(
            'struct md_region', 'name')
        dump_entry_id_offset = ram_dump.field_offset(
            'struct md_region', 'id')
        dump_entry_va_offset = ram_dump.field_offset(
            'struct md_region', 'virt_addr')
        dump_entry_pa_offset = ram_dump.field_offset(
            'struct md_region', 'phys_addr')
        dump_entry_size_offset = ram_dump.field_offset(
            'struct md_region', 'size')
        dump_entry_size = ram_dump.sizeof('struct md_region')
        mem_dump_data = ram_dump.address_of('minidump_table')
        mem_dump_table = ram_dump.read_word(
            mem_dump_data + dump_table_entry_offset)
        mem_table_num_entry = ram_dump.read_u32(
            mem_dump_data + dump_table_num_entry_offset)
        print_out_str('--------')
        for i in range(0, mem_table_num_entry):
            this_entry = mem_dump_data + dump_table_entry_offset + \
                i * dump_entry_size
            entry_id = ram_dump.read_u32(this_entry + dump_entry_id_offset)
            entry_va_addr = ram_dump.read_u64(this_entry + dump_entry_va_offset)
            entry_pa_addr = ram_dump.read_u64(this_entry + dump_entry_pa_offset)
            entry_size = ram_dump.read_u64(this_entry + dump_entry_size_offset)
            end_addr = entry_pa_addr + entry_size
            minidump_dump_table_value = dict(minidump_dump_table_type)
            # Only regions whose physical address maps to a collected
            # ebi section can be parsed.
            if entry_pa_addr in ram_dump.ebi_pa_name_map:
                section_name = ram_dump.ebi_pa_name_map[entry_pa_addr]
                section_name = re.sub("\d+", "", section_name)
                #if section_name in minidump_dump_table_value.values():
                lst = self.minidump_data_clients(
                    ram_dump, entry_id, entry_pa_addr, end_addr)
                if lst:
                    client_name, client_id, func,\
                    client_entry, client_end = lst[0]
                    print_out_str('--------')
                    # Version 20 is passed as a fixed dump version here.
                    getattr(DebugImage_v2, func)(
                        self, 20, client_entry,
                        client_end, client_id, ram_dump)
    # Post-processing common to both paths.
    if ram_dump.sysreg:
        self.parse_sysreg(ram_dump)
    # QDSS dumping consumes the start addresses recorded by
    # parse_qdss_common above.
    self.qdss.dump_standard(ram_dump)
    if not ram_dump.skip_qdss_bin and not ram_dump.minidump:
        self.qdss.save_etf_bin(ram_dump)
        self.qdss.save_etf_swao_bin(ram_dump)
        self.qdss.save_etr_bin(ram_dump)
    if ram_dump.ftrace_format:
        self.collect_ftrace_format(ram_dump)

View File

@@ -0,0 +1,287 @@
# Copyright (c) 2014-2015, 2020-2021 The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import re
import string
import traceback
from parser_util import cleanupString
LOG_MAGIC = 0x5d7aefca
class DmesgLib(object):
    """Extracts the kernel log (dmesg) from a ramdump.

    Three on-dump formats are supported, selected by kernel version in
    extract_dmesg():
      * >= 5.10 : lockless printk ring buffer ('prb')
      * >= 3.7  : structured records in '__log_buf'
      * older   : flat character buffer
    """
    def __init__(self, ramdump, outfile):
        self.ramdump = ramdump
        # Number of times traversal wrapped around the ring buffer; used to
        # bail out of corrupted logs (see extract_dmesg_binary).
        self.wrap_cnt = 0
        self.outfile = outfile
        # The record struct was renamed from 'struct log' to
        # 'struct printk_log'; probe which one this dump has.
        if (self.ramdump.sizeof('struct printk_log') is None):
            self.struct_name = 'struct log'
        else:
            self.struct_name = 'struct printk_log'

    def log_from_idx(self, idx, logbuf):
        """Return the address of the record at byte offset *idx*, or
        *logbuf* (the buffer start) if the record is a wrap marker."""
        len_offset = self.ramdump.field_offset(self.struct_name, 'len')
        msg = logbuf + idx
        msg_len = self.ramdump.read_u16(msg + len_offset)
        # A zero-length record marks the point where the log wrapped.
        if (msg_len == 0):
            return logbuf
        else:
            return msg

    def log_next(self, idx, logbuf):
        """Return the address of the record following the one at address
        *idx*, wrapping back to *logbuf* at the end of the buffer."""
        len_offset = self.ramdump.field_offset(self.struct_name, 'len')
        msg = idx
        msg_len = self.ramdump.read_u16(msg + len_offset)
        if (msg_len == 0):
            self.wrap_cnt += 1
            return logbuf
        else:
            return idx + msg_len

    def verify_log_helper(self, msg, verbose):
        """Sanity-check a single log record; returns False (and optionally
        hexdumps the area) when the record looks corrupted."""
        # return early if CONFIG_LOG_BUF_MAGIC is not defined
        log_align_addr = self.ramdump.address_of('__log_align')
        if (log_align_addr is None):
            return True
        len_offset = self.ramdump.field_offset(self.struct_name, 'len')
        text_offset = self.ramdump.field_offset(self.struct_name, 'text_len')
        dict_offset = self.ramdump.field_offset(self.struct_name, 'dict_len')
        magic_offset = self.ramdump.field_offset(self.struct_name, 'magic')
        msg_len = self.ramdump.read_u16(msg + len_offset)
        text_len = self.ramdump.read_u16(msg + text_offset)
        dict_len = self.ramdump.read_u16(msg + dict_offset)
        magic = self.ramdump.read_u32(msg + magic_offset)
        log_size = self.ramdump.sizeof(self.struct_name)
        log_align = self.ramdump.read_u32(log_align_addr)
        # An all-zero header is the legitimate wrap-around marker, not
        # corruption.
        is_logwrap_marker = not bool(text_len | msg_len | dict_len)
        err = []
        if (magic != LOG_MAGIC):
            err.append('Bad Magic')
        # 'len' must equal header + payload rounded up to the log alignment.
        computed_msg_len = (text_len + dict_len + log_size + log_align - 1) & ~(log_align - 1)
        if (not is_logwrap_marker and (msg_len != computed_msg_len)):
            err.append('Bad length')
        err = ' '.join(err)
        if (err):
            if (verbose):
                f = '--------- Corrupted Dmesg {} for record @ {:x} ---------\n'.format(err, msg)
                self.outfile.write(f)
                f = self.ramdump.hexdump(msg - 0x40, 0xC0)
                self.outfile.write(f)
            return False
        return True

    def verify_log(self, msg, logbuf_addr, last_idx):
        """Scan forward (4 bytes at a time) from *msg* until a valid record
        is found; returns the end of the log if none is."""
        logbuf_size = self.ramdump.sizeof('__log_buf')
        log_size = self.ramdump.sizeof(self.struct_name)
        verbose = True
        while msg != logbuf_addr + last_idx:
            if (self.verify_log_helper(msg, verbose)):
                return msg
            # Only report the first corrupted record of a run.
            verbose = False
            msg = msg + 0x4
            if (msg > logbuf_addr + logbuf_size - log_size):
                msg = logbuf_addr
                self.wrap_cnt += 1
        return logbuf_addr + last_idx

    def extract_dmesg_flat(self):
        """Dump the pre-3.7 flat character log buffer verbatim."""
        addr = self.ramdump.read_word(self.ramdump.address_of('log_buf'))
        size = self.ramdump.read_word(self.ramdump.address_of('log_buf_len'))
        dmesg = self.ramdump.read_physical(self.ramdump.virt_to_phys(addr), size)
        self.outfile.write(cleanupString(dmesg.decode('ascii', 'ignore')) + '\n')

    def extract_dmesg_binary(self):
        """Walk the structured (3.7 .. 5.9) record log and write formatted
        '[secs.usecs] text' lines to the output file."""
        first_idx_addr = self.ramdump.address_of('log_first_idx')
        last_idx_addr = self.ramdump.address_of('log_next_idx')
        logbuf_addr = self.ramdump.read_word(
            self.ramdump.address_of('log_buf'))
        time_offset = self.ramdump.field_offset(self.struct_name, 'ts_nsec')
        len_offset = self.ramdump.field_offset(self.struct_name, 'len')
        text_len_offset = self.ramdump.field_offset(self.struct_name, 'text_len')
        log_size = self.ramdump.sizeof(self.struct_name)
        first_idx = self.ramdump.read_u32(first_idx_addr)
        if self.ramdump.is_config_defined('CONFIG_PRINTK_CALLER'):
            callerid_off = self.ramdump.field_offset(self.struct_name, 'caller_id')
        last_idx = self.ramdump.read_u32(last_idx_addr)
        curr_idx = logbuf_addr + first_idx
        # wrap_cnt < 2 stops us from looping forever on a corrupted buffer.
        while curr_idx != logbuf_addr + last_idx and self.wrap_cnt < 2:
            timestamp = self.ramdump.read_dword(curr_idx + time_offset)
            if self.ramdump.is_config_defined('CONFIG_PRINTK_CALLER'):
                caller_data = self.ramdump.read_u32(curr_idx + callerid_off)
                # Top bit set means the id is a CPU number, else a thread id.
                tid_info = "T"
                if (caller_data & 0x80000000):
                    tid_info = "C"
                caller_id_data = caller_data & ~0x80000000
                caller_id_data = tid_info + str(caller_id_data)
            text_len = self.ramdump.read_u16(curr_idx + text_len_offset)
            text_str = self.ramdump.read_cstring(curr_idx + log_size, text_len)
            if text_str is not None:
                for partial in text_str.split('\n'):
                    if self.ramdump.is_config_defined('CONFIG_PRINTK_CALLER'):
                        f = '[{0:>5}.{1:0>6d}] [{caller_id_data:>6}] {2}\n'.format(
                            timestamp // 1000000000, (timestamp % 1000000000) // 1000, partial, caller_id_data=caller_id_data)
                    else:
                        f = '[{0:>5}.{1:0>6d}] {2}\n'.format(
                            timestamp // 1000000000, (timestamp % 1000000000) // 1000, partial)
                    self.outfile.write(f)
                curr_idx = self.log_next(curr_idx, logbuf_addr)
                curr_idx = self.verify_log(curr_idx, logbuf_addr, last_idx)
            else:
                self.outfile.write("[ Log wraps around ] at {0} \n".format(hex(curr_idx)))
                curr_idx = logbuf_addr
                self.wrap_cnt += 1

    def extract_lockless_dmesg(self, write_to_file=True):
        """Walk the >= 5.10 lockless printk ring buffer.

        When *write_to_file* is True the formatted lines go to the output
        file and an empty dict is returned; otherwise returns
        {timestamp_ns: [pid, line]}.
        """
        prb_addr = self.ramdump.read_pointer('prb')
        off = self.ramdump.field_offset('struct printk_ringbuffer', 'desc_ring')
        desc_ring_addr = prb_addr + off
        off = self.ramdump.field_offset('struct prb_desc_ring', 'count_bits')
        desc_ring_count = 1 << self.ramdump.read_u32(desc_ring_addr + off)
        desc_sz = self.ramdump.sizeof('struct prb_desc')
        off = self.ramdump.field_offset('struct prb_desc_ring', 'descs')
        descs_addr = self.ramdump.read_ulong(desc_ring_addr + off)
        info_sz = self.ramdump.sizeof('struct printk_info')
        off = self.ramdump.field_offset('struct prb_desc_ring', 'infos')
        infos_addr = self.ramdump.read_ulong(desc_ring_addr + off)
        off = self.ramdump.field_offset(
            'struct printk_ringbuffer', 'text_data_ring')
        text_data_ring_addr = prb_addr + off
        off = self.ramdump.field_offset('struct prb_data_ring', 'size_bits')
        text_data_sz = 1 << self.ramdump.read_u32(text_data_ring_addr + off)
        off = self.ramdump.field_offset('struct prb_data_ring', 'data')
        data_addr = self.ramdump.read_ulong(text_data_ring_addr + off)
        sv_off = self.ramdump.field_offset('struct prb_desc', 'state_var')
        off = self.ramdump.field_offset('struct prb_desc', 'text_blk_lpos')
        begin_off = off + self.ramdump.field_offset(
            'struct prb_data_blk_lpos', 'begin')
        next_off = off + self.ramdump.field_offset(
            'struct prb_data_blk_lpos', 'next')
        ts_off = self.ramdump.field_offset('struct printk_info', 'ts_nsec')
        callerid_off = self.ramdump.field_offset('struct printk_info', 'caller_id')
        len_off = self.ramdump.field_offset('struct printk_info', 'text_len')
        # Descriptor state lives in the top two bits of the state variable.
        desc_committed = 1
        desc_finalized = 2
        desc_sv_bits = self.ramdump.sizeof('long') * 8
        desc_flags_shift = desc_sv_bits - 2
        desc_flags_mask = 3 << desc_flags_shift
        desc_id_mask = ~desc_flags_mask
        off = self.ramdump.field_offset('struct prb_desc_ring', 'tail_id')
        tail_id = self.ramdump.read_ulong(desc_ring_addr + off)
        off = self.ramdump.field_offset('struct prb_desc_ring', 'head_id')
        head_id = self.ramdump.read_ulong(desc_ring_addr + off)
        did = tail_id
        dmesg_list = {}
        while True:
            ind = did % desc_ring_count
            desc_off = desc_sz * ind
            info_off = info_sz * ind
            # skip non-committed record
            state = 3 & (self.ramdump.read_ulong(descs_addr + desc_off +
                         sv_off) >> desc_flags_shift)
            if state != desc_committed and state != desc_finalized:
                if did == head_id:
                    break
                did = (did + 1) & desc_id_mask
                continue
            begin = self.ramdump.read_ulong(descs_addr + desc_off +
                                            begin_off) % text_data_sz
            end = self.ramdump.read_ulong(descs_addr + desc_off +
                                          next_off) % text_data_sz
            if begin & 1 == 1:
                # LSB set marks a data-less record (e.g. a dropped-text
                # marker); there is no text payload to read.
                text = ""
            else:
                if begin > end:
                    begin = 0
                # Skip the data-block id stored at the start of the block.
                text_start = begin + self.ramdump.sizeof('long')
                text_len = self.ramdump.read_u16(infos_addr +
                                                 info_off + len_off)
                if end - text_start < text_len:
                    text_len = end - text_start
                if text_len < 0:
                    text_len = 0
                text = self.ramdump.read_cstring(data_addr +
                                                 text_start, text_len)
            if text is None:
                # read_cstring returns None when the page is missing from
                # the dump; treat as an empty record rather than crashing
                # on text.splitlines() below.
                text = ""
            time_stamp = self.ramdump.read_u64(infos_addr +
                                               info_off + ts_off)
            caller_data = self.ramdump.read_u32(infos_addr +
                                                info_off + callerid_off)
            # Top bit set means the id is a CPU number, else a thread id.
            tid_info = "T"
            if (caller_data & 0x80000000):
                tid_info = "C"
            caller_id_data = caller_data & ~0x80000000
            pid = caller_id_data
            caller_id_data = tid_info + str(caller_id_data)
            for line in text.splitlines():
                msg = u"[{time:12.6f}][{caller_id_data:>6}] {line}\n".format(
                    time=time_stamp / 1000000000.0,
                    line=line, caller_id_data=caller_id_data)
                if write_to_file:
                    self.outfile.write(msg)
                else:
                    dmesg = []
                    dmesg.append(pid)
                    dmesg.append(line)
                    dmesg_list[time_stamp] = dmesg
            if did == head_id:
                break
            did = (did + 1) & desc_id_mask
        return dmesg_list

    def extract_dmesg(self):
        """Write the dmesg log to the output file, choosing the parser that
        matches this dump's kernel version."""
        major, minor, patch = self.ramdump.kernel_version
        if (major, minor) >= (5, 10):
            return self.extract_lockless_dmesg()
        if (major, minor) >= (3, 7):
            self.extract_dmesg_binary()
            return
        self.extract_dmesg_flat()

    def get_dmesg_as_dict(self):
        """Return {timestamp_ns: [pid, line]} for kernels with the lockless
        ring buffer; {} on older kernels or on parse failure."""
        major, minor, patch = self.ramdump.kernel_version
        if (major, minor) >= (5, 10):
            try:
                return self.extract_lockless_dmesg(False)
            except Exception:
                # Report the failure; the previous code built the traceback
                # string with format_exc() and silently discarded it.
                traceback.print_exc()
        return {}

View File

@@ -0,0 +1 @@
../../../proprietary/ramdump-parser

View File

@@ -0,0 +1,31 @@
# Copyright (c) 2017, 2020 The Linux Foundation. All rights reserved.
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import struct
import os
from print_out import print_out_str
from ramparse import VERSION
class FCM_Dump():
    """Writes the FCM physical memory region out to 'fcm.bin'."""
    def __init__(self, start, end):
        # Physical address window [start, end) of the FCM region.
        self.start_addr = start
        self.end_addr = end
    def dump_fcm_img(self, ram_dump):
        """Dump the region to fcm.bin; returns False when the configured
        window is empty or inverted."""
        if self.end_addr <= self.start_addr:
            return False
        region_size = self.end_addr - self.start_addr
        out_file = ram_dump.open_file('fcm.bin', mode='wb')
        out_file.write(ram_dump.read_physical(self.start_addr, region_size))
        out_file.close()

View File

@@ -0,0 +1,464 @@
# Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import re
import subprocess
import module_table
from print_out import print_out_str
from tempfile import NamedTemporaryFile
GDB_SENTINEL = '(gdb) '
GDB_DATA_LINE = '~'
GDB_OOB_LINE = '^'
def gdb_hex_to_dec(val):
    """Return the integer value of the first 0x... literal found in *val*."""
    hex_literal = re.search('(0x[0-9a-fA-F]+)', val).group(1)
    return int(hex_literal, 16)
class GdbSymbol(object):
    """Value object for a symbol resolved by gdb.

    Note: the ``offset`` attribute is only set when an offset was supplied;
    callers must be prepared for it to be absent.
    """
    def __init__(self, symbol, section, addr, offset=None):
        self.symbol, self.section, self.addr = symbol, section, addr
        if offset is None:
            return
        self.offset = offset
class GdbMIResult(object):
    """Holds the parsed output of one gdb/MI command: data ('~') lines and
    out-of-band ('^') status lines."""
    def __init__(self, lines, oob_lines):
        self.lines, self.oob_lines = lines, oob_lines
class GdbMIException(Exception):
    """Raised when a gdb/MI command yields unexpected or unparsable output."""
    def __init__(self, *args):
        # Fold every argument into one readable multi-line message.
        self.value = '\n *** '.join(map(str, args))
    def __str__(self):
        return self.value
class GdbMI(object):
    """Interface to the ``gdbmi`` subprocess. This should generally be
    used as a context manager (using Python's ``with`` statement),
    like so::
        >>> with GdbMI(gdb_path, elf) as g:
        ...     print('GDB Version: ' + g.version())
    """
    def __init__(self, gdb_path, elf, kaslr_offset=0):
        # Path to the gdb executable and to the ELF with debug symbols.
        self.gdb_path = gdb_path
        self.elf = elf
        # KASLR slide to add to symbol addresses (see address_of).
        self.kaslr_offset = kaslr_offset
        # Per-command result cache, keyed by the exact command string.
        self._cache = {}
        # Popen handle to the 'gdb --interpreter=mi2' child; None until open().
        self._gdbmi = None
        self.mod_table = None
        # Extra slide detected from the ELF's own section layout
        # (see set_gdbmi_aslr_offset); 0 unless the ELF is slid.
        self.gdbmi_aslr_offset = 0
    def open(self):
        """Open the connection to the ``gdbmi`` backend. Not needed if using
        ``gdbmi`` as a context manager (recommended).
        """
        if sys.platform.startswith("win"):
            import ctypes
            # Prevent a crashing gdb child from raising the Windows GPF
            # dialog, which would block the parser.
            SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN
            ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX);
            subprocess_flags = 0x8000000 #win32con.CREATE_NO_WINDOW?
        else:
            subprocess_flags = 0
        self._gdbmi = subprocess.Popen(
            [self.gdb_path, '--interpreter=mi2', self.elf],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            universal_newlines=True,
            creationflags=subprocess_flags
        )
        self._flush_gdbmi()
        # Without this, gdb truncates large values (e.g. whole buffers).
        self._run('set max-value-size unlimited')
    def close(self):
        """Close the connection to the ``gdbmi`` backend. Not needed if using
        ``gdbmi`` as a context manager (recommended).
        """
        if not self._gdbmi:
            return
        cmd = 'quit'
        self._run(cmd)
        self._gdbmi.kill()
        self._gdbmi = None
    def __enter__(self):
        self.open()
        return self
    def __exit__(self, ex_type, ex_value, ex_traceback):
        self.close()
    def _flush_gdbmi(self):
        # Discard gdb's startup banner up to the first "(gdb) " prompt.
        while True:
            line = self._gdbmi.stdout.readline().rstrip('\r\n')
            if line == GDB_SENTINEL:
                break
    def setup_module_table(self, module_table):
        """Load symbols for each kernel module via add-symbol-file,
        rebasing every section address by the KASLR offset."""
        self.mod_table = module_table
        for mod in self.mod_table.module_table:
            if not mod.get_sym_path():
                continue
            load_mod_sym_cmd = ['add-symbol-file', mod.get_sym_path().replace('\\', '\\\\')]
            if ".text" not in mod.section_offsets.keys():
                # No per-section info: fall back to the module base address.
                load_mod_sym_cmd += ['0x{:x}'.format(mod.module_offset - self.kaslr_offset)]
            for segment, offset in mod.section_offsets.items():
                load_mod_sym_cmd += ['-s', segment, '0x{:x}'.format(offset - self.kaslr_offset) ]
            self._run(' '.join(load_mod_sym_cmd))
    def _run(self, cmd, skip_cache=False, save_in_cache=True):
        """Runs a gdb command and returns a GdbMIResult of the result. Results
        are cached (unless skip_cache=True) for quick future lookups.
        - cmd: Command to run (e.g. "show version")
        - skip_cache: Don't use a previously cached result
        - save_in_cache: Whether we should save this result in the cache
        """
        if self._gdbmi is None:
            raise Exception(
                'BUG: GdbMI not initialized. ' +
                'Please use GdbMI.open or a context manager.')
        if not skip_cache:
            if cmd in self._cache:
                return GdbMIResult(self._cache[cmd], [])
        output = []
        oob_output = []
        try:
            self._gdbmi.stdin.write(cmd.rstrip('\n') + '\n')
            self._gdbmi.stdin.flush()
        except Exception as err:
            # gdb has died (broken pipe); return an empty result rather
            # than aborting the whole parse.
            return GdbMIResult(output, oob_output)
        while True:
            line = self._gdbmi.stdout.readline()
            """
            readline blocks, unless the pipe is closed on the other end, in which case it
            returns an empty line, without trailing \n.
            """
            if not len(line):
                break
            line = line.rstrip('\r\n')
            if line == GDB_SENTINEL:
                break
            if line.startswith(GDB_DATA_LINE):
                # strip the leading "~"
                line = line[1:]
                # strip the leading and trailing "
                line = line[1:-1]
                if line.startswith("\\n"):
                    continue
                # strip any trailing (possibly escaped) newlines
                if line.endswith('\\n'):
                    line = line[:-2]
                elif line.endswith('\n'):
                    line = line.rstrip('\n')
                output.append(line)
            if line.startswith(GDB_OOB_LINE):
                oob_output.append(line[1:])
        if save_in_cache:
            self._cache[cmd] = output
        return GdbMIResult(output, oob_output)
    def _run_for_one(self, cmd):
        """Run *cmd* and return its single output line; raises
        GdbMIException if gdb printed anything other than one line."""
        result = self._run(cmd)
        if len(result.lines) != 1:
            raise GdbMIException(
                cmd, '\n'.join(result.lines + result.oob_lines))
        return result.lines[0]
    def _run_for_first(self, cmd):
        """Run *cmd* and return the first output line (IndexError if none)."""
        return self._run(cmd).lines[0]
    def _run_for_multi(self, cmd):
        """Run *cmd* and return all of its output lines."""
        result = self._run(cmd)
        return result.lines
    def version(self):
        """Return GDB version"""
        return self._run_for_first('show version')
    def set_gdbmi_aslr_offset(self):
        """Detect and record the slide between the loaded .head.text section
        and the _text symbol; 0 on any failure."""
        try:
            lines = self._run('maintenance info sections').lines
            for line in lines:
                if re.search(".head.text ALLOC", line):
                    text_addr = int(self._run_for_one('print /x &_text').split(' ')[-1], 16)
                    # The section line reads "ADDR->ADDR at ..."; the load
                    # address may or may not be the first whitespace token.
                    if len(line.split("->")[0]) > 1 :
                        head_text_addr = int(line.split("->")[0].split() [-1], 16)
                    else:
                        head_text_addr = int(line.split("->")[0], 16)
                    aslr_offset = head_text_addr - text_addr
                    if aslr_offset != 0:
                        self.gdbmi_aslr_offset = aslr_offset
                        print_out_str("gdbmi_aslr_offset : 0x{0:x}".format(self.gdbmi_aslr_offset))
                    break
        except Exception as err:
            print (err)
            self.gdbmi_aslr_offset = 0
    def setup_aarch(self,type):
        """Tell gdb which target architecture to use (e.g. 'aarch64')."""
        self.aarch_set = True
        cmd = 'set architecture ' + type
        result = self._run_for_one(cmd)
        return
    def getStructureData(self, the_type):
        """Return the 'ptype /o' output lines for *the_type* (member layout
        with byte offsets)."""
        cmd = 'ptype /o {0}'.format(the_type)
        result = self._run_for_multi(cmd)
        return result
    def frame_field_offset(self, frame_name, the_type, field):
        """Returns the offset of a field in a struct or type of the selected
        frame, for when two variables share the same name in source code.
        """
        cmd = 'frame 0 {0}'.format(frame_name)
        self._run_for_one(cmd)
        cmd = 'print /x (int)&(({0} *)0)->{1}'.format(the_type, field)
        result = self._run_for_one(cmd)
        return gdb_hex_to_dec(result)
    def type_of(self, symbol):
        """ Returns the type of symbol.
        Example:
        >>> gdbmi.type_of("kgsl_driver")
        struct kgsl_driver
        """
        cmd = 'print &{0}'.format(symbol)
        result = self._run_for_one(cmd)
        # Output looks like "$1 = (struct foo *) 0x..."; extract "struct foo".
        return result.split("*)")[0].split("= (")[1]
    def print_type(self, type_or_var):
        """Return the declared type reported by 'ptype', or None if gdb
        produced no output."""
        cmd = 'ptype {0}'.format(type_or_var)
        result = self._run(cmd)
        result = '\n'.join(result.lines)
        if len(result) > 0:
            ptype = result.split("=")[1].strip()
            return ptype
        return None
    def field_offset(self, the_type, field):
        """Returns the offset of a field in a struct or type.
        Example:
        >>> gdbmi.field_offset("struct ion_buffer", "heap")
        20
        ``the_type``
            struct or type (note that if it's a struct you should
            include the word ``"struct"`` (e.g.: ``"struct
            ion_buffer"``))
        ``field``
            the field whose offset we want to return
        """
        cmd = 'print /x (int)&(({0} *)0)->{1}'.format(the_type, field)
        result = self._run_for_one(cmd)
        return gdb_hex_to_dec(result)
    def container_of(self, ptr, the_type, member):
        """Like ``container_of`` from the kernel."""
        return ptr - self.field_offset(the_type, member)
    def sibling_field_addr(self, ptr, parent_type, member, sibling):
        """Returns the address of a sibling field within the parent
        structure.
        Example:
        Given a dump containing an instance of the following struct::
            struct pizza {
                int price;
                int qty;
            };
        If you have a pointer to qty, you can get a pointer to price with:
        >>> addr = sibling_field_addr(qty, 'struct pizza', 'qty', 'price')
        >>> price = dump.read_int(addr)
        >>> price
        10
        """
        return self.container_of(ptr, parent_type, member) + \
            self.field_offset(parent_type, sibling)
    def sizeof(self, the_type):
        """Returns the size of the type specified by ``the_type``."""
        result = self._run_for_one('print /x sizeof({0})'.format(the_type))
        return gdb_hex_to_dec(result)
    def address_of(self, symbol):
        """Returns the address of the specified symbol.
        >>> hex(dump.address_of('linux_banner'))
        '0xc0b0006a'
        """
        result = self._run_for_one('print /x &{0}'.format(symbol))
        addr = int(result.split(' ')[-1], 16) + self.kaslr_offset + self.gdbmi_aslr_offset
        # If applying both slides pushed the address past 64 bits, the
        # gdbmi slide did not apply to this symbol; back it out.
        if (addr >> 64):
            return addr - self.gdbmi_aslr_offset
        else:
            return addr
    def get_symbol_info(self, address):
        """Returns a GdbSymbol representing the nearest symbol found at
        ``address``."""
        result = self._run_for_one('info symbol ' + hex(address))
        parts = result.split(' ')
        if len(parts) < 2:
            raise GdbMIException('Output looks bogus...', result)
        symbol = parts[0]
        section = parts[-1]
        try:
            # "sym + 1234 in section .text": parts[1] is the sign,
            # parts[2] the magnitude; no offset token -> 0.
            offset = int(parts[1] + parts[2])
        except ValueError:
            offset = 0
        return GdbSymbol(symbol, section, address, offset)
    def symbol_at(self, address):
        """Get the symbol at the given address (using ``get_symbol_info``)"""
        return self.get_symbol_info(address).symbol
    def get_enum_name(self, enum, val):
        """Return the symbolic name of value *val* within enum *enum*."""
        result = self._run_for_first('print ((enum {0}){1})'.format(enum, val))
        parts = result.split(' ')
        if len(parts) < 3:
            raise GdbMIException(
                "can't parse enum {0} {1}\n".format(enum, val), result)
        return parts[2].rstrip()
    def get_enum_lookup_table(self, enum, upperbound):
        """Return a table translating enum values to human readable
        strings.
        >>> dump.gdbmi.get_enum_lookup_table('ion_heap_type', 10)
        ['ION_HEAP_TYPE_SYSTEM',
        'ION_HEAP_TYPE_SYSTEM_CONTIG',
        'ION_HEAP_TYPE_CARVEOUT',
        'ION_HEAP_TYPE_CHUNK',
        'ION_HEAP_TYPE_CUSTOM',
        'ION_NUM_HEAPS',
        '6',
        '7',
        '8',
        '9']
        """
        table = []
        for i in range(0, upperbound):
            result = self._run_for_first(
                'print ((enum {0}){1})'.format(enum, i))
            parts = result.split(' ')
            if len(parts) < 3:
                raise GdbMIException(
                    "can't parse enum {0} {1}\n".format(enum, i), result)
            table.append(parts[2].rstrip())
        return table
    def get_func_info(self, address):
        """Returns the function info at a particular address, specifically
        line and file.
        >>> dump.gdbmi.get_func_info(dump.gdbmi.address_of('panic'))
        'Line 78 of \\"kernel/kernel/panic.c\\"'
        """
        # gdb works with unslid addresses, so undo the KASLR offset first.
        address = address - self.kaslr_offset
        result = self._run_for_one('info line *0x{0:x}'.format(address))
        m = re.search(r'(Line \d+ of \\?\".*\\?\")', result)
        if m is not None:
            return m.group(0)
        else:
            return '(unknown info for address 0x{0:x})'.format(address)
    def get_value_of(self, symbol):
        """Returns the value of a symbol (in decimal)"""
        result = self._run_for_one('print /d {0}'.format(symbol))
        return int(result.split(' ')[-1], 10)
    def get_value_of_string(self, symbol):
        """Returns the value of a symbol (as a string)"""
        # Disable gdb's element-count limit so long strings aren't cut off.
        self._run("set print elements 0")
        cmd = 'print /s {0}'.format(symbol)
        result = self._run(cmd)
        if len(result.lines) == 0:
            raise GdbMIException(
                cmd, '\n'.join(result.lines + result.oob_lines))
        # Three known output shapes: plain string, pointer + symbol + string,
        # pointer + string; try them in turn, then fall back to the raw line.
        match = re.search(r'^[$]\d+ = \\"(.*)(\\\\n\\")', result.lines[0])
        match_1 = re.search(r'^[$]\d+ = 0x[0-9a-fA-F]+ .* \\"(.*)(\\\\n\\")', result.lines[0])
        match_2 = re.search(r'^[$]\d+ = 0x[0-9a-fA-F]+ \\"(.*)(\\\\n\\")', result.lines[0])
        if match:
            return match.group(1).replace('\\\\n\\"',"")
        elif match_1:
            return match_1.group(1)
        elif match_2:
            return match_2.group(1).replace('\\\\n\\"', "")
        elif result.lines[0] != None:
            return result.lines[0]
        else:
            return None
    def read_memory(self, start, end):
        """Reads memory from within elf (e.g. const data). start and end should be kaslr-offset values"""
        tmpfile = NamedTemporaryFile(mode='rb')
        self._run("dump binary memory {} {}-{} {}-{}".format(tmpfile.name, start, self.kaslr_offset, end, self.kaslr_offset))
        return tmpfile.read()
    def read_elf_memory(self, start, end, temp_file):
        """Dump the [start, end) range of the ELF into *temp_file* and
        return its contents."""
        self._run("dump binary memory {} {} {}".format(temp_file.name, start, end))
        return temp_file.read()
# Stand-alone smoke test: point this module at a gdb binary and an ELF and
# it prints a handful of offset/symbol lookups.
if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Usage: gdbmi.py gdb_path elf')
        sys.exit(1)
    gdb_path, elf = sys.argv[1:]
    with GdbMI(gdb_path, elf) as g:
        print('GDB Version: ' + g.version())
        print('ion_buffer.heap offset: ' + str(g.field_offset('struct ion_buffer', 'heap')))
        print('atomic_t.counter offset: ' + str(g.field_offset('atomic_t', 'counter')))
        print('sizeof(struct ion_buffer): ' + str(g.sizeof('struct ion_buffer')))
        addr = g.address_of('kernel_config_data')
        print('address of kernel_config_data: ' + hex(addr))
        symbol = g.get_symbol_info(addr)
        print('symbol at ' + hex(addr) + ' : ' + symbol.symbol + \
              ' which is in section ' + symbol.section)

View File

@@ -0,0 +1,437 @@
# Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import rb_tree
import linux_list as llist
from mm import phys_to_virt
from print_out import print_out_str
ARM_SMMU_DOMAIN = 0
MSM_SMMU_DOMAIN = 1
MSM_SMMU_AARCH64_DOMAIN = 2
class Domain(object):
    """Describes one IOMMU domain discovered in a dump."""
    def __init__(self, pg_table, redirect, ctx_list, client_name,
                 domain_type=MSM_SMMU_DOMAIN, level=3, domain_num=-1):
        # Page-table base and traversal depth.
        self.pg_table = pg_table
        self.level = level
        # Context/client identification.
        self.ctx_list = ctx_list
        self.client_name = client_name
        self.redirect = redirect
        self.domain_type = domain_type
        # -1 when the dump does not supply a domain number.
        self.domain_num = domain_num
    def __repr__(self):
        return "#{0}: {1}".format(self.domain_num, self.client_name)
class IommuLib(object):
def __init__(self, ramdump):
self.ramdump = ramdump
self.domain_list = []
self.arm_smmu_v12 = False
try:
if self.find_iommu_domains_msm_iommu():
pass
elif self.find_iommu_domains_debug_attachments():
pass
elif self.find_iommu_domains_device_core():
pass
else:
print_out_str("Unable to find any iommu domains")
except:
if self.ramdump.arm_smmu_v12:
self.arm_smmu_v12 = True
self.find_iommu_domains_device_core()
"""
legacy code - pre-8996/kernel 4.4?
"""
def find_iommu_domains_msm_iommu(self):
domains = list()
root = self.ramdump.read_word('domain_root')
if root is None:
return False
rb_walker = rb_tree.RbTreeWalker(self.ramdump)
rb_walker.walk(root, self._iommu_domain_func, self.domain_list)
return True
def use_only_iommu_debug_attachments(self, debug_attachment):
has_pgtbl_info = self.ramdump.read_structure_field(debug_attachment,
'struct iommu_debug_attachment', 'fmt')
has_client_name = self.ramdump.read_structure_field(debug_attachment,
'struct iommu_debug_attachment', 'client_name')
if has_pgtbl_info and has_client_name:
return True;
return False
def find_iommu_domains_legacy(self, debug_attachment):
domain_ptr = self.ramdump.read_structure_field( debug_attachment,
'struct iommu_debug_attachment', 'domain')
if not domain_ptr:
return
ptr = self.ramdump.read_structure_field(
debug_attachment, 'struct iommu_debug_attachment', 'group')
if ptr is not None:
dev_list = ptr + self.ramdump.field_offset(
'struct iommu_group', 'devices')
dev = self.ramdump.read_structure_field(
dev_list, 'struct list_head', 'next')
if self.ramdump.kernel_version >= (4, 14):
client_name = self.ramdump.read_structure_cstring(
dev, 'struct group_device', 'name')
else:
client_name = self.ramdump.read_structure_cstring(
dev, 'struct iommu_device', 'name')
else:
"""Older kernel versions have the field 'dev'
instead of 'iommu_group'.
"""
ptr = self.ramdump.read_structure_field(
debug_attachment, 'struct iommu_debug_attachment', 'dev')
kobj_ptr = ptr + self.ramdump.field_offset('struct device', 'kobj')
client_name = self.ramdump.read_structure_cstring(
kobj_ptr, 'struct kobject', 'name')
has_pgtbl_info = self.ramdump.read_structure_field(debug_attachment,\
'struct iommu_debug_attachment', 'fmt') is not None
if self.ramdump.kernel_version >= (5, 4, 0) and has_pgtbl_info:
self._find_iommu_domains_debug_attachments(debug_attachment,\
client_name, self.domain_list)
else:
self._find_iommu_domains_arm_smmu(domain_ptr, client_name,\
self.domain_list)
def find_iommu_domains(self, debug_attachment):
client_name = self.ramdump.read_structure_cstring(debug_attachment,
'struct iommu_debug_attachment', 'client_name')
self._find_iommu_domains_debug_attachments(debug_attachment,
client_name,
self.domain_list)
"""
depends on CONFIG_IOMMU_DEBUG_TRACKING
"""
def find_iommu_domains_debug_attachments(self):
list_head_attachments = self.ramdump.address_of(
'iommu_debug_attachments')
if list_head_attachments is None:
return False
offset = self.ramdump.field_offset('struct iommu_debug_attachment',
'list')
list_walker = llist.ListWalker(self.ramdump, list_head_attachments, offset)
for debug_attachment in list_walker:
if self.use_only_iommu_debug_attachments(debug_attachment):
self.find_iommu_domains(debug_attachment)
else:
self.find_iommu_domains_legacy(debug_attachment)
return True
"""
will generate domains using only the information stored in the debug
attachments structure.
"""
def _find_iommu_domains_debug_attachments(self, debug_attachment,\
client_name, domain_list):
levels = self.ramdump.read_structure_field(debug_attachment,\
'struct iommu_debug_attachment', 'levels')
pg_table = self.ramdump.read_structure_field(debug_attachment,\
'struct iommu_debug_attachment', 'ttbr0')
domain = Domain(pg_table, 0, [], client_name, ARM_SMMU_DOMAIN,
levels)
domain_list.append(domain)
"""
will only find active iommu domains. This means it will exclude most gpu domains.
"""
def find_iommu_domains_device_core(self):
domains = set()
devices_kset = self.ramdump.read_pointer('devices_kset')
if not devices_kset:
return False
list_head = devices_kset + self.ramdump.field_offset('struct kset',
'list')
offset = self.ramdump.field_offset('struct device', 'kobj.entry')
list_walker = llist.ListWalker(self.ramdump, list_head, offset)
for dev in list_walker:
iommu_group = self.ramdump.read_structure_field(dev, 'struct device', 'iommu_group')
if not iommu_group:
continue
domain_ptr = self.ramdump.read_structure_field(iommu_group, 'struct iommu_group', 'domain')
if not domain_ptr:
continue
if domain_ptr in domains:
continue
domains.add(domain_ptr)
client_name_addr = self.ramdump.read_structure_field(dev, 'struct device', 'kobj.name')
client_name = self.ramdump.read_cstring(client_name_addr)
if self.arm_smmu_v12:
self._find_iommu_domains_arm_smmu_v12(domain_ptr, client_name, self.domain_list)
else:
self._find_iommu_domains_arm_smmu(domain_ptr, client_name, self.domain_list)
return True
def _find_iommu_domains_arm_smmu_v12(self, domain_ptr, client_name, domain_list):
if self.ramdump.field_offset('struct iommu_domain', 'priv') \
is not None:
priv_ptr = self.ramdump.read_structure_field(
domain_ptr, 'struct iommu_domain', 'priv')
if not priv_ptr:
return
else:
priv_ptr = None
arm_smmu_ops_data = self.ramdump.address_of('arm_smmu_ops')
smmu_iommu_ops_offset = self.ramdump.field_offset('struct iommu_ops','default_domain_ops')
arm_smmu_ops = arm_smmu_ops_data + smmu_iommu_ops_offset
iommu_domain_ops = self.ramdump.read_structure_field(
domain_ptr, 'struct iommu_domain', 'ops')
if iommu_domain_ops is None or iommu_domain_ops == 0:
return
if priv_ptr is not None:
arm_smmu_domain_ptr = priv_ptr
else:
arm_smmu_domain_offset = 0x88 #0x60
arm_smmu_domain_ptr = domain_ptr - arm_smmu_domain_offset
pgtbl_ops_ptr = self.ramdump.read_u64(arm_smmu_domain_ptr + 0x8)
if pgtbl_ops_ptr is None or pgtbl_ops_ptr == 0:
return
level = 0
fn = self.ramdump.read_structure_field(pgtbl_ops_ptr,
'struct io_pgtable_ops', 'map')
if fn == self.ramdump.address_of('av8l_fast_map'):
level = 3
else:
arm_lpae_io_pgtable_ptr = self.ramdump.container_of(
pgtbl_ops_ptr, 'struct arm_lpae_io_pgtable', 'iop.ops')
level = self.ramdump.read_structure_field(
arm_lpae_io_pgtable_ptr, 'struct arm_lpae_io_pgtable',
'levels')
io_pgtable_ptr = self.ramdump.container_of(pgtbl_ops_ptr , 'struct io_pgtable', 'ops')
pg_table = self.ramdump.read_structure_field(io_pgtable_ptr, 'struct io_pgtable','cfg.arm_lpae_s1_cfg.ttbr')
pg_table = phys_to_virt(self.ramdump, pg_table)
domain_create = Domain(pg_table, 0, [], client_name,
ARM_SMMU_DOMAIN, level)
domain_list.append(domain_create)
def _find_iommu_domains_arm_smmu(self, domain_ptr, client_name, domain_list):
    """Resolve the page table behind one ARM SMMU iommu_domain (pre-v12
    layouts) and append a Domain record to domain_list.

    Handles three kernel generations:
      * kernels with an iommu_domain.priv driver-data pointer,
      * >= 5.4 kernels where the domain is wrapped in msm_iommu_domain,
      * older kernels where arm_smmu_domain embeds the iommu_domain.
    Domains whose ops pointer is not arm_smmu_ops are treated as legacy
    msm_iommu domains.

    :param domain_ptr: address of the 'struct iommu_domain' in the dump
    :param client_name: name of the client device attached to the domain
    :param domain_list: output list that receives a Domain object
    """
    if self.ramdump.field_offset('struct iommu_domain', 'priv') \
            is not None:
        priv_ptr = self.ramdump.read_structure_field(
            domain_ptr, 'struct iommu_domain', 'priv')
        if not priv_ptr:
            return
    else:
        priv_ptr = None
    if self.ramdump.kernel_version >= (5, 4, 0):
        # On >= 5.4, arm_smmu_ops is embedded inside msm_iommu_ops; the
        # domain's ops pointer targets the embedded member.
        smmu_iommu_ops_offset = self.ramdump.field_offset('struct msm_iommu_ops', 'iommu_ops')
        arm_smmu_ops_data = self.ramdump.address_of('arm_smmu_ops')
        arm_smmu_ops = arm_smmu_ops_data + smmu_iommu_ops_offset
    else:
        arm_smmu_ops = self.ramdump.address_of('arm_smmu_ops')
    iommu_domain_ops = self.ramdump.read_structure_field(
        domain_ptr, 'struct iommu_domain', 'ops')
    if iommu_domain_ops is None or iommu_domain_ops == 0:
        return
    if iommu_domain_ops == arm_smmu_ops:
        # ARM SMMU domain: locate the enclosing arm_smmu_domain.
        if priv_ptr is not None:
            arm_smmu_domain_ptr = priv_ptr
        elif self.ramdump.kernel_version >= (5, 4, 0):
            arm_smmu_domain_ptr_wrapper = self.ramdump.container_of(
                domain_ptr, 'struct msm_iommu_domain', 'iommu_domain')
            arm_smmu_domain_ptr = self.ramdump.container_of(
                arm_smmu_domain_ptr_wrapper, 'struct arm_smmu_domain', 'domain')
        else:
            arm_smmu_domain_ptr = self.ramdump.container_of(
                domain_ptr, 'struct arm_smmu_domain', 'domain')
        pgtbl_ops_ptr = self.ramdump.read_structure_field(
            arm_smmu_domain_ptr, 'struct arm_smmu_domain', 'pgtbl_ops')
        if pgtbl_ops_ptr is None or pgtbl_ops_ptr == 0:
            return
        level = 0
        # av8l 'fast' page tables are recorded as level 3; otherwise the
        # level count comes from the LPAE page-table descriptor.
        fn = self.ramdump.read_structure_field(pgtbl_ops_ptr,
                                               'struct io_pgtable_ops', 'map')
        if fn == self.ramdump.address_of('av8l_fast_map'):
            level = 3
        else:
            arm_lpae_io_pgtable_ptr = self.ramdump.container_of(
                pgtbl_ops_ptr, 'struct arm_lpae_io_pgtable', 'iop.ops')
            level = self.ramdump.read_structure_field(
                arm_lpae_io_pgtable_ptr, 'struct arm_lpae_io_pgtable',
                'levels')
        if self.ramdump.kernel_version >= (5, 4, 0):
            pgtbl_info_offset = self.ramdump.field_offset('struct arm_smmu_domain', 'pgtbl_info')
            pgtbl_info_data = arm_smmu_domain_ptr + pgtbl_info_offset
            pg_table = self.ramdump.read_structure_field(pgtbl_info_data, 'struct msm_io_pgtable_info', 'pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0]')
        else:
            pg_table = self.ramdump.read_structure_field(
                arm_smmu_domain_ptr, 'struct arm_smmu_domain',
                'pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0]')
        # The TTBR value is a physical address; convert before recording.
        pg_table = phys_to_virt(self.ramdump, pg_table)
        domain_create = Domain(pg_table, 0, [], client_name,
                               ARM_SMMU_DOMAIN, level)
        domain_list.append(domain_create)
    else:
        # Legacy msm_iommu domain: the page table lives in msm_iommu_pt,
        # reached through msm_iommu_priv.pt.
        priv_pt_offset = self.ramdump.field_offset('struct msm_iommu_priv',
                                                   'pt')
        pgtable_offset = self.ramdump.field_offset('struct msm_iommu_pt',
                                                   'fl_table')
        redirect_offset = self.ramdump.field_offset('struct msm_iommu_pt',
                                                    'redirect')
        # NOTE(review): if priv_pt_offset is None, pg_table/redirect are
        # never assigned and the Domain() calls below raise NameError --
        # confirm whether that combination can occur on supported dumps.
        if priv_pt_offset is not None:
            pg_table = self.ramdump.read_u64(
                priv_ptr + priv_pt_offset + pgtable_offset)
            redirect = self.ramdump.read_u64(
                priv_ptr + priv_pt_offset + redirect_offset)
        if (self.ramdump.is_config_defined('CONFIG_IOMMU_AARCH64')):
            domain_create = Domain(pg_table, redirect, [], client_name,
                                   MSM_SMMU_AARCH64_DOMAIN)
        else:
            domain_create = Domain(pg_table, redirect, [], client_name,
                                   MSM_SMMU_DOMAIN)
        domain_list.append(domain_create)
def _iommu_list_func(self, node, ctx_list):
    """List-walker callback: record one attached IOMMU context.

    Reads the context's name pointer and number out of the
    msm_iommu_ctx_drvdata at *node* and, when the name pointer is
    non-zero, appends a (number, name) tuple to ctx_list.
    """
    rd = self.ramdump
    name_ptr = rd.read_word(
        node + rd.field_offset('struct msm_iommu_ctx_drvdata', 'name'))
    num_offset = rd.field_offset('struct msm_iommu_ctx_drvdata', 'num')
    ctx_num = rd.read_u32(node + num_offset)
    if name_ptr != 0:
        # Cap the string read at 100 bytes, matching the on-disk format.
        ctx_list.append((ctx_num, rd.read_cstring(name_ptr, 100)))
def _iommu_domain_func(self, node, domain_list):
    """List-walker callback for msm_iova_data nodes: decode one legacy
    MSM IOMMU domain and append a Domain record to domain_list.

    :param node: address of the 'node' member inside a msm_iova_data
    :param domain_list: output list that receives a Domain object
    """
    domain_num = self.ramdump.read_u32(self.ramdump.sibling_field_addr(
        node, 'struct msm_iova_data', 'node', 'domain_num'))
    domain = self.ramdump.read_word(self.ramdump.sibling_field_addr(
        node, 'struct msm_iova_data', 'node', 'domain'))
    priv_ptr = self.ramdump.read_word(
        domain + self.ramdump.field_offset('struct iommu_domain', 'priv'))
    client_name_offset = self.ramdump.field_offset(
        'struct msm_iommu_priv', 'client_name')
    # Client name is best-effort: missing field -> 'unknown',
    # NULL pointer -> '(null)'.
    if client_name_offset is not None:
        client_name_ptr = self.ramdump.read_word(
            priv_ptr + self.ramdump.field_offset(
                'struct msm_iommu_priv', 'client_name'))
        if client_name_ptr != 0:
            client_name = self.ramdump.read_cstring(client_name_ptr, 100)
        else:
            client_name = '(null)'
    else:
        client_name = 'unknown'
    list_attached_offset = self.ramdump.field_offset(
        'struct msm_iommu_priv', 'list_attached')
    if list_attached_offset is not None:
        list_attached = self.ramdump.read_word(priv_ptr +
                                               list_attached_offset)
    else:
        list_attached = None
    priv_pt_offset = self.ramdump.field_offset('struct msm_iommu_priv',
                                               'pt')
    pgtable_offset = self.ramdump.field_offset('struct msm_iommu_pt',
                                               'fl_table')
    redirect_offset = self.ramdump.field_offset('struct msm_iommu_pt',
                                                'redirect')
    if priv_pt_offset is not None:
        pg_table = self.ramdump.read_word(
            priv_ptr + priv_pt_offset + pgtable_offset)
        redirect = self.ramdump.read_u32(
            priv_ptr + priv_pt_offset + redirect_offset)
    else:
        # On some builds we are unable to look up the offsets so hardcode
        # the offsets.
        pg_table = self.ramdump.read_word(priv_ptr + 0)
        redirect = self.ramdump.read_u32(priv_ptr +
                                         self.ramdump.sizeof('void *'))
    # Note: On some code bases we don't have this pg_table and redirect
    # in the priv structure (see msm_iommu_sec.c). It only contains
    # list_attached. If this is the case we can detect that by checking
    # whether pg_table == redirect (prev == next pointers of the
    # attached list).
    if pg_table == redirect:
        # This is a secure domain. We don't have access to the page
        # tables.
        pg_table = 0
        redirect = None
    ctx_list = []
    if list_attached is not None and list_attached != 0:
        # Walk the attached-context list and collect (num, name) pairs
        # via _iommu_list_func.
        list_walker = llist.ListWalker(
            self.ramdump, list_attached,
            self.ramdump.field_offset('struct msm_iommu_ctx_drvdata',
                                      'attached_elm'))
        list_walker.walk(list_attached, self._iommu_list_func, ctx_list)
    if (self.ramdump.is_config_defined('CONFIG_IOMMU_AARCH64')):
        domain_create = Domain(pg_table, redirect, ctx_list, client_name,
                               MSM_SMMU_AARCH64_DOMAIN, domain_num=domain_num)
    else:
        domain_create = Domain(pg_table, redirect, ctx_list, client_name,
                               MSM_SMMU_DOMAIN, domain_num=domain_num)
    domain_list.append(domain_create)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,591 @@
# Copyright (c) 2018, 2020 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# Cachedump parser
import os
import sys
import getopt
from struct import unpack
from math import log10, ceil, log
import ast
#-------------------------------------------------------------------------------
# Generic data structure to hold a single cache-line(can be tag or data)
# Position of the cacheline is indicated by 'set' and 'way'
# Holds an array of 32bit(4Byte) elements
#-------------------------------------------------------------------------------
class CacheLine():
    """One cache line (tag or data) located by (set, way).

    Holds an ordered list of unsigned 32-bit words; positions default
    to -1 until assigned.
    """

    def __init__(self):
        self.set = -1
        self.way = -1
        self.elements = []  # unsigned 32-bit words, in read order

    def setSet(self, s):
        self.set = s

    def getSet(self):
        return self.set

    def setWay(self, w):
        self.way = w

    def getWay(self):
        return self.way

    def addElement(self, e):
        self.elements.append(e)

    def getNumElements(self):
        return len(self.elements)

    def numElements(self):
        # Alias kept for callers using the shorter name.
        return self.getNumElements()

    def getElement(self, i, j=None):
        """Return word i, or words i|j combined into a 64-bit value."""
        if j is None:
            return self.elements[i]
        return self.elements[i] | (self.elements[j] << 32)

    def getElementStr(self, i):
        # Zero-padded 8-digit uppercase hex.
        return '{:>08X}'.format(self.getElement(i))

    def getElements(self):
        return self.elements

    def setElements(self, e):
        self.elements = e

    def __str__(self):
        words = ' '.join(self.getElementStr(k)
                         for k in range(self.getNumElements()))
        return '{:>4} {:>2} {}'.format(self.set, self.way, words)
# Generic data structure to hold an array of cacheLines
class CacheRam():
    """A whole cache/TLB RAM: geometry plus its CacheLine objects.

    'cls' is an optional decoder class; morph() applies it to every raw
    line, replacing the raw list with decoded lines.
    """

    def __init__(self, numWays, numSets, numOffsets, cacheLines, cls):
        self.sets = numSets        # number of sets
        self.ways = numWays        # number of ways
        self.offsets = numOffsets  # words per line
        self.lines = cacheLines    # CacheLine objects (raw or decoded)
        self.cls = cls             # decoder class applied by morph()

    def morph(self):
        """Decode every line with self.cls and return self (fluent)."""
        decoded = [self.cls(raw) for raw in self.lines]
        # In-place clear so aliases of the old list see it emptied,
        # matching the original del(self.lines[:]) behavior.
        del self.lines[:]
        self.lines = decoded
        return self

    def __str__(self):
        return ''.join(str(line) + '\n' for line in self.lines)
class Attribute():
    """One decoded bit-field of a cache/TLB line.

    :param name: field name shown in the header row
    :param offset: bit offset; taken modulo 32 (relative to a 32-bit word)
    :param width: field width in bits
    :param val: optional source word; when given, the field value is
        extracted from it immediately, otherwise val stays None
    :param h: non-None marks the field for hexadecimal display
    """
    def __init__(self, name, offset, width, val=None, h=None):
        self.name = name
        self.offset = offset % 32  # in bits
        self.width = width         # no. of bits
        # Idiom fix: compare against None with 'is', not '!='.
        if val is not None:
            self.val = self.getFromWord(val)
        else:
            self.val = None
        self.hex = h
    def getMask(self):
        """Return 'width' consecutive 1-bits (replaces the O(width) loop)."""
        return (1 << self.width) - 1
    def getBitMask(self):
        """Return the mask shifted into position within the word."""
        return self.getMask() << self.offset
    def getFromWord(self, w):
        """Extract this field's value from word *w*."""
        return (w >> self.offset) & self.getMask()
    def dispWidth(self):
        """Display width in characters for the field's maximum value.

        Kept as the original log-based computation so rendered column
        widths are unchanged.
        """
        if self.hex is not None:
            width = ceil(log(self.getMask(), 16))
        else:
            width = ceil(log10(self.getMask()))
        return int(width)
    def __str__(self):
        width = self.dispWidth()
        if self.hex is not None:
            # Zero-padded uppercase hex.
            return '{:>0{width}X}'.format(self.val, width=width)
        return '{:>{width}}'.format(self.val, width=width)
def getAttrStr(line):
    """Render the decoded attribute values of *line*, space-separated
    with a trailing space (matches the column layout of the headers)."""
    return ''.join('{} '.format(attr.val) for attr in line.attrArr)
def getHeaderStr(line):
    """Build the column-header row for *line*.

    Decoded lines (with an attrArr) get attribute names, hex fields
    suffixed '(0x)'. Raw lines fall back to byte-offset columns derived
    from the element count.
    """
    try:
        cols = [a.name + ('(0x)' if a.hex else '') for a in line.attrArr]
    except AttributeError:
        # Raw (un-morphed) line: one 8-wide hex byte-offset per word.
        cols = ['{:>8X}'.format(4 * i) for i in range(len(line.elements))]
    return 'Set Way ' + ' '.join(cols) + '\n'
#-------------------------------------------------------------------------------
# Kryo-3 Silver cache descriptor data structures
#-------------------------------------------------------------------------------
class Kryo3SilverL1DCacheTagLine(CacheLine):
    """Decoded Kryo-3 Silver L1 D-cache tag line.

    Bit offsets/widths are hardware-defined; do not alter without the
    core's technical reference manual at hand.
    """
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('OuterAllocationHint', 0, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('Age', 1, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('Shareability', 3, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('Dirty', 4, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('TagAddr', 1, 28, cl.getElement(1), 1))
        self.attrArr.append(Attribute('NS', 29, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('MESI', 30, 2, cl.getElement(1)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo3SilverL1ICacheTagLine(CacheLine):
    """Decoded Kryo-3 Silver L1 I-cache tag line (HW-defined bit layout)."""
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('TagAddr', 0, 28, cl.getElement(0), 1))
        self.attrArr.append(Attribute('NS', 28, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('ValidAndSetMode', 29, 2, cl.getElement(0)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo3SilverL2TLBTagLine(CacheLine):
    """Decoded Kryo-3 Silver L2 TLB tag line (HW-defined bit layout).

    Fields above bit 31 are extracted from the following word(s); the
    Attribute class reduces offsets modulo 32.
    """
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('Valid', 0, 1, cl.getElement(0), 1))
        self.attrArr.append(Attribute('NS', 1, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('ASID', 2, 16, cl.getElement(0), 1))
        self.attrArr.append(Attribute('VMID', 18, 16, cl.getElement(0), 1))
        self.attrArr.append(Attribute('size', 34, 3, cl.getElement(1)))
        self.attrArr.append(Attribute('nG', 37, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('APHyp', 38, 3, cl.getElement(1)))
        self.attrArr.append(Attribute('S2AP', 41, 2, cl.getElement(1)))
        self.attrArr.append(Attribute('Dom', 43, 4, cl.getElement(1)))
        self.attrArr.append(Attribute('S1Size', 47, 3, cl.getElement(1)))
        self.attrArr.append(Attribute('AddrSignBit', 50, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('VA', 51, 28, (cl.getElement(2)<<32)|cl.getElement(1), 1))
        self.attrArr.append(Attribute('DBM', 79, 1, cl.getElement(2)))
        self.attrArr.append(Attribute('Parity', 80, 2, cl.getElement(2)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo3SilverL2TLBDataLine(CacheLine):
    """Decoded Kryo-3 Silver L2 TLB data line (HW-defined bit layout)."""
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('XS1Usr', 0, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('XS1NonUsr', 1, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('XS2Usr', 2, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('XS2NonUsr', 3, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('MemTypeAndShareability', 4, 8, cl.getElement(0), 1))
        self.attrArr.append(Attribute('S2Level', 12, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('NS', 14, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('PA', 15, 28, (cl.getElement(1)<<32)|cl.getElement(0), 1))
        self.attrArr.append(Attribute('Parity', 43, 1, cl.getElement(1)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
#-------------------------------------------------------------------------------
# Kryo-3 Gold cache descriptor data structures
#-------------------------------------------------------------------------------
class Kryo3GoldL1DCacheTagLine(CacheLine):
    """Decoded Kryo-3 Gold L1 D-cache tag line (HW-defined bit layout)."""
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('PBHA', 0, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('LineState', 2, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('MemAttr', 4, 3, cl.getElement(0)))
        self.attrArr.append(Attribute('RRIP', 7, 3, cl.getElement(0)))
        self.attrArr.append(Attribute('TagAddr', 10, 32, (cl.getElement(1)<<32)|cl.getElement(0), 1))
        self.attrArr.append(Attribute('NS', 10, 1, cl.getElement(1)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo3GoldL1ICacheTagLine(CacheLine):
    """Decoded Kryo-3 Gold L1 I-cache tag line (HW-defined bit layout)."""
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('TagAddr', 0, 28, cl.getElement(0), 1))
        self.attrArr.append(Attribute('NS', 28, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('Valid', 29, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('Reg1', 0, 32, cl.getElement(1), 1))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo3GoldL2TLBTagLine(CacheLine):
    """Decoded Kryo-3 Gold L2 TLB tag line (HW-defined bit layout).

    NOTE(review): three consecutive fields are all named 'ASID'
    (bits 10-17, 18-21, 22-25) -- presumably sub-fields of the ASID;
    confirm the intended names against the TRM.
    """
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('S1Level', 0, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('S1TrMode', 2, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('TrRegime', 4, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('PageSize', 6, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('nG', 9, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('ASID', 10, 8, cl.getElement(0), 1))
        self.attrArr.append(Attribute('ASID', 18, 4, cl.getElement(0), 1))
        self.attrArr.append(Attribute('ASID', 22, 4, cl.getElement(0), 1))
        self.attrArr.append(Attribute('VMID', 26, 16, (cl.getElement(1)<<32)|cl.getElement(0), 1))
        self.attrArr.append(Attribute('VA', 42, 74-42+1, (cl.getElement(2)<<32)|cl.getElement(1), 1))
        self.attrArr.append(Attribute('Valid', 75, 1, cl.getElement(2)))
        self.attrArr.append(Attribute('Parity', 76, 2, cl.getElement(2)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo3GoldL2TLBDataLine(CacheLine):
    """Decoded Kryo-3 Gold L2 TLB data line (HW-defined bit layout)."""
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('PA', 0, 32, cl.getElement(0), 1))
        self.attrArr.append(Attribute('NS', 32, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('AP', 33, 3, cl.getElement(1)))
        self.attrArr.append(Attribute('S2UXN', 36, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('S2PXN', 37, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('S1UXN', 38, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('S1PXN', 39, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('S2Level', 40, 2, cl.getElement(1)))
        self.attrArr.append(Attribute('HAP', 42, 2, cl.getElement(1)))
        self.attrArr.append(Attribute('S2DBM', 44, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('MemAttr', 45, 4, cl.getElement(1), 1))
        self.attrArr.append(Attribute('ITH', 49, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('Split', 50, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('Parity', 53, 1, cl.getElement(1)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
#-------------------------------------------------------------------------------
# Kryo-4 Gold cache descriptor data structures
#-------------------------------------------------------------------------------
class Kryo4GoldL2TLBLine(CacheLine):
    """Decoded Kryo-4 Gold L2 TLB line (HW-defined bit layout).

    getElement(i, j) combines words i and j into a 64-bit value for
    fields that straddle a word boundary.
    """
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('Valid', 2, 4, cl.getElement(0), 1))
        self.attrArr.append(Attribute('Coalesced', 6, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('PageSize', 17, 3, cl.getElement(0), 1))
        self.attrArr.append(Attribute('PA', 20, 28, cl.getElement(0, 1), 1))
        self.attrArr.append(Attribute('MemAttr', 52, 3, cl.getElement(1)))
        self.attrArr.append(Attribute('InnerShared', 56, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('OuterShared', 57, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('nonGlobal', 58, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('NS', 6, 1, cl.getElement(2)))
        self.attrArr.append(Attribute('VA', 7, 29, cl.getElement(2, 3), 1))
        self.attrArr.append(Attribute('Prefetched', 36, 1, cl.getElement(3), 1))
        self.attrArr.append(Attribute('walkCache', 37, 1, cl.getElement(3), 1))
        self.attrArr.append(Attribute('PBHA', 38, 1, cl.getElement(3), 1))
        self.attrArr.append(Attribute('ASID', 40, 16, cl.getElement(3), 1))
        self.attrArr.append(Attribute('VMID', 56, 16, cl.getElement(3, 4), 1))
        self.attrArr.append(Attribute('TxlnRegime', 8, 2, cl.getElement(4), 1))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo4GoldL1DTLBLine(CacheLine):
    """Decoded Kryo-4 Gold L1 D-TLB line (HW-defined bit layout)."""
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('Valid', 0, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('VMID', 1, 16, cl.getElement(0), 1))
        self.attrArr.append(Attribute('ASID', 17, 16, cl.getElement(0, 1), 1))
        self.attrArr.append(Attribute('TxlnRegime', 33, 2, cl.getElement(1), 1))
        self.attrArr.append(Attribute('NS', 35, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('PageSize', 36, 3, cl.getElement(1)))
        self.attrArr.append(Attribute('MemAttr', 50, 3, cl.getElement(1)))
        self.attrArr.append(Attribute('InnerShared', 57, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('OuterShared', 58, 1, cl.getElement(1)))
        # VA spans words 1..3; the shift stitches word 3 above the
        # 64-bit combination of words 1 and 2.
        self.attrArr.append(Attribute('VA', 62, 37, (cl.getElement(3) << 64 ) | cl.getElement(1, 2), 1))
        self.attrArr.append(Attribute('PA', 35, 28, cl.getElement(3), 1))
        self.attrArr.append(Attribute('PBHA', 63, 2, cl.getElement(3, 4)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo4GoldL1ITLBLine(CacheLine):
    """Decoded Kryo-4 Gold L1 I-TLB line (HW-defined bit layout)."""
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('Valid', 0, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('Attr1', 1, 4, cl.getElement(0), 1))
        self.attrArr.append(Attribute('TxlnRegime', 5, 2, cl.getElement(0), 1))
        self.attrArr.append(Attribute('VMID', 7, 16, cl.getElement(0), 1))
        self.attrArr.append(Attribute('ASID', 23, 16, cl.getElement(0, 1), 1))
        self.attrArr.append(Attribute('Attr2', 39, 5, cl.getElement(1), 1))
        self.attrArr.append(Attribute('InnerShared', 44, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('OuterShared', 45, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('Attr3', 46, 4, cl.getElement(1)))
        self.attrArr.append(Attribute('PageSize', 50, 3, cl.getElement(1)))
        self.attrArr.append(Attribute('MemAttr', 53, 3, cl.getElement(1)))
        self.attrArr.append(Attribute('Attr4', 56, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('PBHA', 57, 2, cl.getElement(1)))
        self.attrArr.append(Attribute('VA', 59, 37, cl.getElement(1, 2), 1))
        self.attrArr.append(Attribute('PA', 0, 28, cl.getElement(3), 1))
        self.attrArr.append(Attribute('NS', 60, 1, cl.getElement(3)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo4GoldL1DCacheTagLine(CacheLine):
    """Decoded Kryo-4 Gold L1 D-cache tag line (HW-defined bit layout)."""
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('MESI', 0, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('WBNA', 2, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('PA', 5, 28, cl.getElement(0, 1), 1))
        self.attrArr.append(Attribute('NS', 33, 1, cl.getElement(1)))
        self.attrArr.append(Attribute('ECC', 34, 7, cl.getElement(1)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo4GoldL1ICacheTagLine(CacheLine):
    """Decoded Kryo-4 Gold L1 I-cache tag line (HW-defined bit layout).

    NOTE(review): Parity and NS pass h=0 -- Attribute formats them as
    hex (0 is not None), but getHeaderStr's truthiness check omits the
    '(0x)' suffix for them; confirm whether h=0 was meant to be None.
    """
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('Parity', 0, 1, cl.getElement(0), 0))
        self.attrArr.append(Attribute('ISA', 1, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('PA', 3, 28, cl.getElement(0), 1))
        self.attrArr.append(Attribute('NS', 31, 1, cl.getElement(0), 0))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo4GoldL2DCache256KBTagLine(CacheLine):
    """Decoded Kryo-4 Gold 256KB L2 cache tag line (HW-defined layout)."""
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('MESI', 0, 3, cl.getElement(0), 1))
        self.attrArr.append(Attribute('Valid', 3, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('OA', 4, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('Shareable', 5, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('VirtIndex', 9, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('NS', 11, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('PA', 12, 25, cl.getElement(0, 1), 1))
        self.attrArr.append(Attribute('PBHA', 37, 2, cl.getElement(1)))
        self.attrArr.append(Attribute('ECC', 39, 7, cl.getElement(1)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
class Kryo4GoldL2DCache512KBTagLine(CacheLine):
    """Decoded Kryo-4 Gold 512KB L2 cache tag line (HW-defined layout).

    Same fields as the 256KB variant but the PA field is one bit
    narrower (24 vs 25), shifting PBHA/ECC down accordingly.
    """
    def __init__(self, cl):
        self.setWay(cl.getWay())
        self.setSet(cl.getSet())
        self.attrArr = []
        self.attrArr.append(Attribute('MESI', 0, 3, cl.getElement(0), 1))
        self.attrArr.append(Attribute('Valid', 3, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('OA', 4, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('Shareable', 5, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('VirtIndex', 9, 2, cl.getElement(0)))
        self.attrArr.append(Attribute('NS', 11, 1, cl.getElement(0)))
        self.attrArr.append(Attribute('PA', 12, 24, cl.getElement(0, 1), 1))
        self.attrArr.append(Attribute('PBHA', 36, 2, cl.getElement(1)))
        self.attrArr.append(Attribute('ECC', 38, 7, cl.getElement(1)))
    def __str__(self):
        return '{:>4}'.format(self.set) + ' ' + '{:>2}'.format(self.way) + ' ' + ' '.join(str(a) for a in self.attrArr)
#-------------------------------------------------------------------------------
# This is a generic function to extract words(4B) from a file
# It is assumed that 'inputFileName' is valid and exists
# The words are extracted in a nested for loop as follows:
# for i in (0: loopLimitA-1)
# for j in (0: loopLimitB-1)
# for k in (0: loopLimitC-1)
# read 4B word
#-------------------------------------------------------------------------------
def extractFileContents(inputFileName, offset, loopLimitA, loopLimitB, loopLimitC, cr):
    """Read 4-byte words from *inputFileName* into *cr* (a CacheRam).

    Words are consumed way-major starting at byte *offset*:
        for way in 0..loopLimitA-1:
            for set in 0..loopLimitB-1:
                read loopLimitC words into one CacheLine
    cr.ways/sets/offsets/lines are overwritten in place.

    :raises struct.error: if the file is shorter than the requested
        geometry (a read returns fewer than 4 bytes).
    """
    cacheLineArr = []
    # Fix: use a context manager so the handle is closed even when a
    # short read makes unpack() raise (the original leaked it then).
    with open(inputFileName, 'rb') as inFile:
        inFile.seek(offset)
        for iterWay in range(loopLimitA):
            for iterSet in range(loopLimitB):
                cacheLine = CacheLine()
                cacheLine.setWay(iterWay)
                cacheLine.setSet(iterSet)
                for _ in range(loopLimitC):
                    word = inFile.read(4)
                    # 'I' = little-endian-native unsigned 32-bit word.
                    cacheLine.addElement(unpack('I', word)[0])
                cacheLineArr.append(cacheLine)
    cr.sets = loopLimitB
    cr.ways = loopLimitA
    cr.offsets = loopLimitC
    cr.lines = cacheLineArr
    return
def main(argv):
    """Parse command-line options and dump the requested cache/TLB RAM.

    Options: -i/--ifile input file, -o/--ofile (recorded, unused),
    -t/--type dump type (default DCD), -c/--cpu core name,
    -s/--seek byte offset into the input. Output goes to stdout,
    diagnostics to stderr. Exits 2 on bad arguments or unknown CPU.
    """
    inputFileName = ''
    outputFileName = ''
    dumpType = 'DCD'
    offset = 0
    cpu = ''
    try:
        opts, args = getopt.getopt(argv, "hi:o:t:s:c:", ["help", "ifile=", "ofile=", "type=", "seek=", "cpu="])
    except getopt.GetoptError:
        print('ERROR: Incorrect arguments. Run with -h for help')
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print('parse.py -i <inputfile> -c <cpu> -t <cachedump type> -s <offset>')
            print(' -c can take kryo3gold, kryo3silver')
            print(" -t can take DCD(for D$ Data), DCT(for D$ Tag), ICD(for I$ Data), ICT(for I$ Tag), TLBD(for TLB Data), TLBT(for TLB Tag)")
            print(' -s is a byte offset into the input file')
            print('All output gets written to stdout')
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputFileName = arg
        elif opt in ("-o", "--ofile"):
            outputFileName = arg
        elif opt in ("-t", "--type"):
            dumpType = arg.upper()
        elif opt in ("-c", "--cpu"):
            cpu = arg.lower()
        elif opt in ("-s", "--seek"):
            # literal_eval safely accepts decimal or 0x-prefixed offsets.
            offset = ast.literal_eval(arg)
    if inputFileName == '' or not os.path.isfile(inputFileName):
        sys.stderr.write('Error: You need to provide a valid input file name\n')
        sys.exit(2)
    sys.stderr.write("Input file :" + inputFileName + '\n')
    sys.stderr.write("Output file :" + outputFileName + '\n')
    sys.stderr.write("CPU :" + cpu + '\n')
    sys.stderr.write("Dump type :" + dumpType + '\n')
    sys.stderr.write("Offset :" + str(offset) + '\n')
    # Geometry tables replacing the original per-CPU if/elif ladder.
    # Each entry: dumpType -> (decoder class or None for raw word dumps,
    # ways, sets, words-per-line), as passed to extractFileContents.
    silverRams = {
        'DCD':  (None, 4, 128, 16),
        'DCT':  (Kryo3SilverL1DCacheTagLine, 4, 128, 2),
        'ICD':  (None, 4, 128, 16),
        'ICT':  (Kryo3SilverL1ICacheTagLine, 4, 128, 2),
        'TLBT': (Kryo3SilverL2TLBTagLine, 4, 256, 3),
        'TLBD': (Kryo3SilverL2TLBDataLine, 4, 256, 2),
    }
    cpuRams = {
        'kryo3silver': silverRams,
        'kryo4silver': silverRams,  # same RAM layout as kryo3silver
        'kryo3gold': {
            'DCD':  (None, 16, 64, 16),
            'DCT':  (Kryo3GoldL1DCacheTagLine, 16, 64, 2),
            'ICD':  (None, 4, 256, 16),
            'ICT':  (Kryo3GoldL1ICacheTagLine, 4, 256, 2),
            'TLBT': (Kryo3GoldL2TLBTagLine, 4, 256, 3),
            'TLBD': (Kryo3GoldL2TLBDataLine, 4, 256, 3),
        },
        'kryo4gold': {
            'DCD':     (None, 4, 256, 16),
            'DCT':     (Kryo4GoldL1DCacheTagLine, 4, 256, 2),
            'ICD':     (None, 4, 256, 16),
            'ICT':     (Kryo4GoldL1ICacheTagLine, 4, 256, 1),
            'L2256CD': (None, 8, 512, 16),
            'L2512CD': (None, 8, 1024, 16),
            'L2256CT': (Kryo4GoldL2DCache256KBTagLine, 8, 512, 2),
            'L2512CT': (Kryo4GoldL2DCache512KBTagLine, 8, 1024, 2),
            'L1ITLB':  (Kryo4GoldL1ITLBLine, 1, 48, 4),
            'L1DTLB':  (Kryo4GoldL1DTLBLine, 1, 48, 6),
            'TLBD':    (Kryo4GoldL2TLBLine, 5, 256, 6),
        },
    }
    if cpu not in cpuRams:
        sys.stderr.write("Incorrect CPU\n")
        sys.exit(2)
    ramInfo = cpuRams[cpu].get(dumpType)
    if ramInfo is None:
        # Unknown dump type for a known CPU: original behavior was to
        # print nothing and exit successfully.
        return
    lineCls, ways, sets, words = ramInfo
    cache = CacheRam(None, None, None, None, lineCls)
    extractFileContents(inputFileName, offset, ways, sets, words, cache)
    if lineCls is None:
        # Raw RAM: dump the undecoded words.
        sys.stdout.write(getHeaderStr(cache.lines[0]))
        sys.stdout.write(str(cache))
    else:
        # Decoded RAM: header from a decoded sample, then morph and dump.
        sys.stdout.write(getHeaderStr(lineCls(cache.lines[0])))
        sys.stdout.write(str(cache.morph()))
# Script entry point: forward the CLI args (sans program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])

View File

@@ -0,0 +1,499 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
import re
import struct
import copy
from typing import Any
from print_out import printd
from print_out import print_out_str
class Param():
    """Base record for one parsed line of `ptype /o struct <name>` output.

    Each line of the gdb output becomes one Param, e.g.::

        /* offset | size */  type = struct elf64_hdr {
        /*  0     |   16 */    unsigned char e_ident[16];
        /* 16     |    2 */    Elf64_Half e_type;

    Stored state:
      _p_name          field name (e.g. e_type or e_ident)
      _p_offset        field byte offset (e.g. 16)
      _p_size          field byte size (e.g. 2)
      _p_prefix        print indent for sub-struct or sub-field
      _p_type_name     type name declared in the struct (e.g. Elf64_Half)
      _p_rel_type_name resolved basic type name (e.g. unsigned short),
                       used to pick the pack/unpack format
      _p_data          the raw data content
    """
    def __init__(self, name, offset, size, prefix="") -> None:
        # Positional metadata taken straight from the gdb line.
        self._p_name = name
        self._p_prefix = prefix
        self._p_size = size
        self._p_offset = int(offset)
        # Type names are filled in later by the parser.
        self._p_type_name = ""
        self._p_rel_type_name = ""
        # Raw content, populated when bytes are unpacked.
        self._p_data = 0
class StructObj(Param):
    '''
    Represents a struct/union parsed from gdb `ptype /o` output.

    :field_mapping: all the fields defined in the structure,
                    a dict mapping attr_name --> FieldObj (or a nested
                    StructObj for embedded structs/unions)
    '''
    def __init__(self, type_name, offset, size, prefix) -> None:
        # A struct node has no field name of its own yet; the name is
        # filled in later when the closing "} name;" line is parsed.
        super().__init__("", offset, size, prefix)
        self._p_type_name = type_name
        self.field_mapping = {}
    def add_field(self, attr_name, fieldobj):
        '''Register a member (FieldObj or nested StructObj) under attr_name.'''
        self.field_mapping[attr_name] = fieldobj
    def fields(self):
        '''Return the dict of all registered member objects.'''
        return self.field_mapping
    def get_field(self, attr_name):
        '''Return the member named attr_name (raises KeyError if absent).'''
        return self.field_mapping[attr_name]
    def __repr__(self) -> str:
        '''
        String output for the object, used for str() or repr().
        Mirrors the gdb "/* offset | size */ type name = {...}" layout.
        '''
        ret ="/* {} | {} */ {} {} {}=".format(
            self._p_offset, self._p_size, self._p_prefix, self._p_type_name, self._p_name)
        ret += "{\n"
        for obj in self.field_mapping.values():
            # Indent nested members one level deeper than this struct.
            obj._p_prefix = self._p_prefix + " "
            ret += repr(obj)
        ret += self._p_prefix + "}\n"
        return ret
    def __getattr__(self, name):
        '''Attribute access falls through to field_mapping: returns a nested
        StructObj itself, or the unpacked data value for a plain field.'''
        if "__" in name: ## private attribute
            # e.g. task_struct->__state arrives name-mangled; rebuild the
            # double-underscore form the struct actually uses.
            name = "__" + name.split("__")[-1]
        obj = self.field_mapping[name]
        if isinstance(obj, StructObj):
            return obj
        else:
            return obj._p_data
    def __setattr__(self, __name: str, __value: Any):
        # struct task_struct->__state
        # this field was a private field in python
        # here need to pre-handle it.
        if "__" in __name:
            __name = "__" + __name.split("__")[-1]
        try:
            if __name in self.__dict__["field_mapping"].keys():
                obj = self.__dict__["field_mapping"][__name]
                if isinstance(obj, FieldObj):
                    self.__dict__["field_mapping"][__name]._p_data = __value
                else:
                    raise Exception("Set to {} {} was not allowed".format(self._p_type_name, self._p_name))
            else:
                super().__setattr__(__name, __value)
        except:
            # NOTE(review): this bare except also swallows the
            # "not allowed" exception raised just above, silently falling
            # back to a normal attribute set -- confirm this is intended.
            return super().__setattr__(__name, __value)
    def __bytes__(self):
        '''
        Generate bytes for the object, used for bytes(object).
        Packs every member at its offset into a zero-filled buffer of
        exactly _p_size bytes; raises if the result size mismatches.
        '''
        pbytes = bytearray(b'\x00' * self._p_size)
        for obj in self.field_mapping.values():
            # Member offsets are absolute; rebase against this struct's start.
            pos_s = obj._p_offset-self._p_offset
            pos_e = pos_s + obj._p_size
            o_bytes =bytes(obj)
            pbytes[pos_s : pos_e] = o_bytes
        if len(pbytes) != self._p_size:
            print_out_str("++++++++ ERROR ++++++++++++++++")
            print_out_str(self)
            raise Exception("{} {} size is incorrect {} expected {}".format(
                self._p_type_name, self._p_name, len(pbytes), self._p_size))
        return bytes(pbytes)
    def __deepcopy__(self, src):
        # Custom deepcopy: rebuild the node, then deep-copy every attribute
        # (including field_mapping) through __setattr__.
        dst = StructObj(self._p_type_name, self._p_offset, self._p_size, self._p_prefix)
        for key, value in self.__dict__.items():
            setattr(dst, key, copy.deepcopy(value))
        return dst
class FieldObj(Param):
    '''
    Represents a single (non-struct) field in a structure.
    '''
    def __init__(self, ramdump, name, offset, size, type_name, rel_type_name, prefix="") -> None:
        super().__init__(name, offset, size, prefix)
        self._p_type_name = type_name
        self._p_rel_type_name = rel_type_name
        # True when the field stores a list of values (C array, or a
        # scalar wider than 8 bytes split into 8-byte chunks).
        self.is_array_type = False
        self.preset_array_field()
        # formats to pack or unpack for sturct
        # (ramdump is None when called from __deepcopy__; 'formats' is
        # then copied over afterwards instead of being recomputed)
        if ramdump:
            self.formats = self.unpack_formats(ramdump)
    def unpack_formats(self, ramdump):
        '''Return the struct pack/unpack format for one element of this
        field, derived from the basic type name and per-element size.'''
        fmt = None
        size = self._p_size
        if self.is_array_type:
            # Use the per-element size for arrays.
            size = int(self._p_size / len(self._p_data))
        try:
            fmt = getattr(ramdump, "_RamDump__unpack_format")(size, self._p_rel_type_name)
            fmt = fmt.strip() ## to test fmt is None
        except Exception as e:
            msg = "type {} rel type {} size {}".format(self._p_type_name, self._p_rel_type_name, size)
            raise Exception(msg)
        return fmt
    def get_array_len(self):
        '''Extract the array length from the field name (e.g. e_ident[16])
        and/or the resolved type name (e.g. "unsigned long [34]").
        Side effect: strips the [N] suffix from the stored names.
        Returns 0 when no array suffix is present.'''
        # unsigned char e_ident[16];
        array_len = 0
        match = re.search("(.*)\[(\d+)\]", self._p_name)
        if match:
            self._p_name = match.group(1)
            array_len = int(match.group(2))
        ## ptype elf_gregset_t type = unsigned long [34]
        if self._p_rel_type_name != self._p_type_name:
            match = re.search("(.*)\[(\d+)\]", self._p_rel_type_name)
            if match:
                self._p_rel_type_name = match.group(1)
                # Multiply when both the name and the type carry a length
                # (array of arrays).
                array_len = int(match.group(2)) if array_len == 0 else int(match.group(2)) * array_len
        return array_len
    def preset_array_field(self):
        '''Pre-size self._p_data as a list when the field needs multiple
        storage slots; elements wider than 8 bytes are split into
        8-byte chunks.'''
        ### specifix for __uint128_t
        array_len = self.get_array_len()
        if array_len >1:
            item_size = int(self._p_size /array_len)
            if item_size > 8:
                printd(self, "{} has a huge type {} size:{} need a list to store data".format(
                    self._p_data, self._p_type_name, self._p_size ))
                array_len = int(item_size / 8) * array_len
        elif self._p_size > 8:
            printd(self, "{} has a huge type {} size:{} need a list to store data".format(
                self._p_data, self._p_type_name, self._p_size ))
            array_len = int(self._p_size / 8)
        if array_len > 1:
            self.is_array_type = True
            self._p_data = [0 for i in range(0, array_len)]
    def fill_data(self, dbytes):
        '''Unpack raw bytes into self._p_data (element-wise for arrays).'''
        if self.is_array_type:
            object_size = int(self._p_size / len(self._p_data))
            for i in range(0, len(self._p_data)):
                self._p_data[i] = struct.unpack(self.formats, dbytes[(i * object_size) : ((i+1) * object_size)])[0]
        else:
            self._p_data = struct.unpack(self.formats, dbytes)[0]
    def __bytes__(self):
        '''Pack self._p_data back into raw bytes; must round-trip to
        exactly _p_size bytes or an Exception is raised.'''
        dbytes = b''
        if self.is_array_type:
            for data in self._p_data:
                dbytes += struct.pack(self.formats, data)
        else:
            dbytes += struct.pack(self.formats, self._p_data)
        if len(dbytes) != self._p_size:
            print_out_str("++++++++ ERROR ++++++++++++++++")
            print_out_str(self)
            raise Exception("Fieldobj {} {}__bytes size is incorrect {} expected {}"
                            .format(self._p_type_name, self._p_name, len(dbytes), self._p_size))
        return dbytes
    def __repr__(self) -> str:
        ret = "/* {} | {} {} */ {} {} {}= ".format(
            self._p_offset, self._p_size, self.formats,
            self._p_prefix, self._p_type_name, self._p_name)
        if self._p_rel_type_name == "char" and type(self._p_data) == list:
            #char * array
            ret += "{} ".format(bytes(self._p_data).decode('ascii', 'ignore').split("\0")[0])
            ret += " "+"{}".format(self._p_data)
        elif self._p_rel_type_name == "char" and type(self._p_data) == int:
            # NOTE(review): rel_type_name is exactly "char" here, so the
            # "unsigned" substring test below can never match -- the first
            # branch appears to be dead code; confirm intent.
            if "unsigned" in self._p_rel_type_name:
                ret += "{:c}".format(self._p_data)
            else:
                ret += "{}".format(self._p_data)
        elif type(self._p_data) == int:
            # Known id-like fields print in decimal; everything else in hex.
            if self._p_name in ["pr_pid", "pr_uid", "pr_gid", "pr_ppid", "pr_pgrp", "pr_sid"]:
                ret += "{}".format(self._p_data)
            else:
                ret += "0x{:x}".format(self._p_data)
        elif type(self._p_data) == bytes:
            ret += "{}".format(self._p_data)
        else:
            ret += "{}".format(self._p_data)
        ret += "\n"
        return ret
    def __deepcopy__(self, src):
        # Rebuild without a ramdump (skips unpack_formats), then deep-copy
        # every attribute, including the already-computed 'formats'.
        dst = FieldObj(None, self._p_name, self._p_offset, self._p_size,
                       self._p_type_name, self._p_rel_type_name, self._p_prefix)
        for key, value in self.__dict__.items():
            setattr(dst, key, copy.deepcopy(value))
        return dst
class StructParser:
    '''Parses gdb `ptype /o` output into StructObj/FieldObj trees and
    fills them with raw bytes read from the ramdump.'''
    def __init__(self, ramdump) -> None:
        self.ramdump = ramdump
        # Cache: type name -> (parsed object/type, size, attr name).
        self.datatype_dict = {}
        # Cache: struct address -> last object read from that address.
        self.addr_struct_dict = {}
    def read_struct(self, struct_addr, type_name, attr_list=None):
        '''Read sizeof(type_name) bytes at struct_addr and return a
        populated struct object. attr_list optionally restricts which
        top-level members are filled.'''
        size = self.ramdump.sizeof(type_name)
        data = getattr(self.ramdump, "_RamDump__get_bin_data")(struct_addr, size)
        if not data:
            raise Exception("Error!! read_struct get None data from address 0x{} with size {}".format(
                struct_addr, size))
        if len(data) != size:
            raise Exception("Error!! read_struct get data from address 0x{} with size {}, but got size {}".format(
                struct_addr, size, len(data)))
        var_obj = self.parser_struct_def(type_name)
        self.fill_pdata(var_obj, data, attr_list)
        # Sanity check: the object must round-trip to the same byte count.
        _p_data = bytes(var_obj)
        if len(_p_data) != len(data):
            raise Exception("Error!! read_struct size is not same {} {}".format(len(_p_data), len(data)))
        self.addr_struct_dict[struct_addr] = var_obj
        #return copy.deepcopy(var_obj)
        return var_obj
    def parser_struct_def(self, type_name, attr_list=None):
        '''Return a fresh (deep-copied) empty struct object for type_name,
        validating the parsed size against sizeof(type_name).'''
        size = self.ramdump.sizeof(type_name)
        var_obj, vsize, tem_p_name = self.__parser_struct_def(type_name)
        if vsize != size:
            raise Exception("sizeof({}) size={} parser type info got invalid size {}".format(
                type_name, size, vsize))
        if not var_obj:
            raise Exception("Parse {} type failed".format(type_name))
        # Deep copy so the cached template is never mutated by callers.
        return copy.deepcopy(var_obj)
    def fill_pdata(self, var_type, data, attr_list=None):
        '''Recursively unpack raw bytes into every member of var_type.
        attr_list (top level only) restricts which members are filled.'''
        for attr_name, fieldobj in var_type.field_mapping.items():
            if attr_list and attr_name not in attr_list:
                continue
            if isinstance(fieldobj, StructObj):
                self.fill_pdata(fieldobj, data)
            elif isinstance(fieldobj, FieldObj):
                fieldobj.fill_data(data[fieldobj._p_offset: fieldobj._p_offset + fieldobj._p_size])
    def __parser_struct_def(self, the_type, offset=0):
        """
        Function to return type info for the type.
        :param the_type: type of the structure field.
        :type the_type: str
        :return: d_type, size
        """
        if the_type in self.datatype_dict.keys():
            # Cached from an earlier parse.
            return self.datatype_dict[the_type]
        else:
            text = []
            try:
                text = self.ramdump.gdbmi.getStructureData(the_type)
                size = self.ramdump.sizeof(the_type)
            except Exception as e:
                raise Exception("Parse %s failed!! %s" % (the_type, str(e)))
            if text:
                # First line looks like: "type = struct foo {"; pull out
                # the bare type name.
                d_type = text[0].split("type = ")[1]
                d_type = d_type.replace("{", "").strip()
                d_type = getattr(self.ramdump, "_RamDump__ignore_storage_class")(d_type)
                d_type = getattr(self.ramdump, "_RamDump__ignore_expanded_pointer")(text, d_type)
                if d_type == "struct":
                    '''
                    (gdb) ptype atomic_t
                    type = struct {
                        int counter;
                    }
                    '''
                    d_type = the_type
                if not self.is_general_type(d_type):
                    # Composite type: build the full member tree.
                    master_obj, attr_name, _ = self.__create_object(text, offset, 0, size)
                    self.datatype_dict[the_type] = master_obj, size, attr_name
                    return master_obj, size, attr_name
                # Basic/scalar type: cache the resolved name only.
                self.datatype_dict[the_type] = d_type, size, None
                return d_type, size, None
            return None, 0, None
    def is_general_type(self, datatype):
        '''Return True when datatype is a basic scalar or pointer type
        that needs no member expansion.'''
        if "*" in datatype:
            # a pointer type
            return True
        datalist = self.ramdump.gdbmi.getStructureData(datatype)
        if datalist[-1].strip() == "} *" and self.ramdump.sizeof(datatype) == 8:
            # struct pointer type eg: ptype lockdep_map_p
            return True
        return len(datalist) == 1
    def __create_object(self, text, base_offset, curr_index, obj_size, prefix="",):
        '''
        Function to create a python object from the gdb text output with meta data
        like size and offset of all the members, needed to populate the values from
        the binary dump files.
        :param text: text gdb output for a particular symbol/type.
        :type the_type: str
        :param base_offset: base offset value.
        :type field: int
        :param curr_index: current line index in 'text'.
        :type field: int
        :return: py object created based on 'text', array check flag, current index

        /* offset | size */ type = struct elf_prstatus_common {
        /* 0 | 12 */ struct elf_siginfo {
        '''
        # Recover the struct/union type name from the opening line.
        if curr_index == 0:
            d_type = text[0].split("{")[0]
        else:
            d_type = text[curr_index-1].split("{")[0]
        d_type = d_type.split("[")[0]
        d_type = d_type.split("*/")[-1].split("type =")[-1].strip()
        # Dynamically create a class named after the struct type so debug
        # output carries the type name.
        newclass = type(d_type, (StructObj,), {})
        curr_obj = newclass(d_type, base_offset, obj_size, prefix)
        curr_offset = base_offset
        total_size = len(text)
        size = 0
        attr_name = None
        while total_size > curr_index:
            line = text[curr_index]
            curr_index = curr_index + 1
            if line is None:
                break
            if "(" in line and ")" in line:
                # Skip function-pointer members.  NOTE(review): this also
                # skips any member line that merely contains parentheses.
                continue
            if re.search("\/\* offset\s+\|\s+ size \*\/", line.lstrip().rstrip()):
                # Header line of the ptype /o output.
                continue
            is_struct, curr_offset, size = self.is_struct_field(line, curr_offset)
            if is_struct:
                ### structobj, struct name,
                obj, attr_name, curr_index = self.__create_object(text, curr_offset, curr_index, size, prefix + " ")
                '''
                /* 2176 | 0 */ struct syscall_user_dispatch {
                <no data fields>
                /* total size (bytes): 0 */
                } syscall_dispatch;
                '''
                ## put condition here
                if size == 0:
                    continue
                if attr_name is not None:
                    curr_obj.add_field(attr_name, obj)
                else:
                    # adding anonimous union members to parent
                    for attr, value in obj.field_mapping.items():
                        curr_obj.add_field(attr, value)
            else:
                is_field, curr_offset, size, datatype, attr_name = self.is_gen_field(line, curr_offset)
                # /* 1088 | 0 */ unsigned long cpu_bitmap[];
                if size == 0: #skip padding
                    continue
                if is_field:
                    if ")(" in datatype:
                        # Function-pointer style declarator: the name sits
                        # inside the first parenthesised group.
                        attr_name = datatype.split(")(")[0].split("(")[1]
                    if attr_name.lstrip()[0] == '*':
                        # Pointer declared on the name; move the '*' onto
                        # the type.
                        datatype = datatype + " *"
                        attr_name = attr_name.lstrip('*')
                    curr_field = None
                    if not self.is_general_type(datatype):
                        # /* 112 | 272 */ elf_gregset_t pr_reg;
                        var_type = self.parser_struct_def(datatype)
                        var_type._p_name = attr_name
                        var_type._p_prefix = prefix + " "
                        var_type._p_offset = curr_offset
                        self.adjust_offset(var_type, curr_offset)
                        curr_obj.add_field(attr_name, var_type)
                    else:
                        newclass = type(attr_name, (FieldObj,), {})
                        if "*" in datatype:
                            rel_type = datatype
                        else:
                            rel_type = self.ramdump.gdbmi.getStructureData(datatype)[0].split("type = ")[1]
                        curr_field = newclass(self.ramdump, attr_name, curr_offset, size, datatype, rel_type)
                        #setattr(curr_obj, attr_name, curr_field)
                        curr_obj.add_field(curr_field._p_name, curr_field)
                    continue
                # Closing brace variants terminate this (possibly nested)
                # struct: "} name;", "};", or "} [N]" for struct arrays.
                re_obj = re.search('\s*} (\S+);', line)
                if re_obj is not None:
                    curr_obj._p_name = re_obj.group(1)
                    return curr_obj, re_obj.group(1), curr_index
                re_obj = re.search('\s*};', line)
                if re_obj:
                    return curr_obj, None, curr_index
                re_obj = re.search('\s*}\s*(\[\d+\])', line)
                if re_obj:
                    return curr_obj, re_obj.group(1), curr_index
        # None means unnamed union or struct
        return curr_obj, None, curr_index
    def adjust_offset(self, var_type, offset):
        '''Shift every member offset (recursively) by `offset`; used when a
        cached type template is embedded at a new position.'''
        for _name, fieldobj in var_type.field_mapping.items():
            fieldobj._p_offset += offset
            if isinstance(fieldobj, StructObj):
                self.adjust_offset(fieldobj, offset)
    def is_struct_field(self, line, curr_offset):
        '''Detect a nested struct/union opening line; returns
        (is_struct, updated_offset, size).'''
        # sample match : "/* 0 | 40 */ struct thread_info {"
        re1 = re.search('\s+(\d+)\s+[|]\s+(\d+) \*\/\s+(struct|union) .*{', line)
        if re1:
            curr_offset = int(re1.group(1))
            size = int(re1.group(2))
            return True, curr_offset, size
        # sample match : "/* 8 */ struct {"
        re2 = re.search('\/\*\s+(\d+) \*\/\s+(struct|union) .*{', line)
        if re2:
            # Union member: only the size is printed; offset is inherited.
            size = int(re2.group(1))
            return True, curr_offset, size
        return False, curr_offset, None
    def is_gen_field(self, line, curr_offset):
        '''Detect a plain member line; returns
        (is_field, updated_offset, size, datatype, attr_name).'''
        # sample match : "/* 20 | 4 */ u32 need_resched;"
        re1 = re.search('/\*\s+(\d+)\s+[|]\s+(\d+)\s\*/\s+([^:]+) (\S+);', line)
        if re1 is not None:
            curr_offset = int(re1.group(1))
            size = int(re1.group(2))
            datatype = re1.group(3)
            attr_name = (re1.group(4))
            return True, curr_offset, size, datatype, attr_name
        # sample match : "/* 4 */ uint32_t v;"
        re2 = re.search('/\*\s+(\d+)\s\*/\s+([^:]+) (\S+);', line)
        if re2 is not None:
            # Union member: only the size is printed; offset is inherited.
            size = int(re2.group(1))
            datatype = re2.group(2)
            attr_name = (re2.group(3))
            return True, curr_offset, size, datatype, attr_name
        return False, curr_offset, None, None, None

View File

@@ -0,0 +1,21 @@
Summary: linux-ramdump-parser-v2 RPM
Name: linux-ramdump-parser-v2
Version: 1.0
Release: 1%{?dist}
Source0: %{name}-%{version}.tar.gz
License: GPL-2.0
Group: Development/Tools
%description
The linux-ramdump-parser-v2 RPM for parsing ramdumps
%prep
%setup -q
%install
rm -rf %{buildroot}
mkdir -p %{buildroot}/%{_bindir}
cp -fr ../%{name}-%{version} %{buildroot}/%{_bindir}/%{name}
%files
%{_bindir}/%{name}*

View File

@@ -0,0 +1,49 @@
#Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License version 2 and
#only version 2 as published by the Free Software Foundation.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
from print_out import print_out_str
class hListWalker(object):
    '''
    Iterator over a kernel hlist (struct hlist_head / hlist_node chain).

    ramdump: Reference to the ram dump
    head_addr: The address of the hlist_head of this list
    list_elem_offset: The offset of the hlist_node in the structure that
                      this list is the container for.

    Iteration yields the address of each containing structure (node
    address minus list_elem_offset) and stops when the chain reaches a
    NULL/unreadable next pointer.
    '''
    def __init__(self, ramdump, head_addr, list_elem_offset):
        self.ramdump = ramdump
        self.list_elem_offset = list_elem_offset
        self.first_node = head_addr
        self.next_node = None
    def __iter__(self):
        return self
    def __next__(self):
        if self.first_node:
            # First step: dereference hlist_head->first.
            offset = self.ramdump.field_offset('struct hlist_head', 'first')
            first_node_addr = self.first_node + offset
            self.next_node = self.ramdump.read_word(first_node_addr)
            self.first_node = None
        else:
            if self.next_node:
                # Subsequent steps: follow hlist_node->next.
                next_node_addr = self.next_node + self.ramdump.field_offset('struct hlist_node', 'next')
                self.next_node = self.ramdump.read_word(next_node_addr)
            else:
                raise StopIteration()
        if self.next_node is not None and self.next_node != 0:
            # Convert the node address to its containing-structure address.
            return self.next_node - self.list_elem_offset
        else:
            raise StopIteration()
    def next(self):
        # Python 2 style alias.  Bug fix: previously returned the bound
        # method object (`self.__next__`) instead of calling it.
        return self.__next__()

View File

@@ -0,0 +1,133 @@
# Copyright (c) 2013-2014, 2016-2017, 2020 The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from print_out import print_out_str
'''
struct list_head {
struct list_head *next, *prev;
};
'''
class ListWalker(object):
    '''
    Iterator over a kernel doubly-linked list (struct list_head).

    ram_dump: Reference to the ram dump
    node_addr: The address of the list head of the list
    list_elem_offset: The offset of the list_head in the structure that
                      this list is the container for.

    Iteration yields the containing-structure address of each element and
    stops when the walk returns to the head, hits a NULL pointer, or
    detects a cycle (corrupted list).
    '''
    def __init__(self, ram_dump, node_addr, list_elem_offset):
        self.ram_dump = ram_dump
        self.list_elem_offset = list_elem_offset
        self.last_node = node_addr
        # Nodes already visited; used for cycle detection.
        self.seen_nodes = []
        self.curr_node = node_addr
    def __iter__(self):
        return self
    def __next__(self):
        next_node_addr = self.curr_node + \
            self.ram_dump.field_offset('struct list_head', 'next')
        next_node = self.ram_dump.read_word(next_node_addr)
        self.curr_node = next_node
        if next_node == self.last_node:
            # Wrapped around to the head: normal termination.
            raise StopIteration()
        elif next_node in self.seen_nodes:
            print_out_str(
                '[!] WARNING: Cycle found in attach list. List is corrupted!')
            raise StopIteration()
        elif not next_node:
            # NULL (or unreadable) next pointer.
            raise StopIteration()
        else:
            self.seen_nodes.append(next_node)
            return next_node - self.list_elem_offset
    def next(self):
        # Python 2 compatibility alias.
        return self.__next__()
    def is_empty(self):
        """Return True if the list is empty, False otherwise.

        An empty list_head points back at itself.
        """
        if self.last_node is None:
            return True
        next_node_addr = self.last_node + self.ram_dump.field_offset('struct list_head', 'next')
        next_node = self.ram_dump.read_word(next_node_addr)
        return next_node == self.last_node
    def walk(self, node_addr, func, *args):
        """Walk the linked list starting at `node_addr', calling `func' on
        each node. `func' will be passed the current node and *args,
        if given.
        """
        if self.is_empty():
            return
        while True:
            # Idiom fix: compare to None with `is`, not `==`.
            if node_addr == 0 or node_addr is None:
                break
            funcargs = [node_addr - self.list_elem_offset] + list(args)
            func(*funcargs)
            next_node_addr = node_addr + self.ram_dump.field_offset('struct list_head', 'next')
            next_node = self.ram_dump.read_word(next_node_addr)
            if next_node == self.last_node:
                break
            if next_node in self.seen_nodes:
                print_out_str(
                    '[!] WARNING: Cycle found in attach list. List is corrupted!')
                break
            node_addr = next_node
            self.seen_nodes.append(node_addr)
    def walk_prev(self, node_addr, func, *args):
        """Walk the linked list starting at `node_addr' previous node and traverse the list in
        reverse order, calling `func' on each node. `func' will be passed the current node and *args,
        if given.
        """
        if self.is_empty():
            return
        # Step to the predecessor of node_addr before walking.
        node_addr = self.ram_dump.read_word(node_addr + self.ram_dump.field_offset('struct list_head', 'prev'))
        while True:
            if node_addr == 0:
                break
            funcargs = [node_addr - self.list_elem_offset] + list(args)
            func(*funcargs)
            prev_node_addr = node_addr + self.ram_dump.field_offset('struct list_head', 'prev')
            prev_node = self.ram_dump.read_word(prev_node_addr)
            if prev_node == self.last_node:
                break
            if prev_node in self.seen_nodes:
                print_out_str(
                    '[!] WARNING: Cycle found in attach list. List is corrupted!')
                break
            node_addr = prev_node
            self.seen_nodes.append(node_addr)

View File

@@ -0,0 +1,116 @@
# Copyright (c) 2020 The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
'''
: radix_tree_root is the address of root node
struct radix_tree_root {
[0x0] spinlock_t xa_lock;
[0x18] gfp_t gfp_mask;
[0x20] struct radix_tree_node *rnode;
}
get_radix_tree_root will return the rnode
walk_radix_tree_node start with this rnode
'''
class RadixTreeWalker(object):
    '''Walks a kernel radix tree (pre-5.4) or xarray (5.4+), invoking a
    callback on every populated leaf slot.'''
    def __init__(self, ramdump):
        self.ramdump = ramdump
        # The low two bits of a slot value encode the entry kind; the
        # "internal" tag marks a pointer to a child node, not a leaf.
        self.RADIX_TREE_ENTRY_MASK = 3
        self.RADIX_TREE_INTERNAL_NODE = 1 # 1 for 4.19; 2 for 5.4
        self.RADIX_TREE_MAP_SHIFT = 6
        if int(self.ramdump.get_config_val("CONFIG_BASE_SMALL")) == 1:
            self.RADIX_TREE_MAP_SHIFT = 4
        # Number of slots per tree node.
        self.RADIX_TREE_MAP_SIZE = (1 << self.RADIX_TREE_MAP_SHIFT)
        # kernel_version (0, 0, 0) means "unknown"; assume the new layout.
        if (self.ramdump.kernel_version == (0, 0, 0) or
                self.ramdump.kernel_version >= (5, 4, 0)):
            # v5.4+ renamed radix_tree_root/rnode to xarray/xa_head.
            self.root_struct = 'xarray'
            self.head_struct = 'xa_head'
            self.node_struct = 'xa_node'
            self.RADIX_TREE_INTERNAL_NODE = 2
        else:
            self.root_struct = 'radix_tree_root'
            self.head_struct = 'rnode'
            self.node_struct = 'radix_tree_node'
        self.node_shift_offset = self.ramdump.field_offset('struct ' + self.node_struct, 'shift')
        self.node_slots_offset = self.ramdump.field_offset('struct ' + self.node_struct, 'slots')
        self.node_size = self.ramdump.sizeof('struct ' + self.node_struct + ' *')
    def get_radix_tree_root(self, radix_tree_root):
        '''Read and return the head/root entry from the tree root struct
        at address radix_tree_root.'''
        rnode_offset = self.ramdump.field_offset('struct ' + self.root_struct,self.head_struct)
        rnode_addr = self.ramdump.read_word(radix_tree_root + rnode_offset)
        return rnode_addr
    def entry_to_node(self, rbnode):
        '''Strip the internal-entry tag bit to recover the node address.'''
        return rbnode & ~self.RADIX_TREE_INTERNAL_NODE
    def is_internal_node(self, rbnode):
        # 10: Internal entry
        return (rbnode & self.RADIX_TREE_ENTRY_MASK) == self.RADIX_TREE_INTERNAL_NODE
    def walk_radix_tree_node(self, tree_node, height,func, *args):
        '''Recursively visit every populated slot below tree_node, calling
        func(leaf_entry, *args) for each leaf at height 1.'''
        for off in range(0, self.RADIX_TREE_MAP_SIZE):
            slot = self.ramdump.read_word(tree_node + self.node_slots_offset + self.node_size * off)
            if slot is None or slot == 0:
                continue
            # RADIX_TREE_INTERNAL_NODE mean we are not leaf
            if self.is_internal_node(slot):
                slot = self.entry_to_node(slot)
            if height == 1:
                func(slot, *args)
            else:
                radix_tree_node_next = slot
                self.walk_radix_tree_node(radix_tree_node_next,(height -1), func, *args)
    def walk_radix_tree(self, radix_tree_root, func, *args):
        '''Walk the whole tree rooted at radix_tree_root, calling
        func(entry, *args) on every stored entry.'''
        height = 0
        radix_tree_node = self.get_radix_tree_root(radix_tree_root)
        if radix_tree_node is None or radix_tree_node == 0:
            return
        if self.is_internal_node(radix_tree_node):
            radix_tree_node = self.entry_to_node(radix_tree_node)
            # Derive the tree height from the root node's shift value.
            shift = self.ramdump.read_byte(radix_tree_node + self.node_shift_offset)
            height = (shift // self.RADIX_TREE_MAP_SHIFT) + 1
        if height == 0:
            # Root holds a single direct entry, no node to descend into.
            func(radix_tree_node, *args)
        else:
            self.walk_radix_tree_node(radix_tree_node,height, func, *args)
    def walk_radix_tree_node_with_offset(self, tree_node, height,index, func, *args):
        '''Like walk_radix_tree_node, but also reconstructs each entry's
        index within the tree and passes it as func's second argument.'''
        for off in range(0, self.RADIX_TREE_MAP_SIZE):
            # Bits contributed by this level's slot offset.
            shift = (height - 1) * self.RADIX_TREE_MAP_SHIFT
            slot = self.ramdump.read_word(tree_node + self.node_slots_offset + self.node_size * off)
            if slot is None or slot == 0:
                continue
            # RADIX_TREE_INTERNAL_NODE mean we are not leaf
            if self.is_internal_node(slot):
                slot = self.entry_to_node(slot)
            if height == 1:
                func(slot, (index | off), *args)
            else:
                child_index = index | (off << shift)
                radix_tree_node_next = slot
                self.walk_radix_tree_node_with_offset(radix_tree_node_next,(height -1), child_index, func, *args)
    def walk_radix_tree_with_offset(self, radix_tree_root, func, *args):
        '''Walk the whole tree, calling func(entry, index, *args) on every
        stored entry together with its tree index.'''
        height = 0
        radix_tree_node = self.get_radix_tree_root(radix_tree_root)
        if radix_tree_node is None or radix_tree_node == 0:
            return
        if self.is_internal_node(radix_tree_node):
            radix_tree_node = self.entry_to_node(radix_tree_node)
            shift = self.ramdump.read_byte(radix_tree_node + self.node_shift_offset)
            height = (shift // self.RADIX_TREE_MAP_SHIFT) + 1
        if height == 0:
            # Single direct entry at index 0.
            func(radix_tree_node, 0, *args)
        else:
            self.walk_radix_tree_node_with_offset(radix_tree_node,height, 0,func, *args)

View File

@@ -0,0 +1,191 @@
# Copyright (c) 2013-2014, 2020 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from print_out import print_out_str
from register import Register
from mmu import Armv7LPAEMMU
import sizes
NUM_FL_PTE = 4
NUM_SL_PTE = 512
NUM_TL_PTE = 512
def print_lpae_mappings(mappings, outfile):
    """Write one line per mapping to `outfile`.

    `mappings` maps (virt_start, virt_end) tuples to LeafMapping objects
    (or None for unmapped gaps) and should already be sorted.
    """
    mapped_fmt = '[0x{vstart:08x}--0x{vend:08x}] [0x{size:08x}] [A:0x{pstart:08x}--0x{pend:08x}] [{attrs}][{sizestring}]\n'
    unmapped_fmt = '[0x{vstart:08x}--0x{vend:08x}] [0x{size:08x}] [UNMAPPED]\n'
    for (lo, hi), leaf in mappings.items():
        if leaf is None:
            line = unmapped_fmt.format(vstart=lo, vend=hi, size=hi - lo)
        else:
            line = mapped_fmt.format(
                vstart=lo,
                vend=hi,
                size=leaf.page_size,
                pstart=leaf.phys,
                pend=leaf.phys + leaf.page_size,
                attrs=','.join(leaf.get_attributes_strings()),
                sizestring=sizes.get_size_string(leaf.page_size),
            )
        outfile.write(line)
def get_flat_mappings(domain, mmu):
    """Walk some LPAE IOMMU page tables by iterating over all possible
    page table entries at each level. Returns a dictionary of the
    form: {(virt_start, virt_end): LeafMapping object, ...}
    """
    mappings = {}
    n = mmu.input_addr_split
    # Register splitting a virtual address into per-level table indices
    # plus the in-page offset bits.
    virt_r = Register(fl_index=(n + 26, 30),
                      sl_index=(29, 21),
                      tl_index=(20, 12),
                      page_index=(11, 0))
    for fl_index in range(0, NUM_FL_PTE):
        virt_r.zero()
        virt_r.fl_index = fl_index
        info1 = mmu.translate_first_level(virt_r)
        if info1 is None:
            # Unmapped first-level entry.
            continue
        if info1.leaf:
            # First-level block mapping.
            virt = virt_r.value
            mappings[virt, virt + info1.page_size] = info1
            continue
        # this is a table. do the second-level lookup:
        for sl_index in range(0, NUM_SL_PTE):
            virt_r.sl_index = sl_index
            info2 = mmu.translate_second_level(virt_r, info1.next_table_addr)
            if info2 is None:
                continue
            if info2.leaf:
                # Second-level block mapping.
                virt = virt_r.value
                mappings[virt, virt + info2.page_size] = info2
                continue
            # this is a table. do the third-level lookup:
            for tl_index in range(0, NUM_TL_PTE):
                virt_r.tl_index = tl_index
                info3 = mmu.translate_third_level(virt_r, info2.next_table_addr)
                if info3 is None:
                    continue
                if not info3.leaf:
                    # The third level must resolve to pages.
                    raise Exception('Non-leaf third-level PTE???')
                virt = virt_r.value
                mappings[virt, virt + info3.page_size] = info3
    return OrderedDict(sorted(mappings.items()))
def get_coalesced_mappings(flat_mappings):
    """Convert some "flat" mappings (from `get_flat_mappings') to a more
    compact representation where contiguous ranges are coalesced.

    Returns an OrderedDict keyed by (virt_start, virt_end) tuples, with
    unmapped gaps filled in as entries mapping to None.

    Python 3 fix: dict views (`.items()`, `.keys()`) are not indexable
    or sliceable; they are now materialized into lists before indexing,
    and the gap-filling loop iterates a snapshot so inserting into `cc`
    during the loop is safe.
    """
    # fair warning: things are about to get a little hairy. have fun.
    flat_items = list(flat_mappings.items())
    # samers maps indices into flat_items to coalesced virtual
    # starting addresses for those items.
    samers = {}
    # mark adjacent equivalent mappings
    for i, (virt_range, info) in enumerate(flat_items):
        virt_start, virt_end = virt_range
        if i == 0:
            cur_virt = virt_start
            continue
        prev_range, prev_info = flat_items[i - 1]
        prev_start, prev_end = prev_range
        if virt_start == prev_end and \
           info.attributes == prev_info.attributes:
            samers[i] = cur_virt
        else:
            cur_virt = virt_start
    # merge adjacent equivalent mappings. coalesced_mappings will be
    # keyed by starting virtual address alone.
    coalesced_mappings = {}
    for i, (virt_range, info) in enumerate(flat_items):
        virt_start, virt_end = virt_range
        page_size = virt_end - virt_start
        if i in samers:
            # Fold this range into the run that started at samers[i].
            coalesced_mappings[samers[i]].page_size += page_size
            continue
        if virt_start not in coalesced_mappings:
            coalesced_mappings[virt_start] = info
            continue
        else:
            raise ValueError('We should have either gotten a samer or something not in coalesced_mappings...')
    # convert coalesced_mappings to cc, which is keyed by a 2-tuple of
    # the form: (virt_start, virt_end). Still mapping to the same
    # LeafMapping objects.
    cc = dict(((virt_start, virt_start + info.page_size), info)
              for virt_start, info in coalesced_mappings.items())
    # maintain order to facilitate finding unmapped gaps
    cc = OrderedDict(sorted(cc.items()))
    # fill in the unmapped gaps by adding mappings to `None':
    if len(cc) > 0:
        cc_items = list(cc.items())
        (first_vstart, first_vend), info = cc_items[0]
        (last_vstart, last_vend), info = cc_items[-1]
        if first_vstart != 0:
            cc[0, first_vstart] = None
        if last_vend != 0xffffffff:
            cc[last_vend, 0xffffffff] = None
    cc = OrderedDict(sorted(cc.items()))
    keys = list(cc.keys())
    # Iterate a snapshot of the middle items; `cc` is mutated below.
    for i, ((vstart, vend), info) in enumerate(list(cc.items())[1:-1]):
        prev_start, prev_end = keys[i]  # no need for -1 since we're iterating starting at 1
        if prev_end != vstart:
            cc[prev_end, vstart] = None
    cc = OrderedDict(sorted(cc.items()))
    return cc
def parse_long_form_tables(dump, d, domain_num):
    """Dump the LPAE IOMMU page tables for one domain `d` into a text
    file named after the domain number and page-table base address."""
    fname = 'msm_iommu_domain_%02d_0x%12X.txt' % (domain_num, d.pg_table)
    with dump.open_file(fname) as outfile:
        print_out_str('LPAE Iommu page tables: ' + fname)
        # t0sz == 0: TTBR0 covers the whole input address range here.
        t0sz = 0
        mmu = Armv7LPAEMMU(dump, d.pg_table, t0sz, virt_for_fl=True)
        redirect = 'OFF'
        if d.redirect is None:
            redirect = 'UNKNOWN'
        elif d.redirect > 0:
            redirect = 'ON'
        # Attached contexts as "name (num)" pairs, space separated.
        iommu_context = ' '.join('%s (%s)' % (name, num)
                                 for (name, num) in d.ctx_list)
        iommu_context = iommu_context or 'None attached'
        outfile.write('IOMMU Context: %s. Domain: %s (%d) [L2 cache redirect for page tables is %s]\n' % (
            iommu_context, d.client_name, d.domain_num, redirect))
        outfile.write(
            '[VA Start -- VA End ] [Size ] [PA Start -- PA End ] [Attributes][Page Table Entry Size]\n')
        if d.pg_table == 0:
            # A zero base means we never got the tables for this domain.
            outfile.write(
                'No Page Table Found. (Probably a secure domain)\n')
        else:
            mappings = get_flat_mappings(d, mmu)
            print_lpae_mappings(get_coalesced_mappings(mappings), outfile)
            # Also emit the uncoalesced per-PTE view for debugging.
            outfile.write('\n-------------\nRAW Dump\n')
            outfile.write('Raw: ' + str(d) + '\n')
            print_lpae_mappings(mappings, outfile)

View File

@@ -0,0 +1,384 @@
#SPDX-License-Identifier: GPL-2.0-only
#Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
import os
import sys
import struct
import traceback
class Lzo1xParser():
    """Pure-Python port of the Linux kernel's lzo1x_decompress_safe().

    Usage: call lzo1x_decompress_safe(indata, in_len, oudata, ou_len) with a
    writable output buffer (e.g. bytearray). The decoder reports completion
    and overruns by raising Exception; afterwards callers inspect
    self.error (LZO_E_OK on success) and self.ou_len (bytes produced).
    """
    LZO_E_OK = 0
    LZO_E_ERROR = -1
    LZO_E_OUT_OF_MEMORY = -2
    LZO_E_NOT_COMPRESSIBLE = -3
    LZO_E_INPUT_OVERRUN = -4
    LZO_E_OUTPUT_OVERRUN = -5
    # Bug fix: was +6; every LZO error code is negative (lzo.h defines -6).
    LZO_E_LOOKBEHIND_OVERRUN = -6
    LZO_E_EOF_NOT_FOUND = -7
    LZO_E_INPUT_NOT_CONSUMED = -8
    LZO_E_NOT_YET_IMPLEMENTED = -9
    LZO_E_INVALID_ARGUMENT = -10
    # Limit for run-length extension bytes (mirrors the kernel's overflow guard)
    MAX_255_COUNT = 0x10100ff
    MIN_ZERO_RUN_LENGTH = 4
    M1_MAX_OFFSET = 0x0400
    M2_MAX_OFFSET = 0x0800
    M3_MAX_OFFSET = 0x4000

    def __init__(self, config_have_efficient_unaligned_access=True):
        # Input/output buffers and cursors; (re)set by lzo1x_decompress_safe()
        self.indata = None
        self.oudata = None
        self.ip = 0          # read cursor into indata
        self.op = 0          # write cursor into oudata
        self.ou_len = 0
        self.in_len = 0
        self.error = self.LZO_E_ERROR
        self.CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS = config_have_efficient_unaligned_access

    def read_bytes(self, data, offset, len):
        """Read a little-endian unsigned integer of 1/2/4/8 bytes."""
        if len == 8:
            s = struct.unpack('<Q', data[offset:offset+8])
            return s[0]
        elif len == 4:
            s = struct.unpack('<I', data[offset:offset+4])
            return s[0]
        elif len == 2:
            s = struct.unpack('<H', data[offset:offset+2])
            return s[0]
        elif len == 1:
            s = struct.unpack('<B', data[offset:offset+1])
            return s[0]
        else:
            # Bug fix: was exit(-1); a library must not terminate the process.
            raise ValueError("unsupported read width %d" % len)

    def HAVE_IP(self, x):
        """True if at least x input bytes remain."""
        return self.in_len - self.ip >= x

    def HAVE_OP(self, x):
        """True if at least x output bytes remain."""
        return self.ou_len - self.op >= x

    def NEED_IP(self, x):
        if not self.HAVE_IP(x):
            self.input_overrun()

    def NEED_OP(self, x):
        if not self.HAVE_OP(x):
            self.output_overrun()

    def TEST_LB(self, m_pos):
        """Validate a look-behind match position."""
        if m_pos < 0:
            self.lookbehind_overrun()

    def input_overrun(self):
        self.error = self.LZO_E_INPUT_OVERRUN
        self.ou_len = self.op
        raise Exception("LZO_E_INPUT_OVERRUN")

    def output_overrun(self):
        # Bug fix: previously set LZO_E_INPUT_OVERRUN by mistake.
        self.error = self.LZO_E_OUTPUT_OVERRUN
        self.ou_len = self.op
        raise Exception("LZO_E_OUTPUT_OVERRUN")

    def lookbehind_overrun(self):
        self.error = self.LZO_E_LOOKBEHIND_OVERRUN
        self.ou_len = self.op
        raise Exception("LZO_E_LOOKBEHIND_OVERRUN")

    # need to copy one by one (overlapping copies must see earlier writes)
    def COPY(self, odata, op, idata, ip, size):
        idx = 0
        while idx < size:
            odata[op + idx] = idata[ip + idx]
            idx += 1

    def COPY4(self, odata, op, idata, ip):
        self.COPY(odata, op, idata, ip, 4)

    def COPY8(self, odata, op, idata, ip):
        self.COPY(odata, op, idata, ip, 8)

    def match_next(self, next):
        """Copy the 0-3 trailing literals encoded in the previous opcode."""
        t = next
        if self.CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS:
            if self.HAVE_IP(6) and self.HAVE_OP(4):
                # Over-copy 4 bytes but only advance by t (kernel fast path)
                self.COPY4(self.oudata, self.op, self.indata, self.ip)
                self.op += t
                self.ip += t
            else:
                self.NEED_IP(t + 3)
                self.NEED_OP(t)
                self.COPY(self.oudata, self.op, self.indata, self.ip, t)
                self.ip = self.ip + t
                self.op = self.op + t
                t = 0
        else:
            self.NEED_IP(t + 3)
            self.NEED_OP(t)
            self.COPY(self.oudata, self.op, self.indata, self.ip, t)
            self.ip = self.ip + t
            self.op = self.op + t
            t = 0
        return t

    def error_to_str(self, err):
        """Translate an LZO_E_* code into its symbolic name."""
        if err == self.LZO_E_ERROR:
            return "LZO_E_ERROR"
        elif err == self.LZO_E_OK:
            return "LZO_E_OK"
        elif err == self.LZO_E_INPUT_NOT_CONSUMED:
            return "LZO_E_INPUT_NOT_CONSUMED"
        elif err == self.LZO_E_INPUT_OVERRUN:
            return "LZO_E_INPUT_OVERRUN"
        # Added missing codes so callers never see Exception(None)
        elif err == self.LZO_E_OUTPUT_OVERRUN:
            return "LZO_E_OUTPUT_OVERRUN"
        elif err == self.LZO_E_LOOKBEHIND_OVERRUN:
            return "LZO_E_LOOKBEHIND_OVERRUN"

    def eof_found(self, t):
        """Reached the stream terminator; classify success/failure and raise."""
        self.ou_len = self.op
        if t != 3:
            self.error = self.LZO_E_ERROR
        elif self.ip == self.in_len:
            self.error = self.LZO_E_OK
        elif self.ip < self.in_len:
            self.error = self.LZO_E_INPUT_NOT_CONSUMED
        else:
            self.error = self.LZO_E_INPUT_OVERRUN
        raise Exception(self.error_to_str(self.error))

    def lzo1x_decompress_safe(self, indata, in_len, oudata, ou_len):
        """Decompress an LZO1X stream; see class docstring for the protocol."""
        self.indata = indata
        self.oudata = oudata
        self.in_len = in_len
        self.ou_len = ou_len
        self.ip = 0
        self.op = 0
        copy_literal_run = False
        SIZEOF_CHAR = 1
        m_pos = 0
        next = 0
        state = 0
        if self.in_len < 3:
            self.input_overrun()
        # Optional version marker (17, version) prefix
        if self.in_len >= 5 and self.read_bytes(self.indata, self.ip, SIZEOF_CHAR) == 17:
            bitstream_version = self.read_bytes(self.indata, (self.ip + SIZEOF_CHAR), SIZEOF_CHAR)
            self.ip += SIZEOF_CHAR * 2
        else:
            bitstream_version = 0
        # A first byte > 17 encodes an initial literal run of (byte - 17)
        if self.read_bytes(self.indata, self.ip, SIZEOF_CHAR) > 17:
            t = self.read_bytes(self.indata, self.ip, SIZEOF_CHAR) - 17
            self.ip += SIZEOF_CHAR
            if t < 4:
                next = t
                state = next
                t = self.match_next(next)
            else:
                copy_literal_run = True
                _bc_t = t
                _bc_ip = self.ip
        while True:
            t = self.read_bytes(self.indata, self.ip, SIZEOF_CHAR)
            self.ip += SIZEOF_CHAR
            if t < 16 or copy_literal_run:
                if state == 0 or copy_literal_run:
                    # Literal run (opcode 0..15), length may be extended by
                    # a run of zero bytes.
                    if t == 0 and not copy_literal_run:
                        ip_last = self.ip
                        while self.read_bytes(self.indata, self.ip, SIZEOF_CHAR) == 0:
                            self.ip += 1
                            self.NEED_IP(1)
                        offset = self.ip - ip_last
                        if offset > self.MAX_255_COUNT:
                            self.error = self.LZO_E_ERROR
                            return self.LZO_E_ERROR, self.ou_len
                        offset = (offset << 8) - offset
                        t = t + offset + 15 + self.read_bytes(self.indata, self.ip, SIZEOF_CHAR)
                        self.ip += 1
                    t += 3
                    if copy_literal_run:
                        # Resume the initial literal run saved before the loop
                        copy_literal_run = False
                        t = _bc_t
                        self.ip = _bc_ip
                    if self.CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS:
                        if self.HAVE_IP(t + 15) and self.HAVE_OP(t + 15):
                            # Fast path: over-copy in 16-byte chunks
                            ie = self.ip + t
                            oe = self.op + t
                            while True:
                                self.COPY8(self.oudata, self.op, self.indata, self.ip)
                                self.ip += 8
                                self.op += 8
                                self.COPY8(self.oudata, self.op, self.indata, self.ip)
                                self.ip += 8
                                self.op += 8
                                if self.ip >= ie:
                                    break
                            self.ip = ie
                            self.op = oe
                        else:
                            self.NEED_OP(t)
                            self.NEED_IP(t + 3)
                            self.COPY(self.oudata, self.op, self.indata, self.ip, t)
                            self.ip = self.ip + t
                            self.op = self.op + t
                            t = 0
                    else:
                        self.NEED_OP(t)
                        self.NEED_IP(t + 3)
                        self.COPY(self.oudata, self.op, self.indata, self.ip, t)
                        self.ip = self.ip + t
                        self.op = self.op + t
                        t = 0
                    state = 4
                    continue
                elif state != 4:
                    # M1 match: 2-byte match at a short distance
                    next = t & 3
                    m_pos = self.op - 1
                    m_pos -= t >> 2
                    m_pos -= self.read_bytes(self.indata, self.ip, SIZEOF_CHAR) << 2
                    self.ip += 1
                    self.TEST_LB(m_pos)
                    self.NEED_OP(2)
                    self.oudata[self.op] = self.oudata[m_pos]
                    self.oudata[self.op+1] = self.oudata[m_pos+1]
                    self.op += 2
                    state = next
                    t = self.match_next(next)
                    continue
                else:
                    # M1 match following a match (state == 4)
                    next = t & 3
                    m_pos = self.op - (1 + self.M2_MAX_OFFSET)
                    m_pos -= t >> 2
                    m_pos -= self.read_bytes(self.indata, self.ip, SIZEOF_CHAR) << 2
                    self.ip += 1
                    t = 3
            elif t >= 64:
                # M2 match: 3-8 byte match, distance up to M2_MAX_OFFSET
                next = t & 3
                m_pos = self.op - 1
                m_pos -= (t >> 2) & 7
                m_pos -= self.read_bytes(self.indata, self.ip, SIZEOF_CHAR) << 3
                self.ip += 1
                t = (t >> 5) - 1 + (3 - 1)
            elif t >= 32:
                # M3 match: distance up to M3_MAX_OFFSET, extensible length
                t = (t & 31) + (3 - 1)
                if t == 2:
                    ip_last = self.ip
                    while self.read_bytes(self.indata, self.ip, SIZEOF_CHAR) == 0:
                        self.ip += 1
                        self.NEED_IP(1)
                    offset = self.ip - ip_last
                    if offset > self.MAX_255_COUNT:
                        self.error = self.LZO_E_ERROR
                        return self.LZO_E_ERROR, self.ou_len
                    offset = (offset << 8) - offset
                    t = t + offset + 31 + self.read_bytes(self.indata, self.ip, SIZEOF_CHAR)
                    self.ip += 1
                self.NEED_IP(2)
                m_pos = self.op - 1
                next = self.read_bytes(self.indata, self.ip, 2)
                self.ip += 2
                m_pos -= next >> 2
                next &= 3
            else:
                # M4 match (16..31), or a zero-run opcode on versioned streams
                self.NEED_IP(2)
                next = self.read_bytes(self.indata, self.ip, 2)
                if ((next & 0xfffc) == 0xfffc) and ((t & 0xf8) == 0x18) and bitstream_version:
                    # Run of zero bytes (bitstream version >= 1)
                    self.NEED_IP(3)
                    t &= 7
                    t |= self.indata[self.ip + 2] << 3
                    t += self.MIN_ZERO_RUN_LENGTH
                    self.NEED_OP(t)
                    idx = 0
                    while idx < t:
                        self.oudata[self.op + idx] = 0
                        idx += 1
                    idx = 0
                    self.op += t
                    next &= 3
                    self.ip += 3
                    state = next
                    t = self.match_next(next)
                    continue
                else:
                    m_pos = self.op
                    m_pos -= (t & 8) << 11
                    t = (t & 7) + (3 - 1)
                    if t == 2:
                        ip_last = self.ip
                        while self.read_bytes(self.indata, self.ip, SIZEOF_CHAR) == 0:
                            self.ip += 1
                            self.NEED_IP(1)
                        offset = self.ip - ip_last
                        if offset > self.MAX_255_COUNT:
                            self.error = self.LZO_E_ERROR
                            return self.LZO_E_ERROR, self.ou_len
                        offset = (offset << 8) - offset
                        t += offset + 7 + self.read_bytes(self.indata, self.ip, SIZEOF_CHAR)
                        self.ip += 1
                    self.NEED_IP(2)
                    next = self.read_bytes(self.indata, self.ip, 2)
                    self.ip += 2
                    m_pos -= next >> 2
                    next &= 3
                    # distance 0x4000 with t == 3 is the stream terminator
                    if (m_pos == self.op):
                        self.eof_found(t)
                    m_pos -= 0x4000
            # Common match-copy tail for M1(state 4)/M2/M3/M4
            self.TEST_LB(m_pos)
            if self.CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS:
                if (self.op - m_pos >= 8):
                    oe = self.op + t
                    if self.HAVE_OP(t + 15):
                        oe = self.op + t
                        while True:
                            self.COPY8(self.oudata, self.op, self.oudata, m_pos)
                            m_pos += 8
                            self.op += 8
                            self.COPY8(self.oudata, self.op, self.oudata, m_pos)
                            m_pos += 8
                            self.op += 8
                            if self.op >= oe:
                                break
                        self.op = oe
                        if self.HAVE_IP(6):
                            state = next
                            self.COPY4(self.oudata, self.op, self.indata, self.ip)
                            self.op += next
                            self.ip += next
                            continue
                    else:
                        self.NEED_OP(t)
                        offset = oe - self.op
                        self.COPY(self.oudata, self.op, self.oudata, m_pos, offset)
                        self.op += offset
                        m_pos += offset
                else:
                    oe = self.op + t
                    self.NEED_OP(t)
                    self.oudata[self.op] = self.oudata[m_pos]
                    self.oudata[self.op+1] = self.oudata[m_pos+1]
                    self.op += 2
                    m_pos += 2
                    offset = oe - self.op
                    self.COPY(self.oudata, self.op, self.oudata, m_pos, offset)
                    self.op += offset
                    m_pos += offset
            else:
                oe = self.op + t
                self.NEED_OP(t)
                self.oudata[self.op] = self.oudata[m_pos]
                self.oudata[self.op+1] = self.oudata[m_pos+1]
                self.op += 2
                m_pos += 2
                offset = oe - self.op
                self.COPY(self.oudata, self.op, self.oudata, m_pos, offset)
                self.op += offset
                m_pos += offset
            state = next
            t = self.match_next(next)
            continue
        self.eof_found(t)

View File

@@ -0,0 +1,55 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
#! /bin/bash
# Build an RPM package of the linux-ramdump-parser-v2 sources.
#
# Flow: clean previous artifacts -> create the rpmbuild tree -> archive the
# git HEAD of this directory into a tarball -> run rpmbuild against the
# local .spec file -> copy the resulting .rpm back into this directory.
RPM_DIR="`pwd`/rpmbuild"
SOURCE_DIR="`pwd`"
RPMBUILDOPTS="-bb"
DIR="linux-ramdump-parser-v2"
VERSION="1.0"
TARBALL="${DIR}-${VERSION}.tar.gz"
# Setting Default ARCH as aarch64, can be overridden by cmdline arg "-a <Architecture>"
ARCH="aarch64"
while getopts a: arg
do
    case "${arg}" in
        a)
            ARCH=${OPTARG}
            ;;
        *)
            echo "Usage: $0 [-a <Architecture>]"
            exit 1
            ;;
    esac
done
# Remove the scratch rpmbuild tree from a previous run.
clean_rpmbuild()
{
    rm -rf ${RPM_DIR}
}
# Remove previously generated RPMs from the current directory.
clean_rpms()
{
    rm -rf *.rpm
}
clean_rpmbuild
clean_rpms
mkdir -p ${RPM_DIR}/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
# Archive HEAD of this subdirectory from the parent git repo into the tarball.
cd .. && git archive --prefix="${DIR}-${VERSION}"/ --format=tar HEAD:${DIR} | gzip -v > ${TARBALL} && cd ${DIR}
mv ../${TARBALL} ${RPM_DIR}/SOURCES
cp -f *.spec ${RPM_DIR}/SPECS
rpmbuild --define "_sourcedir ${RPM_DIR}/SOURCES" --define "_builddir ${RPM_DIR}/BUILD" --define "_srcrpmdir ${RPM_DIR}/SRPMS" --define "_rpmdir ${RPM_DIR}/RPMS" --define "_specdir ${RPM_DIR}/SPECS" --target ${ARCH} ${RPMBUILDOPTS} ${RPM_DIR}/SPECS/*.spec
if [ $? -eq 0 ]; then
    # Success: keep only the final .rpm, drop the scratch tree.
    cp -f ${RPM_DIR}/RPMS/${ARCH}/*.rpm .
    clean_rpmbuild
    echo
    echo "------------------------------------------------------------"
    echo "Generated RPM: `ls *.rpm`"
    echo "------------------------------------------------------------"
    echo
else
    exit 1
fi

View File

@@ -0,0 +1,184 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
# Maple Tree implementation
# Copyright (c) 2018-2022 Oracle Corporation
# Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
# Matthew Wilcox <willy@infradead.org>
import ctypes
from enum import Enum
from print_out import print_out_str
class MapleTreeWalker(object):
    """Walks a kernel maple tree (lib/maple_tree.c) from a ramdump.

    Mirrors the kernel's mt_dump() helpers: walk() visits every stored
    entry and invokes a user callback on it. The encoding constants below
    must match the kernel's maple_tree.h for the dumped kernel.
    """
    def __init__(self, ram_dump, debug=False):
        # debug is currently unused; kept for call-site compatibility
        self.ram_dump = ram_dump
        self.init_mtvars()
    def init_mtvars(self):
        """Initialize maple-tree layout constants and the node-type enum."""
        self.ULONG_MAX = 0xFFFFFFFFFFFFFFFF
        # xarray "zero" entry: present but reads as NULL
        self.XA_ZERO_ENTRY = self.xa_mk_internal(257)
        self.MAPLE_RESERVED_RANGE = 4096
        self.MAPLE_NODE_SLOTS = 31
        self.MAPLE_RANGE64_SLOTS = 16
        self.MAPLE_ARANGE64_SLOTS = 10
        self.MAPLE_NODE_MASK = 255
        # node type lives in bits 3..6 of an encoded maple entry
        self.MAPLE_NODE_TYPE_SHIFT = 0x03
        self.MAPLE_NODE_TYPE_MASK = 0x0F
        maple_type_list = self.ram_dump.gdbmi.get_enum_lookup_table('maple_type', self.ram_dump.sizeof('enum maple_type'))
        self.maple_type_enum = Enum('maple_type', maple_type_list, start=0)
        # Maximum index representable by each node type
        self.mt_max = [None] * len(self.maple_type_enum)
        self.mt_max[self.maple_type_enum.maple_dense.value] = self.MAPLE_NODE_SLOTS
        self.mt_max[self.maple_type_enum.maple_leaf_64.value] = self.ULONG_MAX
        self.mt_max[self.maple_type_enum.maple_range_64.value] = self.ULONG_MAX
        self.mt_max[self.maple_type_enum.maple_arange_64.value] = self.ULONG_MAX
        return
    def xa_mk_internal(self, val):
        """Encode val as an xarray internal entry (low bits 0b10)."""
        return ((val << 2) | 2)
    def get_unsigned64(self, signedval):
        """Reinterpret a possibly-negative value as unsigned 64-bit."""
        return ctypes.c_ulonglong(signedval).value
    def xa_is_internal(self, entry):
        return (entry & 3) == 2
    def xa_is_node(self, entry):
        # Internal entries above 4096 are node pointers
        return self.xa_is_internal(entry) and self.get_unsigned64(entry) > 4096
    def xa_is_value(self, entry):
        # Value entries have bit 0 set
        return (entry & 1)
    def xa_to_value(self, entry):
        return (entry >> 1)
    def xa_is_zero(self, entry):
        return (entry == self.XA_ZERO_ENTRY)
    def xa_to_internal(self, entry):
        return (entry >> 2)
    def mt_is_reserved(self, entry):
        return self.get_unsigned64(entry) > self.MAPLE_RESERVED_RANGE and self.xa_is_internal(entry)
    def mte_node_type(self, entry):
        """Extract the maple_type of an encoded node entry."""
        return (entry >> self.MAPLE_NODE_TYPE_SHIFT) & self.MAPLE_NODE_TYPE_MASK
    def mte_to_node(self, entry):
        """Strip the encoding bits to get the struct maple_node address."""
        return (entry & ~self.MAPLE_NODE_MASK)
    def ma_is_leaf(self, type):
        return type < self.maple_type_enum.maple_range_64.value
    def mte_is_leaf(self, entry):
        return self.ma_is_leaf(self.mte_node_type(entry))
    def mt_dump_range(self, min, max, depth):
        # Placeholder for the kernel's range printing; intentionally silent
        return
    def mt_dump_entry(self, entry, min, max, depth, func, *args):
        """Visit one stored entry; call func(entry, *args) for real values."""
        self.mt_dump_range(min, max, depth)
        if self.xa_is_value(entry):
            return
        elif self.xa_is_zero(entry):
            return
        elif self.mt_is_reserved(entry):
            return
        else:
            pass
        if func:
            func(entry, *args)
        return
    def mt_dump_range64(self, mt, entry, min, max, depth, func, *args):
        """Recurse through a maple_range_64 node, visiting each slot."""
        node = self.mte_to_node(entry)
        node = self.ram_dump.struct_field_addr(node, 'struct maple_node', 'mr64')
        leaf = self.mte_is_leaf(entry)
        first = min
        for i in range(0, self.MAPLE_RANGE64_SLOTS):
            last = max
            slot = self.ram_dump.read_structure_field(node, 'struct maple_range_64', 'slot[{}]'.format(i))
            # The last slot has no pivot: it inherits max
            if i < (self.MAPLE_RANGE64_SLOTS - 1):
                last = self.ram_dump.read_structure_field(node, 'struct maple_range_64', 'pivot[{}]'.format(i))
            elif slot != 0 and max != self.mt_max[self.mte_node_type(entry)]:
                break
            if last == 0 and i > 0:
                break
            if leaf:
                mt_slot = self.ram_dump.read_structure_field(node, 'struct maple_range_64', 'slot[{}]'.format(i))
                self.mt_dump_entry(mt_slot, first, last, depth + 1, func, *args)
            elif slot:
                mt_slot = self.ram_dump.read_structure_field(node, 'struct maple_range_64', 'slot[{}]'.format(i))
                self.mt_dump_node(mt, mt_slot, first, last, depth + 1, func, *args)
            if last == max:
                break
            if self.get_unsigned64(last) > max:
                # Corrupt node: pivot exceeds the parent's range
                print_out_str("\n node 0x{0:x} last ({1:d}) > max ({2:d}) at pivot {3:d}!".format(node, last, max, i))
                break
            first = last + 1
        return
    def mt_dump_arange64(self, mt, entry, min, max, depth, func, *args):
        """Recurse through a maple_arange_64 (allocation) node."""
        node = self.mte_to_node(entry)
        node = self.ram_dump.struct_field_addr(node, 'struct maple_node', 'ma64')
        leaf = self.mte_is_leaf(entry)
        first = min
        for i in range(0, self.MAPLE_ARANGE64_SLOTS):
            last = max
            slot = self.ram_dump.read_structure_field(node, 'struct maple_arange_64', 'slot[{}]'.format(i))
            if i < (self.MAPLE_ARANGE64_SLOTS - 1):
                last = self.ram_dump.read_structure_field(node, 'struct maple_arange_64', 'pivot[{}]'.format(i))
            elif slot == 0:
                break
            if last == 0 and i > 0:
                break
            if leaf:
                mt_slot = self.ram_dump.read_structure_field(node, 'struct maple_arange_64', 'slot[{}]'.format(i))
                self.mt_dump_entry(mt_slot, first, last, depth + 1, func, *args)
            elif slot:
                mt_slot = self.ram_dump.read_structure_field(node, 'struct maple_arange_64', 'slot[{}]'.format(i))
                self.mt_dump_node(mt, mt_slot, first, last, depth + 1, func, *args)
            if last == max:
                break
            if self.get_unsigned64(last) > max:
                print_out_str("\n node 0x{0:x} last ({1:d}) > max ({2:d}) at pivot {3:d}!".format(node, last, max, i))
                break
            first = last + 1
        return
    def mt_dump_node(self, mt, entry, min, max, depth, func, *args):
        """Dispatch on node type and visit every entry below it."""
        node = self.mte_to_node(entry)
        type = self.mte_node_type(entry)
        self.mt_dump_range(min, max, depth)
        if type == self.maple_type_enum.maple_dense.value:
            # Dense nodes map one index per slot
            for i in range(0, self.MAPLE_NODE_SLOTS):
                if min + i > max:
                    print_out_str("OUT OF RANGE: ")
                mt_slot = self.ram_dump.read_structure_field(node, 'struct maple_node', 'slot[{}]'.format(i))
                self.mt_dump_entry(mt_slot, min + i, min + i, depth, func, *args)
        elif type == self.maple_type_enum.maple_leaf_64.value or type == self.maple_type_enum.maple_range_64.value:
            self.mt_dump_range64(mt, entry, min, max, depth, func, *args)
        elif type == self.maple_type_enum.maple_arange_64.value:
            self.mt_dump_arange64(mt, entry, min, max, depth, func, *args)
        else:
            print_out_str("Unknown type entry({})".format(hex(entry)))
        return
    def walk(self, mt, func, *args):
        """Walk the maple tree at address mt, calling func(entry, *args)."""
        entry = self.ram_dump.read_structure_field(mt, 'struct maple_tree', 'ma_root')
        if not self.xa_is_node(entry):
            # Single-entry tree: the root itself is the value
            self.mt_dump_entry(entry, 0, 0, 0, func, *args)
        elif entry:
            self.mt_dump_node(mt, entry, 0, self.mt_max[self.mte_node_type(entry)], 0, func, *args)
        return

View File

@@ -0,0 +1,189 @@
# Copyright (c) 2012-2017, 2020 The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import re
import os
import struct
from print_out import print_out_str
from fnmatch import fnmatch
def minidump_virt_to_phys(ebi_files, addr):
    """Translate a virtual address using the minidump EBI segment list.

    Each entry of ebi_files is (idx, pa, end_addr, va, size). Returns the
    physical address, or None when addr is None or unmapped.
    """
    if addr is None:
        return None
    for seg in ebi_files:
        _idx, pa, _end, va, size = seg
        # note: upper bound is inclusive of va + size
        if va <= addr <= va + size:
            return pa + (addr - va)
    return None
def read_physical_minidump(ebi_files, ebi_files_ramfile, elffile, addr, length):
    """Read up to `length` bytes of physical memory from a minidump.

    First tries the ELF segments (ebi_files entries are
    (idx, start, end, va, size)); reads are clamped at the segment end.
    Falls back to raw ram files ((fd, start, end, path)). Returns bytes,
    or None when the address is not covered.
    """
    found = None
    for seg in ebi_files:
        if seg[1] <= addr <= seg[2]:
            found = seg
            break
    if found is not None:
        idx, start, _end, _va, size = found
        section = elffile.get_segment(idx)
        off = addr - start
        # clamp the read so it never runs past the segment
        endoff = min(off + length, size)
        section.stream.seek(section['p_offset'] + off)
        return section.stream.read(endoff - off)
    for seg in ebi_files_ramfile:
        fd, start, end, _path = seg
        if start <= addr <= end:
            fd.seek(addr - start)
            return fd.read(length)
    return None
def add_file(fo, outdir, path):
    """Append the contents of outdir/path to the open file object `fo`.

    Copies in 4 KiB chunks. Returns 0 on success, -1 if the source file
    cannot be opened (matching the original error-code contract).
    """
    src = os.path.join(outdir, path)
    try:
        # Bug fix: was a bare `except:` (swallowed KeyboardInterrupt etc.)
        fi = open(src, "rb")
    except OSError:
        print_out_str("Not able to open file %s" % (src))
        return -1
    # `with` guarantees the source handle is closed even if a read fails
    with fi:
        while True:
            buf = fi.read(4096)
            if not buf:
                break
            fo.write(buf)
    return 0
def get_strings(buf, length):
    """Extract symbol names from the minidump string table in `buf`.

    Scans forward for the literal marker "STR_TBL", then collects runs of
    printable bytes (0x30..0x7f) of length >= 3, skipping the well-known
    'linux_banner' / 'minidump_table' names. Scanning stops at a run of
    zero bytes (checked at offset+1/offset+2 past the current position,
    matching the original terminator probe). Returns the list of names.
    """
    names = []
    current = ""
    table_found = False
    pos = 0
    while pos < length:
        if not table_found:
            probe = ""
            for k in range(7):
                (b,) = struct.unpack_from("<B", buf, pos + k)
                probe += chr(b)
            if probe != "STR_TBL":
                pos += 1
                continue
            table_found = True
            # advance only 6: the next read lands on the marker's final 'L',
            # which starts a short garbage run that is later discarded
            pos += 6
        (ch,) = struct.unpack_from("<B", buf, pos)
        pos += 1
        if 0x30 <= ch < 0x80:
            current += chr(ch)
        elif (current != "" and len(current) >= 3 and
              current != 'linux_banner' and current != 'minidump_table'):
            names.append(current)
            current = ""
        else:
            current = ""
        if ch == 0:
            (n1,) = struct.unpack_from("<B", buf, pos + 1)
            if n1 == 0:
                (n2,) = struct.unpack_from("<B", buf, pos + 2)
                if n2 == 0:
                    return names
    return names
def generate_elf(autodump, outdir, vm, kernel_version):
    """Reassemble a minidump into outdir/ap_minidump.elf.

    Reads the dumped ELF header blob, extracts section names from its
    string table, and concatenates the matching md_*.BIN files from
    `autodump`. `vm` selects the VM-id prefix used in the file names.
    Returns 0 on success, 1 on any failure.
    """
    # Derive the VM-id prefix used by the dump file naming scheme
    if "autogvm" in vm:
        if kernel_version >= (5, 15, 0):
            vmid = str(vm.split("-")[-1]) + "_"
        else:
            vmid = ""
    elif vm == "oemvm":
        vmid = "31_"
    elif vm == "autoghgvm":
        vmid = "34_"
    elif vm:
        vmid = "2d_"
    else:
        vmid = ""
    # Older dumps use KELF_HEADER.BIN, newer ones KELF_HDR.BIN
    elfhd_old = os.path.join(autodump, "md_" + vmid + "KELF_HEADER.BIN")
    elfhd_new = os.path.join(autodump, "md_" + vmid + "KELF_HDR.BIN")
    if os.path.exists(elfhd_old):
        elfhd = elfhd_old
    elif os.path.exists(elfhd_new):
        elfhd = elfhd_new
    else:
        print_out_str("ELF header binary is missing")
        return 1
    outfile = os.path.join(outdir, "ap_minidump.elf")
    hsize = os.path.getsize(elfhd)
    print_out_str("ELF header size %d" % hsize)
    # Bug fix: use `with` so both handles are closed on every early return
    # (the original leaked fi/fo on the failure paths).
    with open(elfhd, "rb") as fi, open(outfile, "wb") as fo:
        buf = fi.read(hsize)
        fo.write(buf)
        nlist = get_strings(buf, len(buf))
        files = os.listdir(autodump)
        for names in nlist:
            if vm:
                # Bug fix: is_found/file were uninitialized when the
                # directory listing is empty (NameError in the original).
                is_found = False
                file = None
                for file in files:
                    if "autogvm" in vm:
                        if re.match(r'^[0-9]_', names) is None:
                            is_found = fnmatch(file, "md_" + vmid + names + "*.BIN")
                        else:
                            is_found = fnmatch(file, "md_" + names + "*.BIN")
                    else:
                        is_found = fnmatch(file, "md_" + vmid + names + "*.BIN")
                    if is_found:
                        break
                if not is_found:
                    return 1
                ret = add_file(fo, autodump, file)
            else:
                ret = add_file(fo, autodump, "md_" + names + ".BIN")
            if ret == -1:
                return 1
    return 0

View File

@@ -0,0 +1,521 @@
# Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import bitops
from ctypes import *
import math
def page_buddy(ramdump, page):
    """Return True if `page` is sitting in the buddy allocator free lists."""
    if ramdump.kernel_version >= (4, 19, 0):
        # page->page_type starts as all-ones; the PG_buddy bit (0x80) is
        # *cleared* when the type is set, so test for the cleared pattern.
        ptype = ramdump.read_structure_field(page, 'struct page', 'page_type')
        return (ptype & 0xf0000080) == 0xf0000000
    offset = ramdump.field_offset('struct page', '_mapcount')
    # Older kernels: _mapcount == -128 marks a buddy page
    return ramdump.read_int(page + offset) == 0xffffff80
def page_count(ramdump, page):
    """Return the page reference count.

    Commit 0139aa7b7fa1 renamed struct page's _count to _refcount in v4.6,
    so pick the field name based on the dumped kernel version.
    """
    if ramdump.kernel_version < (4, 6, 0):
        field = '_count.counter'
    else:
        field = '_refcount.counter'
    return ramdump.read_structure_field(page, 'struct page', field)
def page_ref_count(ramdump, page):
    """Alias for page_count(): read page->_refcount (page->_count pre-4.6)."""
    return page_count(ramdump, page)
def get_debug_flags(ramdump, page):
    """Read page->debug_flags (only present on CONFIG_PAGE_DEBUG kernels)."""
    offset = ramdump.field_offset('struct page', 'debug_flags')
    return ramdump.read_word(page + offset)
def page_zonenum(page_flags):
    """Extract the zone index (2 bits at position 26) from page->flags."""
    # NOTE(review): bit position is layout-dependent; 26 matches the
    # targets this parser supports.
    return (page_flags & (3 << 26)) >> 26
def page_to_nid(page_flags):
    # Single-node (non-NUMA) targets: the node id is always 0.
    return 0
def page_zone(ramdump, page):
    """Return the address of the struct zone that `page` belongs to.

    Indexes contig_page_data.node_zones[] by the zone number encoded in
    page->flags. Returns None if the flags cannot be read.
    """
    flags = ramdump.read_word(
        page + ramdump.field_offset('struct page', 'flags'))
    if flags is None:
        return None
    zones_base = (ramdump.address_of('contig_page_data') +
                  ramdump.field_offset('struct pglist_data', 'node_zones'))
    return zones_base + page_zonenum(flags) * ramdump.sizeof('struct zone')
def zone_is_highmem(ramdump, zone):
    """Return True if `zone` is the HighMem zone.

    Not how the kernel does it: we simply compare the zone's name string,
    which is sufficient for this parser's purposes.
    """
    if not ramdump.is_config_defined('CONFIG_HIGHMEM'):
        return False
    if zone is None:
        return False
    name_ptr = ramdump.read_word(
        zone + ramdump.field_offset('struct zone', 'name'))
    if name_ptr is None:
        return False
    # read_cstring may return None on a bad pointer; != 'HighMem' then
    return ramdump.read_cstring(name_ptr, 48) == 'HighMem'
def hash32(val, bits):
    """32-bit multiplicative hash using the golden-ratio prime 0x9e370001.

    Returns the top `bits` bits of the truncated 32-bit product.
    """
    product = (val * 0x9e370001) & 0xFFFFFFFF
    return product >> (32 - bits)
def page_slot(ramdump, page):
    """Return the page_address_htable bucket for `page` (128 buckets)."""
    bucket = hash32(page, 7)
    table = ramdump.address_of('page_address_htable')
    return table + ramdump.sizeof('page_address_htable[0]') * bucket
def page_to_section(page_flags):
    """Extract the sparsemem section number (4 bits at 28) from page->flags."""
    # NOTE(review): bit position is layout-dependent; 28 matches the
    # targets this parser supports.
    return (page_flags & 0xF0000000) >> 28
def section_mem_map_addr(ramdump, section):
    """Return the decoded mem_map pointer of a struct mem_section.

    The low two bits of section_mem_map carry state flags and are masked
    off (the kernel's SECTION_MAP_MASK).
    """
    raw = ramdump.read_word(
        section + ramdump.field_offset('struct mem_section', 'section_mem_map'))
    return raw & ~3
def pfn_to_section(ramdump, pfn):
    """Map a pfn to its struct mem_section via the cached Sparsemem helper."""
    return ramdump.mm.sparsemem.pfn_to_section(pfn)
def pfn_to_page_sparse(ramdump, pfn):
    """Sparsemem pfn->page: index the section's mem_map by pfn."""
    section = pfn_to_section(ramdump, pfn)
    base = section_mem_map_addr(ramdump, section)
    return base + pfn * ramdump.sizeof('struct page')
def page_to_pfn_sparse(ramdump, page):
    """Sparsemem page->pfn: locate the owning section's mem_map and divide.

    Returns 0 when page->flags cannot be read.
    """
    flags = ramdump.read_word(
        page + ramdump.field_offset('struct page', 'flags'))
    if flags is None:
        return 0
    # nr_to_section is defined elsewhere in this module
    sect = nr_to_section(ramdump, page_to_section(flags))
    base = section_mem_map_addr(ramdump, sect)
    # offset within the section's mem_map, in struct-page units
    return (page - base) // ramdump.sizeof('struct page')
def get_vmemmap(ramdump):
    """Compute (and cache) the arm64 vmemmap base address.

    The vmemmap layout changed repeatedly across kernel versions, so each
    version range below reproduces that kernel's arithmetic. The result is
    cached on the ramdump object.
    """
    if ramdump.vmemmap is not None:
        return ramdump.vmemmap
    # See: include/asm-generic/pgtable-nopud.h,
    # arch/arm64/include/asm/pgtable-hwdef.h,
    # arch/arm64/include/asm/pgtable.h
    # kernel/arch/arm64/include/asm/memory.h
    nlevels = ramdump.pgtable_levels
    page_shift = ramdump.page_shift
    va_bits = ramdump.va_bits
    pgdir_shift = ramdump.mmu.pgdir_shift
    pud_shift = pgdir_shift
    pud_size = 1 << pud_shift
    spsize = ramdump.sizeof('struct page')
    # vmemmap area is sized to cover the whole VA space, PUD-aligned
    vmemmap_size = bitops.align((1 << (va_bits - page_shift)) * spsize,
                                pud_size)
    memstart_addr = ramdump.read_s64('memstart_addr')
    page_section_mask = ~((1 << 18) - 1)
    memstart_offset = (memstart_addr >> page_shift) & page_section_mask
    memstart_offset *= spsize
    if (ramdump.kernel_version < (3, 18, 31)):
        # vmalloc_end = 0xFFFFFFBC00000000
        vmemmap = ramdump.page_offset - pud_size - vmemmap_size
    elif (ramdump.kernel_version < (4, 9, 0)):
        # for version >= 3.18.31,
        # vmemmap is shifted to base addr (0x80000000) pfn.
        vmemmap = (ramdump.page_offset - pud_size - vmemmap_size -
                   memstart_offset)
    elif ramdump.kernel_version >= (5, 15):
        struct_page_max_shift = int(math.log2(spsize))
        vmemmap_shift = page_shift - struct_page_max_shift
        # two's-complement negation kept in unsigned 64-bit space
        vmemstart = -(1 << (va_bits - vmemmap_shift)) % (1 << 64)
        vmemmap = vmemstart - (memstart_addr >> page_shift)*spsize
    elif ramdump.kernel_version >= (5, 10):
        struct_page_max_shift = int(math.log2(spsize))
        SZ_2M = 0x00200000
        page_end = -(1 << (va_bits - 1)) % (1 << 64)
        vmemsize = ((page_end - ramdump.page_offset) >> (page_shift - struct_page_max_shift))
        vmemstart = ((-vmemsize) % (1 << 64)) - SZ_2M
        vmemmap = vmemstart - (memstart_addr >> page_shift)*spsize
    elif ramdump.kernel_version >= (5, 4, 0):
        # 5.4 exports the value directly as a symbol
        vmemmap = ramdump.read_u64('vmemmap')
    else:
        # for version >= 4.9.0,
        # vmemmap_size = ( 1 << (39 - 12 - 1 + 6))
        struct_page_max_shift = int(math.log2(spsize))
        vmemmap_size = ( 1 << (va_bits - page_shift - 1 + struct_page_max_shift))
        vmemmap = ramdump.page_offset - vmemmap_size - memstart_offset
    ramdump.vmemmap = vmemmap
    return vmemmap
def page_to_pfn_vmemmap(ramdump, page, vmemmap=None):
    """arm64 page->pfn: offset from the vmemmap base in struct-page units."""
    if vmemmap is None:
        vmemmap = get_vmemmap(ramdump)
    return (page - vmemmap) // ramdump.sizeof('struct page')
def pfn_to_page_vmemmap(ramdump, pfn, vmemmap=None):
    """arm64 pfn->page: index the vmemmap array by pfn."""
    if vmemmap is None:
        vmemmap = get_vmemmap(ramdump)
    return vmemmap + pfn * ramdump.sizeof('struct page')
def page_to_pfn_flat(ramdump, page):
    """Flatmem page->pfn via the global mem_map array."""
    mem_map = ramdump.read_word(ramdump.address_of('mem_map'))
    # XXX Needs to change for LPAE
    pfn_base = ramdump.phys_offset >> ramdump.page_shift
    return (page - mem_map) // ramdump.sizeof('struct page') + pfn_base
def pfn_to_page_flat(ramdump, pfn):
    """Flatmem pfn->page via the global mem_map array."""
    mem_map = ramdump.read_word(ramdump.address_of('mem_map'))
    # XXX Needs to change for LPAE
    pfn_base = ramdump.phys_offset >> ramdump.page_shift
    return mem_map + (pfn - pfn_base) * ramdump.sizeof('struct page')
def page_to_pfn(ramdump, page, vmemmap=None):
    """Dispatch page->pfn by memory model (vmemmap / sparsemem / flatmem)."""
    if ramdump.arm64:
        return page_to_pfn_vmemmap(ramdump, page, vmemmap)
    if ramdump.is_config_defined('CONFIG_SPARSEMEM'):
        return page_to_pfn_sparse(ramdump, page)
    return page_to_pfn_flat(ramdump, page)
def pfn_to_page(ramdump, pfn, vmemmap=None):
    """Dispatch pfn->page by memory model (vmemmap / sparsemem / flatmem)."""
    if ramdump.arm64:
        return pfn_to_page_vmemmap(ramdump, pfn, vmemmap)
    if ramdump.is_config_defined('CONFIG_SPARSEMEM'):
        return pfn_to_page_sparse(ramdump, pfn)
    return pfn_to_page_flat(ramdump, pfn)
def sparsemem_lowmem_page_address(ramdump, page):
    """Lowmem virtual address of `page` on two-bank 32-bit sparsemem targets.

    Bank 0 is mapped at page_offset; bank 1 follows bank 0's size.
    """
    bank1_phys = ramdump.read_word(ramdump.address_of('membank1_start'))
    bank0_size = ramdump.read_word(ramdump.address_of('membank0_size'))
    # XXX currently magic
    phys = page_to_pfn(ramdump, page) << ramdump.page_shift
    if phys >= bank1_phys:
        bank1_virt = ramdump.page_offset + bank0_size
        return phys - bank1_phys + bank1_virt
    return phys - ramdump.phys_offset + ramdump.page_offset
def dont_map_hole_lowmem_page_address(ramdump, page):
    """Lowmem address of `page` on targets with an unmapped hole after bank 0.

    Falls back to the membank1_start/membank0_size symbols when the
    memory_hole_* symbols are absent.
    """
    phys = page_to_pfn(ramdump, page) << ramdump.page_shift
    end_sym = ramdump.address_of('memory_hole_end')
    if end_sym is None:
        end_sym = ramdump.address_of('membank1_start')
    offset_sym = ramdump.address_of('memory_hole_offset')
    if offset_sym is None:
        offset_sym = ramdump.address_of('membank0_size')
    hole_end = ramdump.read_word(end_sym)
    hole_offset = ramdump.read_word(offset_sym)
    if hole_end != 0 and phys >= hole_end:
        # Address above the hole: collapse the hole out of the mapping
        return phys - hole_end + hole_offset + ramdump.page_offset
    return phys - ramdump.phys_offset + ramdump.page_offset
def normal_lowmem_page_address(ramdump, page, vmemmap=None):
    """Lowmem (linear-map) virtual address of `page` on standard layouts."""
    phys = page_to_pfn(ramdump, page, vmemmap) << ramdump.page_shift
    if not ramdump.arm64:
        return phys - ramdump.phys_offset + ramdump.page_offset
    if ramdump.kernel_version >= (5, 10):
        memstart_addr = ramdump.read_s64('memstart_addr')
        return phys - memstart_addr + ramdump.page_offset
    if ramdump.kernel_version >= (5, 4, 0):
        # 5.4 introduced physvirt_offset (52-bit VA support); the
        # subtraction can wrap, so normalize back into 64-bit space
        virt = phys - ramdump.read_s64('physvirt_offset')
        if virt < 0:
            virt += 1 << 64
        return virt
    memstart_addr = ramdump.read_s64('memstart_addr')
    return phys - memstart_addr + ramdump.page_offset
def lowmem_page_address(ramdump, page, vmemmap=None):
    """Dispatch lowmem address translation by memory-model configuration."""
    if ramdump.is_config_defined('CONFIG_SPARSEMEM') and not ramdump.arm64:
        return sparsemem_lowmem_page_address(ramdump, page)
    if ramdump.is_config_defined('CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0'):
        return dont_map_hole_lowmem_page_address(ramdump, page)
    return normal_lowmem_page_address(ramdump, page, vmemmap)
def page_address(ramdump, page, vmemmap=None):
    """Return the kernel virtual address of `page`.

    Lowmem pages translate directly; HighMem pages are looked up in the
    kernel's page_address_htable hash of page_address_map entries.
    Returns None if a HighMem page is not currently mapped.
    """
    # NOTE(review): these globals appear to be read elsewhere; left as-is.
    global lh_offset, pam_page_offset, pam_virtual_offset
    if not zone_is_highmem(ramdump, page_zone(ramdump, page)):
        return lowmem_page_address(ramdump, page, vmemmap)
    pas = page_slot(ramdump, page)
    lh_offset = ramdump.field_offset('struct page_address_slot', 'lh')
    start = pas + lh_offset
    pam = start
    # Walk the circular list of page_address_map entries in this bucket
    while True:
        # list_head is embedded at lh_offset; step back to the struct base
        pam = pam - lh_offset
        pam_page_offset = ramdump.field_offset(
            'struct page_address_map', 'page')
        pam_virtual_offset = ramdump.field_offset(
            'struct page_address_map', 'virtual')
        pam_page = ramdump.read_word(pam + pam_page_offset)
        if pam_page == page:
            ret = ramdump.read_word(pam + pam_virtual_offset)
            return ret
        pam = ramdump.read_word(pam + lh_offset)
        if pam == start:
            # Wrapped around: page has no kmap mapping
            return None
def phys_to_virt(ramdump, phys):
    """Convert a physical address to its kernel linear-map virtual address.

    Uses the translation appropriate for the dumped kernel version.
    Returns None for arm64 kernels older than 4.4 (unsupported).
    """
    if not ramdump.arm64:
        return phys - ramdump.phys_offset + ramdump.page_offset
    if ramdump.kernel_version >= (5, 10, 0):
        memstart_addr = ramdump.read_s64('memstart_addr')
        return (phys - memstart_addr) | ramdump.page_offset
    elif ramdump.kernel_version >= (5, 4, 0):
        # 5.4 changed page_offset upstream to support 52-bit VAs and
        # introduced physvirt_offset.
        # Bug fix: the symbol was read as 'physvirt_offset ' (trailing
        # space), which broke the symbol lookup.
        return phys - ramdump.read_s64('physvirt_offset')
    if ramdump.kernel_version < (4, 4, 0):
        return None
    memstart_addr = ramdump.read_s64('memstart_addr')
    return (phys - memstart_addr) | ramdump.page_offset
def is_zram_page(ramdump, page):
    """Best-effort check whether `page` belongs to ZRAM.

    ZRAM pages have PAGE_MAPPING_MOVABLE (bit 1) set in page->mapping.
    Read errors are reported but treated as "not a zram page".
    """
    try:
        mapping = ramdump.read_structure_field(page, 'struct page', 'mapping')
        if mapping & 0x2:
            return True
    except Exception as err:
        # deliberate best-effort: log and fall through to False
        print(err)
    return False
def for_each_pfn(ramdump):
    """Generator over every valid pfn described by the memblock regions.

    Example:
        for i in for_each_pfn(ramdump):
            page = pfn_to_page(i)
    """
    page_size = ramdump.get_page_size()
    region_cnt = ramdump.read_structure_field('memblock', 'struct memblock',
                                              'memory.cnt')
    region = ramdump.read_structure_field('memblock', 'struct memblock',
                                          'memory.regions')
    region_sz = ramdump.sizeof('struct memblock_region')
    for _ in range(region_cnt):
        base = ramdump.read_structure_field(region, 'struct memblock_region',
                                            'base')
        size = ramdump.read_structure_field(region, 'struct memblock_region',
                                            'size')
        # [base, base+size) expressed in whole pages
        yield from range(base // page_size, (base + size) // page_size)
        region += region_sz
"""
All fields should be declared and documented in constructor.
"""
class MemoryManagementSubsystem:
    """Aggregates memory-management state derived from the ramdump.

    All fields are declared in the constructor; SECTION_SIZE_BITS is filled
    in later by section_size_init().
    """

    def __init__(self, ramdump):
        self.rd = ramdump
        # Populated by section_size_init(); 0 until then.
        self.SECTION_SIZE_BITS = 0

    def lookup_page_ext(self, pfn):
        """Return the page_ext pointer covering *pfn*.

        Returns None when CONFIG_PAGE_EXTENSION is not set. On SPARSEMEM
        kernels the pointer lives in the pfn's mem_section; otherwise in
        the flat contig_page_data node.
        """
        rd = self.rd
        if not rd.is_config_defined('CONFIG_PAGE_EXTENSION'):
            return None
        if rd.is_config_defined('CONFIG_SPARSEMEM'):
            section = pfn_to_section(rd, pfn)
            off = rd.field_offset('struct mem_section', 'page_ext')
            return rd.read_word(section + off)
        node = rd.address_of('contig_page_data')
        off = rd.field_offset('struct pglist_data', 'node_page_ext')
        return rd.read_word(node + off)
class Sparsemem:
    """Translates pfns to 'struct mem_section' pointers on CONFIG_SPARSEMEM
    kernels, mirroring the kernel's __pfn_to_section()."""

    def __init__(self, ramdump):
        self.rd = ramdump
        """ Cache section_nr to mem_section_ptr mapping for fast lookup """
        self.memsection_cache = dict()

    def pfn_to_section(self, pfn):
        """Return the address of the 'struct mem_section' covering *pfn*."""
        ramdump = self.rd
        # A section covers 2^(SECTION_SIZE_BITS - page_shift) pages.
        section_nr = pfn >> (self.rd.mm.SECTION_SIZE_BITS - self.rd.page_shift)
        if section_nr in self.memsection_cache:
            return self.memsection_cache[section_nr]
        memsection_struct_size = ramdump.sizeof('struct mem_section')
        pointer_size = ramdump.sizeof('struct mem_section *')
        if ramdump.is_config_defined('CONFIG_SPARSEMEM_EXTREME'):
            # SPARSEMEM_EXTREME packs one page worth of mem_sections per root.
            sections_per_root = ramdump.get_page_size() // memsection_struct_size
        else:
            sections_per_root = 1
        # Split the global section number into (root, index within root).
        # After this, section_nr is the remainder within its root.
        root_nr = section_nr // sections_per_root
        section_nr = section_nr % sections_per_root
        if ramdump.is_config_defined('CONFIG_SPARSEMEM_EXTREME') and \
                ramdump.kernel_version >= (4, 14):
            # Layout on >= 4.14: struct mem_section **mem_section
            # (dynamically allocated array of root pointers).
            mem_section_base = ramdump.read_word('mem_section')
            offset = pointer_size * root_nr
            ptr = ramdump.read_word(mem_section_base + offset)
            offset = memsection_struct_size * section_nr
            mem_section_ptr = ptr + offset
        elif ramdump.is_config_defined('CONFIG_SPARSEMEM_EXTREME') and \
                ramdump.kernel_version < (4, 14):
            # Layout on < 4.14: struct mem_section *mem_section[NR_SECTION_ROOTS]
            mem_section_base = ramdump.address_of('mem_section')
            offset = pointer_size * root_nr
            ptr = ramdump.read_word(mem_section_base + offset)
            offset = memsection_struct_size * section_nr
            mem_section_ptr = ptr + offset
        else:
            # Static layout:
            # struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
            mem_section_base = ramdump.address_of('mem_section')
            offset = memsection_struct_size * (section_nr + root_nr * sections_per_root)
            mem_section_ptr = mem_section_base + offset
        # Cache key reconstructs the original global section number
        # (remainder + root_nr * sections_per_root), matching the lookup
        # key used at the top of this method.
        self.memsection_cache[section_nr + root_nr * sections_per_root] = mem_section_ptr
        return mem_section_ptr
def sparse_init(mm):
    """Attach a Sparsemem helper to *mm* when CONFIG_SPARSEMEM is enabled;
    otherwise leave mm.sparsemem as None. Always returns True."""
    if mm.rd.is_config_defined('CONFIG_SPARSEMEM'):
        mm.sparsemem = Sparsemem(mm.rd)
    else:
        mm.sparsemem = None
    return True
def section_size_init(mm):
    """Compute SECTION_SIZE_BITS for this kernel/arch and store it on *mm*.

    Always returns True.
    """
    rd = mm.rd
    if not rd.arm64:
        # 32-bit ARM uses a fixed 256MB section size.
        bits = 28
    elif rd.kernel_version >= (5, 10, 19):
        # Section size reworked by upstream in v5.10.19.
        small_pages = (rd.is_config_defined('CONFIG_ARM64_4K_PAGES')
                       or rd.is_config_defined('CONFIG_ARM64_16K_PAGES'))
        if small_pages and not rd.is_config_defined('CONFIG_MEMORY_HOTPLUG'):
            bits = 30
        elif small_pages:
            bits = 27
        else:
            bits = 29
    elif not rd.is_config_defined('CONFIG_MEMORY_HOTPLUG'):
        bits = 30
    else:
        # CONFIG_HOTPLUG_SIZE_BITS is a downstream addition (since 4.4).
        bits = int(rd.get_config_val("CONFIG_HOTPLUG_SIZE_BITS"))
    mm.SECTION_SIZE_BITS = bits
    return True
"""
Invoked functions should return True/False on success/Failure
"""
def mm_init(ramdump):
    """Create and initialise ramdump.mm.

    Runs each mm sub-initialiser in order; each returns True/False on
    success/failure. ramdump.mm is only set once all of them succeed.
    """
    mm = MemoryManagementSubsystem(ramdump)
    for initializer in (section_size_init, sparse_init):
        if not initializer(mm):
            return False
    ramdump.mm = mm
    return True
def get_pfn_range(ramdump):
    """Return {'min': pfn, 'max': pfn} bounds of memblock.memory.

    Computed once from the first and last memblock regions and cached on
    ramdump.pfn_range.
    """
    if ramdump.pfn_range is not None:
        return ramdump.pfn_range
    pfn_range = ramdump.pfn_range = {}
    memblock = ramdump.read_datatype('memblock')
    last_index = memblock.memory.cnt - 1
    regions = memblock.memory.regions
    first_region = ramdump.read_datatype(regions, 'struct memblock_region')
    if last_index > 0:
        last_addr = regions + last_index * ramdump.sizeof('struct memblock_region')
        last_region = ramdump.read_datatype(last_addr, 'struct memblock_region')
    else:
        # Single region: it is both the first and the last.
        last_region = first_region
    shift = ramdump.page_shift
    pfn_range['min'] = first_region.base >> shift
    pfn_range['max'] = (last_region.base + last_region.size) >> shift
    return pfn_range

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,64 @@
# Copyright (c) 2017, 2019, 2020, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
from print_out import print_out_str
class module_table_entry():
    """Book-keeping for a single loaded kernel module: its name, load
    offset, symbol file path and cached symbol/kallsyms tables."""

    def __init__(self):
        self.name = ''
        self.module_offset = 0
        self.sym_lookup_table = []
        self.sym_path = ''
        self.kallsyms_addr = 0
        self.kallsyms_table = []
        self.section_offsets = {}

    def num_symbols(self):
        """Number of symbols recorded for this module."""
        return len(self.sym_lookup_table)

    def set_sym_path(self, sym_path):
        """Record *sym_path* if it names an existing file.

        Returns True on success, False (with a log message) otherwise.
        """
        if not os.path.isfile(sym_path):
            print_out_str('sym_path: ' + sym_path + ' not valid or file doesn\'t exist')
            return False
        self.sym_path = sym_path
        return True

    def get_sym_path(self):
        """Return the recorded symbol file path ('' when unset)."""
        return self.sym_path
class module_table_class():
    """Container for module_table_entry objects plus the list of
    directories to search for module symbol files."""

    def __init__(self):
        self.module_table = []
        self.sym_path_list = []

    def add_entry(self, new_entry):
        """Append a module_table_entry to the table."""
        self.module_table.append(new_entry)

    def num_modules(self):
        """Number of modules recorded."""
        return len(self.module_table)

    def add_sym_path(self, sym_path):
        """Append *sym_path* if it names an existing directory.

        Returns True on success, False (with a log message) otherwise.
        """
        if sym_path is None:
            print_out_str('sym_path: not specified!')
            return False
        if not os.path.exists(sym_path):
            print_out_str('sym_path: ' + sym_path + ' not valid or directory doesn\'t exist')
            return False
        self.sym_path_list.append(sym_path)
        return True

    def sym_paths_exist(self):
        """True when at least one symbol search path was registered."""
        return len(self.sym_path_list) > 0

View File

@@ -0,0 +1,255 @@
# Copyright (c) 2013-2015, 2020 The Linux Foundation. All rights reserved.
# Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from print_out import print_out_str
import os
import platform
import glob
import re
import string
import sys
import time
# Registry of ParserConfig entries, populated as a side-effect by the
# @register_parser decorator when parser modules are imported.
_parsers = []
class ParserConfig(object):
    """Pairs a RamParser subclass with its command-line registration:
    long/short option switches, help text, and whether --everything
    should skip it."""

    def __init__(self, cls, longopt, desc, shortopt, optional):
        # Stored verbatim; consumed by the command-line setup code.
        self.__dict__.update(cls=cls, longopt=longopt, desc=desc,
                             shortopt=shortopt, optional=optional)
def cleanupString(unclean_str):
    """Strip non-printable characters from *unclean_str*.

    None is passed through unchanged.
    """
    if unclean_str is None:
        return None
    return ''.join(ch for ch in unclean_str if ch in string.printable)
def import_all_by_path(path):
    """Import every module under *path* (relative to this file's directory).

    The imported modules are expected to register themselves with one of
    the decorators below. Importing an already-imported module is a no-op,
    so calling this repeatedly with the same path is harmless. Silently
    does nothing when the directory does not exist.
    """
    base_dir = os.path.join(os.path.dirname(__file__), path)
    if not os.path.isdir(base_dir):
        return
    package = path.replace(os.sep, '.')
    for module_file in sorted(glob.glob(os.path.join(base_dir, '*.py'))):
        module_name = os.path.splitext(os.path.basename(module_file))[0]
        if module_name == '__init__':
            continue
        __import__(package + '.' + module_name)
def register_parser(longopt, desc, shortopt=None, optional=False):
    """Decorator for registering a parser class.

    The class being decorated should inherit from the ``RamParser``
    class. By using this decorator your parser will automatically be hooked
    up to the command-line parsing code.

    This makes it very easy and clean to add a new parser:

    1. Drop a new file in the ``parsers/`` directory that defines a
       class that inherits from ``RamParser``
    2. Decorate your class with ``@register_parser``
    3. Define a ``parse`` method for your class

    All of the command line argument handling and invoking the parse
    method of your parser will then be handled automatically.

    Example::

        # file: parsers/my_banner.py
        @register_parser('--banner', 'Print the kernel banner')
        class BannerParser(RamParser):
            def parse(self):
                print(self.ramdump.read_cstring('linux_banner', 256, False))

    :param longopt: The longopt command line switch for this parser
    :param desc: A short description of the parser (also shown in the
        help-text associated with the longopt)
    :param shortopt: The shortopt command line switch for this parser.
        This should only be used for maintaining backwards compatibility
        with legacy parsers. Otherwise shortopts are reserved for core
        parser options.
    :param optional: Indicates the parser is optional and should not be run
        with ``--everything``
    """
    def wrapper(cls):
        if cls in [p.cls for p in _parsers]:
            # BUG fix: the original did `cls + ' is already registered!'`,
            # which raises TypeError (class + str) instead of the intended
            # Exception with a readable message.
            raise Exception('{} is already registered!'.format(cls.__name__))
        _parsers.append(ParserConfig(cls, longopt, desc, shortopt, optional))
        return cls
    return wrapper
def get_parsers():
    """Import all parser modules and return the registered ParserConfigs.

    Importing everything under ``parsers`` (and the extensions parsers
    directory) causes each module's @register_parser decorator to run,
    populating the internal _parsers list as a side-effect. Returns that
    list.
    """
    for relpath in ('parsers', os.path.join('extensions', 'parsers')):
        import_all_by_path(relpath)
    return _parsers
def time_cost(func):
    """Decorator that logs how long the wrapped function took to run."""
    # Local import keeps this self-contained; wraps() preserves the wrapped
    # function's __name__/__doc__ (the original decorator clobbered them).
    from functools import wraps

    @wraps(func)
    def wrapper_timer(*args, **kwargs):
        start_time = time.perf_counter()
        value = func(*args, **kwargs)
        end_time = time.perf_counter()
        print_out_str(f"Finished {func.__name__!r} in {(end_time - start_time):.2f} secs")
        return value
    return wrapper_timer
class RamParser(object):
    """Base class for implementing ramdump parsers. New parsers should inherit
    from this class and define a ``parse`` method.

    Interesting properties that will be set for usage in derived
    classes:

    - ramdump:: The RamDump instance being parsed
    """

    def __init__(self, ramdump):
        self.ramdump = ramdump

    def parse(self):
        """Entry point; concrete parsers must override this."""
        raise NotImplementedError

    def parse_param(self):
        '''
        This function provide an interface to pass a parameter to sub-parser
        eg:
        --coredump pid=1 log_level=DEBUG then coredump parser could get pid and log_level by user set
        --mounts pid=1 dump mount info of process with pid = 1
        --mounts proc=surfaceflinger dump mount info of surfaceflinger
        '''
        param = {}
        for arg in sys.argv:
            if "=" in arg:
                # BUG fix: split on the first '=' only. Values such as file
                # paths may themselves contain '='; the old unbounded split
                # raised ValueError on e.g. 'path=a=b'.
                key, val = arg.split('=', 1)
                param[key] = val
        return param
def which(program):
    """Just like which(1).

    Searches the PATH environment variable for a directory containing an
    executable regular file named *program*; returns its full path, or
    None if not found.
    """
    for path in os.environ['PATH'].split(os.pathsep):
        exe_file = os.path.join(path, program)
        # os.access(X_OK) alone also matches executable *directories*;
        # require a regular file, as which(1) does.
        if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
            return exe_file
    return None
def get_system_type():
    """Return a "normalized" version of platform.system().

    CYGWIN is reported as 'Windows' (on certain installs the default
    windows shell runs cygwin, so treat it as Windows for this purpose).
    Returns None for unsupported platforms.
    """
    plat = platform.system()
    if plat == 'Windows' or re.search('CYGWIN', plat) is not None:
        return 'Windows'
    if plat in ('Linux', 'Darwin'):
        return plat
def _get_printable(n, fillchar='.'):
if n is None:
return
c = chr(n)
if c in string.printable[:string.printable.index(' ') + 1]:
return c
return fillchar
def _xxd_line(addr, data):
printable = [_get_printable(d) for d in data]
data = ['{:02x}'.format(d) for d in data]
printable += [' '] * (16 - len(printable))
data += [' '] * (16 - len(data))
return "{:08x}: {:}{:} {:}{:} {:}{:} {:}{:} {:}{:} {:}{:} {:}{:} {:}{:} {:}{:}{:}{:}{:}{:}{:}{:}{:}{:}{:}{:}{:}{:}{:}{:}\n".format(
addr, *(data + printable)
)
def xxd(address, data, file_object=None):
    """Dump *data* (a list of integers) to *file_object* or stdout in the
    format of ``xxd``, 16 bytes per line starting at *address*.

    >>> xxd(0x1000, [0xde, 0xad, 0xbe, 0xef, 112, 105, 122, 122, 97, 0, 0, 42, 43, 44, 45, 90])
    00001000: dead beef 7069 7a7a 6100 002a 2b2c 2d5a ....pizza..*+,-Z
    >>> import StringIO
    >>> sio = StringIO.StringIO()
    >>> xxd(0x400, range(45, 76), sio)
    >>> print(sio.getvalue().strip())
    00000400: 2d2e 2f30 3132 3334 3536 3738 393a 3b3c  -./0123456789:;<
    00000410: 3d3e 3f40 4142 4344 4546 4748 494a 4b    =>?@ABCDEFGHIJK
    """
    out = file_object or sys.stdout
    pending = []
    for byte in data:
        pending.append(byte)
        if len(pending) == 16:
            # Full line: flush and advance the displayed address.
            out.write(_xxd_line(address, pending))
            pending = []
            address += 16
    if len(pending):
        # Trailing partial line.
        out.write(_xxd_line(address, pending))
# Running this module directly executes the docstring doctests.
if __name__ == "__main__":
    import doctest
    doctest.testmod()

View File

@@ -0,0 +1,92 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
import os
from parser_util import register_parser, RamParser
from dmesglib import DmesgLib
from print_out import print_out_str
import linux_list as llist
import parsers.linux_devices as ldevices
from struct_print import struct_print_class
def ARM_SMMU_GR0_S2CR(n):
    """Offset of the S2CR register for stream-mapping group *n* within SMMU
    global register space 0 (base 0xc00, one 4-byte register per group)."""
    return 0xc00 + (n << 2)
@register_parser('--arm_smmu_device', 'arm_smmu_device')
class arm_smmu_device(RamParser):
    """Dump arm-smmu stream-mapping state.

    Writes two files into the output directory:
      - arm_smmu_devices.txt: per 'apps-smmu' device, the S2CR/SMR register
        arrays of its struct arm_smmu_device.
      - arm_smmu_devices_sid.txt: the stream ID (SMR id field) per device.
    """

    def get_arm_smmu_device_info(self):
        ramdump = self.ramdump
        f_path = os.path.join(self.ramdump.outdir, "arm_smmu_devices.txt")
        fout = open(f_path, "w")
        arm_smmu_devices_sid = os.path.join(self.ramdump.outdir, "arm_smmu_devices_sid.txt")
        arm_smmu_devices_sid_fout = open(arm_smmu_devices_sid, "w")
        device = ldevices.DevicesList(self.ramdump)
        device_lists = device.get_device_list(None)
        for item in device_lists:
            name = item[1]
            device = item[0]
            if name is None:
                continue
            if 'apps-smmu' in name:
                drvdata = self.ramdump.read_structure_field(device, 'struct device', 'driver_data')
                if drvdata != 0:
                    # Defaults keep the loop below safe if any of the reads
                    # in the try block fails (previously a failed read left
                    # these names unbound and the loop raised NameError).
                    num_mapping_groups = 0
                    s2crs = smrs = None
                    ptr_size = ptr_size_smrs = 0
                    try:
                        print ("v.v (struct device)0x%x %-64s %-32s 0x%-32x 0x%x" % (item[0], item[1], item[2], item[3], item[4]), file = fout)
                        print (" v.v (struct arm_smmu_device *)0x%x " % (drvdata), file = fout)
                        arm_smmu_device = drvdata
                        num_mapping_groups = self.ramdump.read_structure_field(arm_smmu_device, 'struct arm_smmu_device', 'num_mapping_groups')
                        print("v.v (struct arm_smmu_device *)0x%x num_mapping_groups %d"%(arm_smmu_device, num_mapping_groups), file = fout)
                        arm_smmu_device_datatype = ramdump.read_datatype(arm_smmu_device, 'struct arm_smmu_device')
                        s2crs = arm_smmu_device_datatype.s2crs
                        ptr_size = ramdump.sizeof('struct arm_smmu_s2cr')
                        smrs = arm_smmu_device_datatype.smrs
                        ptr_size_smrs = ramdump.sizeof('struct arm_smmu_smr')
                    except Exception as e:
                        print_out_str(str(e))
                    for i in range(0, num_mapping_groups):
                        try:
                            # BUG fix: the original advanced the array base
                            # cumulatively ('s2crs += i * ptr_size' each
                            # iteration), compounding the offset and reading
                            # past the arrays. Index from the base instead.
                            s2cr_addr = s2crs + i * ptr_size
                            s2crs_data = struct_print_class(ramdump, 'arm_smmu_s2cr', s2cr_addr, fout)
                            s2crs_data.append('group', 'ptr')
                            s2crs_data.append('count', 'u32')
                            s2crs_data.append('type', 'u32')
                            s2crs_data.append('privcfg', 'u32')
                            s2crs_data.append('cbndx', 'u8')
                            s2crs_data.append('pinned', 'u8')
                            s2crs_data.process()
                            cbndx = s2crs_data.get_val('cbndx')
                            reg_addr = ARM_SMMU_GR0_S2CR(cbndx)
                            print("=================================================================0x%x index %x"%(reg_addr, i), file = fout)
                            print("----------------------------- %x"%(s2cr_addr), file = fout)
                            s2crs_data.print_struct()
                            # Same cumulative-offset fix for the SMR array.
                            smr_addr = smrs + i * ptr_size_smrs
                            print("----------------------------- %x"%(smr_addr), file = fout)
                            smrs_data = struct_print_class(ramdump, 'arm_smmu_smr', smr_addr, fout)
                            smrs_data.append('mask', 'u16')
                            smrs_data.append('id', 'u16')
                            smrs_data.append('valid', 'u8')
                            smrs_data.append('pinned', 'u8')
                            smrs_data.process()
                            smrs_data.print_struct()
                        except Exception as e:
                            print_out_str(str(e))
            # Every named device also gets its stream ID printed; the 0xaa
            # defaults are emitted when no iommu/fwspec data is available.
            iommu = self.ramdump.read_structure_field(device, 'struct device', 'iommu')
            ids = 0
            ARM_SMMU_SMR_ID = 0xaa
            ARM_SMMU_SMR_MASK = 0xaa
            try:
                if iommu is not None and iommu != 0:
                    # BUG fix: the field name had a stray leading space
                    # (' fwspec'), which made the offset lookup fail.
                    fwspec = self.ramdump.read_structure_field(iommu, 'struct dev_iommu', 'fwspec')
                    if fwspec is not None and fwspec != 0:
                        ids_offset = self.ramdump.field_offset('struct iommu_fwspec', 'ids')
                        # ids[0]: low 16 bits are the SMR id, high 16 the mask.
                        ids = self.ramdump.read_u32(ids_offset + fwspec)
                        ARM_SMMU_SMR_ID = ids & 0xffff
                        ARM_SMMU_SMR_MASK = (ids >> 16) & 0xffff
                print("name = %-64s SID = 0x%-8x" %(name, ARM_SMMU_SMR_ID), file = arm_smmu_devices_sid_fout)
            except Exception as e:
                print_out_str(str(e))
        arm_smmu_devices_sid_fout.close()
        fout.close()

    def parse(self):
        self.get_arm_smmu_device_info()

View File

@@ -0,0 +1,231 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
import linux_list
import rb_tree
from print_out import print_out_str
from parser_util import register_parser, RamParser, cleanupString
@register_parser('--binder-dump', 'Extract binder information in binder driver', optional=True)
class BinderParser(RamParser) :
    """Walk the binder driver's global binder_procs hlist and print, for each
    binder_proc: its pending todo work, threads, local nodes (with async
    work), remote references, and the allocator's buffer rb-trees."""

    def __init__(self, *args):
        super(BinderParser, self).__init__(*args)
        # Fixed-width table separators/titles and row format strings used
        # by the walkers below when printing to binder_output.txt.
        self.SEPARATOR = "|=================================================================================================|"
        self.TITLE_PROC = "| pid | task | binder_proc | requested | started | dead | todo |"
        self.SEPARATOR_SUB1 = "|--------|------------------|------------------|---------------------|-------|--------------------|"
        self.TITLE_THREAD = "| | | binder_thread | transaction_stack | pid | |"
        self.SEPARATOR_SUB2 = "| | |------------------|---------------------|-------|--------------------|"
        self.TITLE_NODE = "| | | binder_node |has_async_transaction| | async_todo |"
        self.TITLE_REF = "| | | binder_ref | remote binder_node | | |"
        self.PROC_OUTPUT_FMT = "| {0:^6} | {1:^16s} |0x{2:^16X}| {3:^2} | {4:^2} | {5:^1} | |"
        self.THREAD_OUTPUT_FMT1 = "| | |0x{0:^16X}|" + " NULL " + "| {1:^6}| |"
        self.THREAD_OUTPUT_FMT2 = "| | |0x{0:^16X}|" + " 0x{1:^16X} " + "| {2:^6}| |"
        self.NODE_OUTPUT_FMT = "| | |0x{0:^16X}| {1:^1} | | |"
        self.REF_OUTPUT_FMT = "| | |0x{0:^16X}| 0x{1:^16X} | desc {2:^8}| |"
        self.TRANS_FMT = "| | | | | |"
        self.TRANS_FMT1 = self.TRANS_FMT + " NULL |"
        self.TRANS_FMT2 = self.TRANS_FMT + " 0x{0:^16X} |"
        # Offsets used by every walker; resolved once up front.
        self.binder_node_offset = self.ramdump.field_offset('struct binder_node', 'rb_node')
        self.binder_ref_offset = self.ramdump.field_offset('struct binder_ref', 'rb_node_desc')
        self.work_entry_offset = self.ramdump.field_offset('struct binder_work', 'entry')

    def transactions_walker(self, work):
        """Print one binder_transaction from a todo list.

        *work* is the address of the embedded binder_work; stops silently
        when the walk comes back around to the list head (self.work_head).
        """
        if work == self.work_head:
            return
        transaction_work_offset = self.ramdump.field_offset('struct binder_transaction', 'work')
        # container_of(work, struct binder_transaction, work)
        trans = work - transaction_work_offset
        print (self.TRANS_FMT2.format(trans), file=self.outfd)

    # Parse binder_thread node one by one.
    def binder_threads_walker(self, node, extra):
        """rb-tree callback: print one binder_thread and its todo list."""
        thread_node_offset = self.ramdump.field_offset('struct binder_thread', 'rb_node')
        # container_of(node, struct binder_thread, rb_node)
        thread = node - thread_node_offset
        thread_pid_offset = self.ramdump.field_offset('struct binder_thread', 'pid')
        thread_stack_offset = self.ramdump.field_offset('struct binder_thread', 'transaction_stack')
        thread_todo_offset = self.ramdump.field_offset('struct binder_thread', 'todo')
        thread_pid = self.ramdump.read_s32(thread + thread_pid_offset)
        thread_stack = self.ramdump.read_word(thread + thread_stack_offset)
        print (self.SEPARATOR_SUB2, file=self.outfd)
        if thread_stack == 0:
            print (self.THREAD_OUTPUT_FMT1.format(thread, thread_pid), file=self.outfd)
        else:
            print (self.THREAD_OUTPUT_FMT2.format(thread, thread_stack, thread_pid), file=self.outfd)
        # Walk binder_thread.todo list.
        todo_head = thread + thread_todo_offset
        fist_node = self.ramdump.read_word(todo_head)
        works_walker = linux_list.ListWalker(self.ramdump, fist_node, self.work_entry_offset)
        if works_walker.is_empty():
            print (self.TRANS_FMT1, file=self.outfd)
        else:
            # Remember the (fake) work address of the list head so the
            # transactions_walker knows where the circular list ends.
            self.work_head = todo_head - self.work_entry_offset
            works_walker.walk(fist_node, self.transactions_walker)

    # Parse binder_node entries one by one.
    def binder_nodes_walker(self, node, extra):
        """rb-tree callback: print one binder_node and its async_todo list."""
        bnode_offset = self.ramdump.field_offset('struct binder_node', 'rb_node')
        bnode = node - bnode_offset
        bnode_async_offset = self.ramdump.field_offset('struct binder_node', 'has_async_transaction')
        bnode_todo_offset = self.ramdump.field_offset('struct binder_node', 'async_todo')
        # has_async_transaction is a bitfield; extract the single bit.
        has_async = self.ramdump.read_u32(bnode + bnode_async_offset)
        has_async = (has_async >> 4) & 1
        print (self.SEPARATOR_SUB2, file=self.outfd)
        print (self.NODE_OUTPUT_FMT.format(bnode, has_async), file=self.outfd)
        # Walk binder_node.async_todo list.
        todo_head = bnode + bnode_todo_offset
        fist_node = self.ramdump.read_word(todo_head)
        works_walker = linux_list.ListWalker(self.ramdump, fist_node, self.work_entry_offset)
        if works_walker.is_empty():
            print (self.TRANS_FMT1, file=self.outfd)
        else:
            self.work_head = todo_head - self.work_entry_offset
            works_walker.walk(fist_node, self.transactions_walker)

    # Parse binder_ref node one by one.
    def binder_refs_walker(self, node, extra):
        """rb-tree callback: print one binder_ref (remote node + desc)."""
        bref_offset = self.ramdump.field_offset('struct binder_ref', 'rb_node_desc')
        bref = node - bref_offset
        bref_node_offset = self.ramdump.field_offset('struct binder_ref', 'node')
        bref_node = self.ramdump.read_word(bref + bref_node_offset)
        # desc lives inside the embedded struct binder_ref_data.
        data_offset = self.ramdump.field_offset('struct binder_ref', 'data')
        desc_offset = self.ramdump.field_offset('struct binder_ref_data', 'desc')
        desc_addr = bref + desc_offset + data_offset
        desc = self.ramdump.read_u32(desc_addr)
        print (self.SEPARATOR_SUB2, file=self.outfd)
        print (self.REF_OUTPUT_FMT.format(bref, bref_node, desc), file=self.outfd)

    def binder_alloc_allocated_buffers_walker(self, node, extra):
        """rb-tree callback: print one binder_buffer (user_data + free flag).

        Missing/unreadable fields are printed as sentinel values (free=2,
        user_data=0xaa55) rather than aborting the walk.
        """
        rb_node_offset = self.ramdump.field_offset('struct binder_buffer', 'rb_node')
        binder_buffer = node - rb_node_offset
        print("v.v %s (binder_buffer)0x%-32x" % ("%h", binder_buffer), file=self.outfd, end = ' ')
        user_data = self.ramdump.read_structure_field(binder_buffer, 'struct binder_buffer', 'user_data')
        free = self.ramdump.read_structure_field(binder_buffer, 'struct binder_buffer', 'free')
        if free == None:
            free = 2
        else:
            # free is a 1-bit bitfield.
            free = free & 0x1
        if user_data == None:
            user_data = 0xaa55
        print(" user_data 0x%-32x free = %-16d" % (user_data, free), file=self.outfd)
        return

    def parse_binder_procs(self):
        """Iterate the global binder_procs hlist and print each process's
        binder state (allocator trees, todo list, threads, nodes, refs)."""
        hlist_first_offset = self.ramdump.field_offset('struct hlist_head', 'first')
        hlist_next_offset = self.ramdump.field_offset('struct hlist_node', 'next')
        proc_node_offset = self.ramdump.field_offset('struct binder_proc', 'proc_node')
        proc_pid_offset = self.ramdump.field_offset('struct binder_proc', 'pid')
        proc_task_offset = self.ramdump.field_offset('struct binder_proc', 'tsk')
        task_comm_offset = self.ramdump.field_offset('struct task_struct', 'comm')
        requested_threads_offset = self.ramdump.field_offset('struct binder_proc', 'requested_threads')
        started_threads_offset = self.ramdump.field_offset('struct binder_proc', 'requested_threads_started')
        is_dead_offset = self.ramdump.field_offset('struct binder_proc', 'is_dead')
        proc_todo_offset = self.ramdump.field_offset('struct binder_proc', 'todo')
        alloc_offset = self.ramdump.field_offset('struct binder_proc', 'alloc')
        binder_procs_addr = self.ramdump.address_of('binder_procs')
        first_proc_node = self.ramdump.read_word(binder_procs_addr + hlist_first_offset)
        proc_node = first_proc_node
        while proc_node != 0:
            # container_of(proc_node, struct binder_proc, proc_node)
            proc = proc_node - proc_node_offset
            # Get binder_proc fields.
            proc_pid = self.ramdump.read_s32(proc + proc_pid_offset)
            task = self.ramdump.read_word(proc + proc_task_offset)
            task_name = cleanupString(self.ramdump.read_cstring(task + task_comm_offset, 16))
            requested = self.ramdump.read_s32(proc + requested_threads_offset)
            requested_started = self.ramdump.read_s32(proc + started_threads_offset)
            is_dead = self.ramdump.read_s32(proc + is_dead_offset)
            # alloc is embedded in binder_proc, not a pointer.
            alloc = proc + alloc_offset
            print("v.v %s (binder_proc)0x%x" % ("%h", proc), file=self.outfd)
            print("v.v %s (binder_alloc)0x%x" %("%h", alloc), file=self.outfd)
            # Dump the allocator's allocated and free buffer rb-trees.
            print(" allocated_buffers", file=self.outfd)
            allocated_buffers_offset = self.ramdump.field_offset('struct binder_alloc', 'allocated_buffers')
            allocated_buffers_node = allocated_buffers_offset + alloc
            allocated_buffers_node = self.ramdump.read_pointer(allocated_buffers_node)
            rb_walker = rb_tree.RbTreeWalker(self.ramdump)
            rb_walker.walk(allocated_buffers_node, self.binder_alloc_allocated_buffers_walker)
            print(" free_buffers", file=self.outfd)
            allocated_buffers_offset = self.ramdump.field_offset('struct binder_alloc', 'free_buffers')
            allocated_buffers_node = allocated_buffers_offset + alloc
            allocated_buffers_node = self.ramdump.read_pointer(allocated_buffers_node)
            rb_walker = rb_tree.RbTreeWalker(self.ramdump)
            rb_walker.walk(allocated_buffers_node, self.binder_alloc_allocated_buffers_walker)
            print (self.SEPARATOR, file=self.outfd)
            print (self.TITLE_PROC, file=self.outfd)
            print (self.SEPARATOR_SUB1, file=self.outfd)
            print (self.PROC_OUTPUT_FMT.format(proc_pid, task_name, proc, requested, requested_started, is_dead), file=self.outfd)
            # Walk binder_proc.todo list.
            todo_head = proc + proc_todo_offset
            fist_node = self.ramdump.read_word(todo_head)
            works_walker = linux_list.ListWalker(self.ramdump, fist_node, self.work_entry_offset)
            if works_walker.is_empty():
                print (self.TRANS_FMT1, file=self.outfd)
            else:
                self.work_head = todo_head - self.work_entry_offset
                works_walker.walk(fist_node, self.transactions_walker)
            print (self.SEPARATOR_SUB2, file=self.outfd)
            print (self.TITLE_THREAD, file=self.outfd)
            # Walk binder_proc.threads rb_tree.
            proc_threads_offset = self.ramdump.field_offset('struct binder_proc', 'threads')
            proc_threads_node = self.ramdump.read_word(proc + proc_threads_offset)
            rb_walker = rb_tree.RbTreeWalker(self.ramdump)
            rb_walker.walk(proc_threads_node, self.binder_threads_walker)
            print (self.SEPARATOR_SUB2, file=self.outfd)
            print (self.TITLE_NODE, file=self.outfd)
            # Walk binder_proc.nodes rb_tree.
            proc_nodes_offset = self.ramdump.field_offset('struct binder_proc', 'nodes')
            proc_nodes_node = self.ramdump.read_word(proc + proc_nodes_offset)
            rb_walker = rb_tree.RbTreeWalker(self.ramdump)
            rb_walker.walk(proc_nodes_node, self.binder_nodes_walker)
            print (self.SEPARATOR_SUB2, file=self.outfd)
            print (self.TITLE_REF, file=self.outfd)
            # Walk binder_proc.refs_by_desc rb_tree.
            proc_refs_offset = self.ramdump.field_offset('struct binder_proc', 'refs_by_desc')
            proc_refs_node = self.ramdump.read_word(proc + proc_refs_offset)
            rb_walker = rb_tree.RbTreeWalker(self.ramdump)
            rb_walker.walk(proc_refs_node, self.binder_refs_walker)
            # Get the next binder_proc to parse.
            proc_node = self.ramdump.read_word(proc_node + hlist_next_offset)
            print (self.SEPARATOR, file=self.outfd)

    def parse(self):
        """Entry point: write all binder state to binder_output.txt."""
        self.outfd = open(self.ramdump.outdir + "/binder_output.txt", "w")
        self.parse_binder_procs()
        self.outfd.close()
        print_out_str("--- Wrote the output to binder_output.txt")
View File

@@ -0,0 +1,124 @@
# Copyright (c) 2012-2015, 2020 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import struct
from parser_util import register_parser, RamParser
from print_out import print_out_str
# assuming cache way size of 8, fix this for badger probably
# Number of cache ways per set; parse_cache_dump() groups the dumped
# lines into sets of this many ways.
cache_way = 8
def save_l1_dump(ram_dump, cache_base, size):
    """Write *size* raw bytes of the L1 cache dump at *cache_base* to
    l1_cache_dump.bin in the output directory."""
    with ram_dump.open_file('l1_cache_dump.bin', 'wb') as cache_file:
        for offset in range(size):
            byte_val = ram_dump.read_byte(cache_base + offset, False)
            cache_file.write(struct.pack('<B', byte_val))
    print_out_str('--- Wrote cache dump to l1_cache_dump.bin')
def parse_cache_dump(ram_dump, cache_base):
    """Decode the structured L2 cache dump at *cache_base*.

    Writes a human-readable per-set/per-way table (the two tag registers
    plus the 32 data words of every cache line) to l2_cache_dump.txt.
    """
    magic_num_offset = ram_dump.field_offset(
        'struct l2_cache_dump', 'magic_number')
    version_offset = ram_dump.field_offset('struct l2_cache_dump', 'version')
    line_size_offset = ram_dump.field_offset(
        'struct l2_cache_dump', 'line_size')
    total_lines_offset = ram_dump.field_offset(
        'struct l2_cache_dump', 'total_lines')
    cache_offset_struct = ram_dump.field_offset(
        'struct l2_cache_dump', 'cache')
    l2dcrtr0_offset_struct = ram_dump.field_offset(
        'struct l2_cache_line_dump', 'l2dcrtr0_val')
    l2dcrtr1_offset_struct = ram_dump.field_offset(
        'struct l2_cache_line_dump', 'l2dcrtr1_val')
    cache_line_data_offset_struct = ram_dump.field_offset(
        'struct l2_cache_line_dump', 'cache_line_data')
    magic = ram_dump.read_word(cache_base + magic_num_offset, False)
    version = ram_dump.read_word(cache_base + version_offset, False)
    line_size = ram_dump.read_word(cache_base + line_size_offset, False)
    total_lines = ram_dump.read_word(cache_base + total_lines_offset, False)
    cache_file = ram_dump.open_file('l2_cache_dump.txt')
    cache_file.write('Magic = {0:x}\n'.format(magic))
    cache_file.write('version = {0:x}\n'.format(version))
    cache_file.write('line size = {0:x}\n'.format(line_size))
    select = 0
    lines = total_lines // cache_way
    header_str = '({0:4},{1:1}) {2:5} {3:8} '.format(
        'Set', 'Way', 'valid', 'Address')
    # currently assumes 32 bit word like everything else...
    for i in range(0, 32):
        header_str = header_str + '{0:8} '.format('Word{0}'.format(i))
    # BUG fix: the second column was mislabeled 'L2DCRTR0' although the
    # value printed there is l2dcrtr1_val.
    header_str = header_str + '{0:8} {1:8}\n'.format('L2DCRTR0', 'L2DCRTR1')
    cache_ptr = cache_base + cache_offset_struct
    # NOTE(review): the stride below uses the dumped line_size; the original
    # also computed sizeof('struct l2_cache_line_dump') and never used it,
    # which suggests the struct size may have been intended -- confirm
    # against the dump layout.
    for i in range(0, lines):
        cache_file.write(header_str)
        for j in range(0, cache_way):
            cache_line_ptr = cache_ptr + (i * cache_way + j) * line_size
            l2dcrtr0_val = ram_dump.read_word(
                cache_line_ptr + l2dcrtr0_offset_struct, False)
            l2dcrtr1_val = ram_dump.read_word(
                cache_line_ptr + l2dcrtr1_offset_struct, False)
            # this is valid for krait, will probably need to be more generic
            addr = l2dcrtr1_val & 0xFFFE0000
            addr = addr | (select & 0x0001ff80)
            valid = (l2dcrtr0_val >> 14) & 0x3
            out_str = '({0:4},{1:1}) {2:5} {3:8x} '.format(i, j, valid, addr)
            cache_line_data_ptr = cache_line_ptr + \
                cache_line_data_offset_struct
            for k in range(0, 32):
                out_str = out_str + \
                    '{0:0=8x} '.format(
                        ram_dump.read_word(cache_line_data_ptr + 4 * k, False))
            out_str = out_str + \
                '{0:0=8x} {1:0=8x}\n'.format(l2dcrtr0_val, l2dcrtr1_val)
            cache_file.write(out_str)
        select = select + 0x10
    cache_file.close()
    print_out_str('--- Wrote cache dump to l2_cache_dump.txt')
@register_parser('--print-cache-dump', 'Print L2 cache dump', optional=True)
class CacheDump(RamParser):
    """Parser that decodes the saved L2 cache dump, when the target kernel
    was built with CONFIG_MSM_CACHE_DUMP."""

    def parse(self):
        if not self.ramdump.is_config_defined('CONFIG_MSM_CACHE_DUMP'):
            print_out_str(
                '!!! Cache dumping was not enabled. No cache will be dumped')
            return
        # 'l2_dump' is a pointer; dereference it to get the dump base.
        dump_ptr_addr = self.ramdump.address_of('l2_dump')
        parse_cache_dump(self.ramdump, self.ramdump.read_word(dump_ptr_addr))

View File

@@ -0,0 +1,76 @@
# Copyright (c) 2018-2019, 2021 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import cleanupString
from parser_util import register_parser, RamParser
from print_out import print_out_str
from re import finditer
# This is hardcoded since coreboot console is always at below physical address
#addr1 = 0xfffde000
#len1 = 0x20000
# Low 28 bits of the console cursor hold the write offset into the buffer.
CBMC_CURSOR_MASK = ((1 << 28) - 1)
# Bit 31 of the cursor flags that the ring buffer has wrapped at least once.
CBMC_OVERFLOW = (1 << 31)
# Regex (bytes) matching the coreboot bootblock banner that starts a boot's log.
COREBOOT_BOOTBLOCK = b'coreboot-[^\n]* bootblock starting.*\\.\\.\\.\n'
@register_parser('--cbmem', 'Print the coreboot console log', shortopt='-z')
class CBMEM(RamParser):
    """Extract the coreboot in-memory console (CBMEM) from a ramdump.

    Writes the full raw console to cbmem_console.txt and the log of the most
    recent boot (from the last bootblock banner onwards) to cbmem.txt.
    """

    def print_cbmem(self):
        """Read the cbmem console ring buffer and write both output files."""
        # Get the starting address and size of the cbmem console.
        cbmem_console_addr = self.ramdump.read_u64('cbmem_console')
        cbmem_console_size = self.ramdump.read_u32('cbmem_console_size')
        if (cbmem_console_addr is None) or (cbmem_console_size is None):
            print_out_str('cbmem_console structure not found')
            return
        # Read cbmem_console->cursor: low bits are the write offset, the top
        # bit marks that the ring buffer wrapped (see CBMC_* constants).
        cursor_offset = self.ramdump.field_offset('struct cbmem_cons', 'cursor')
        cbmem_console_cusor = self.ramdump.read_u32(cbmem_console_addr+cursor_offset)
        # Convert the console address to physical for the raw reads below.
        addr = self.ramdump.virt_to_phys(cbmem_console_addr)
        cursor = cbmem_console_cusor & CBMC_CURSOR_MASK
        if (not(cbmem_console_cusor & CBMC_OVERFLOW) and cursor < cbmem_console_size):
            # Never wrapped: only the first `cursor` bytes are valid.
            size = cursor
        else:
            size = cbmem_console_size
        if (cbmem_console_cusor & CBMC_OVERFLOW):
            if (cursor >= size):
                print_out_str("cbmem: ERROR: CBMEM console struct is illegal, "
                              "output may be corrupt or out of order!\n\n")
                cursor = 0
            # Wrapped ring buffer: oldest data starts at `cursor`, the
            # newest data is the prefix before it — stitch them in order.
            cbmem = self.ramdump.read_physical(addr+cursor, size-cursor)
            cbmemPart1 = self.ramdump.read_physical(addr, cursor)
            cbmem = cbmem + cbmemPart1
        else:
            cbmem = self.ramdump.read_physical(addr, size)
        cbmem_console_out = self.ramdump.open_file('cbmem_console.txt')
        cbmem_console_out.write(cleanupString(cbmem.decode('ascii', 'ignore')) + '\n')
        cbmem_console_out.close()
        # Keep only the most recent boot: remember the start offset of the
        # LAST bootblock banner and drop everything before it.
        m = 0
        for match in finditer(COREBOOT_BOOTBLOCK, cbmem):
            m = match.start()
        cbmem = cbmem[m:]
        cbmem_out = self.ramdump.open_file('cbmem.txt')
        cbmem_out.write(cleanupString(cbmem.decode('ascii', 'ignore')) + '\n')
        cbmem_out.close()
        print_out_str('Wrote Coreboot console log to cbmem.txt')

    def parse(self):
        """Parser entry point."""
        self.print_cbmem()

View File

@@ -0,0 +1,379 @@
# Copyright (c) 2015,2017-2018 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import linux_list
from print_out import print_out_str
from parser_util import register_parser, RamParser
@register_parser('--clock-dump', 'Dump all the clocks in the system')
class ClockDumps(RamParser):
    """Dump the state of every clock visible in the ramdump.

    Supports both the legacy MSM clock framework (pre-4.9 'clocks' list of
    struct clk_lookup) and the common clock framework ('of_clk_providers'
    list).  Each clock is bucketed as Enabled, Prepared or Disabled and the
    report is written to ClockDumps.txt.
    """

    def __init__(self, *args):
        super(ClockDumps, self).__init__(*args)
        # Buckets of pre-formatted report lines, filled by the walkers.
        self.enabled_clocks = []
        self.disabled_clocks = []
        self.prepared_clocks = []
        # Address of the list head currently being walked; used by the
        # walker callbacks to detect when the walk loops back to the head.
        self.head = ''

    def print_header(self, type, title):
        """Write the section banner and column header for one bucket."""
        if type == 'CLK_PROVIDERS':
            self.output_file.write("--------------------------------------------\n")
            self.output_file.write("{0} from of_clk_providers list\n".format(title))
            self.output_file.write("--------------------------------------------\n")
            str = " {0:40} {1:21} {2:25} {3:10} {4:45} {5:40}\n"
            self.output_file.write(str.format('CLOCK NAME',
                                              'COUNT/PREPARE_COUNT',
                                              'RATE', 'CUR_LEVEL',
                                              'CLOCK STRUCTURE', 'CLOCK_OPS'))
        elif type == 'CLOCKS':
            self.output_file.write("----------------------------------\n")
            self.output_file.write("{0} from clocks list\n".format(title))
            self.output_file.write("----------------------------------\n")
            self.output_file.write(" {0:40} {1:25} {2:20} {3:21} {4:25} {5:20} {6:40}\n".format('CLOCK NAME', 'DEVID', 'CONID', 'COUNT/PREPARE_COUNT', 'RATE', 'CUR_LEVEL', 'CLOCK STRUCTURE'))

    def printclocks(self, type):
        """Emit the three buckets, each line prefixed D/E/P by state."""
        if len(self.disabled_clocks):
            self.print_header(type, "Disabled Clocks")
            for clocks in self.disabled_clocks:
                self.output_file.write('D ' + clocks)
        if len(self.enabled_clocks):
            self.output_file.write("\n")
            self.print_header(type, "Enabled Clocks")
            for clocks in self.enabled_clocks:
                self.output_file.write('E ' + clocks)
        if len(self.prepared_clocks):
            self.output_file.write("\n")
            self.print_header(type, "Prepared Clocks")
            for clocks in self.prepared_clocks:
                self.output_file.write('P ' + clocks)

    def get_clocks(self):
        """Walk the legacy 'clocks' list (struct clk_lookup nodes)."""
        clocks = self.ramdump.address_of('clocks')
        if clocks is None:
            self.output_file.write("NOTE: 'clocks' list not found to extract the clocks information")
            return
        head = self.ramdump.read_word(clocks, True)
        self.head = clocks
        node_offset = self.ramdump.field_offset('struct clk_lookup', 'node')
        clocks_walker = linux_list.ListWalker(self.ramdump, head, node_offset)
        clocks_walker.walk(head, self.clocks_walker)

    def clocks_walker(self, node):
        """Per-node callback for get_clocks(): format one clk_lookup entry."""
        # Stop condition: the walk returned to the list head.
        if node == self.head:
            return
        devid_address = node + self.ramdump.field_offset('struct clk_lookup', 'dev_id')
        devid = self.ramdump.read_cstring(self.ramdump.read_word(devid_address, True), 48)
        conid_address = node + self.ramdump.field_offset('struct clk_lookup', 'con_id')
        conid = self.ramdump.read_cstring(self.ramdump.read_word(conid_address, True), 48)
        clock_address = node + self.ramdump.field_offset('struct clk_lookup', 'clk')
        clk = self.ramdump.read_word(clock_address, True)
        dbg_name_address = clk + self.ramdump.field_offset('struct clk', 'dbg_name')
        dbg_name = self.ramdump.read_cstring(self.ramdump.read_word(dbg_name_address, True), 48)
        rate_address = clk + self.ramdump.field_offset('struct clk', 'rate')
        rate = self.ramdump.read_word(rate_address, True)
        count_address = clk + self.ramdump.field_offset('struct clk', 'count')
        count = self.ramdump.read_u32(count_address, True)
        prepare_count_address = clk + self.ramdump.field_offset('struct clk', 'prepare_count')
        prepare_count = self.ramdump.read_u32(prepare_count_address, True)
        vdd_class_address = clk + self.ramdump.field_offset('struct clk', 'vdd_class')
        vdd_class = self.ramdump.read_word(vdd_class_address, True)
        # cur_level is only meaningful when the clock has a vdd_class.
        if vdd_class != 0:
            cur_level_address = vdd_class + self.ramdump.field_offset('struct clk_vdd_class', 'cur_level')
            cur_level = self.ramdump.read_word(cur_level_address, True)
        else:
            cur_level = "NULL"
        output = "{0:40} {1:<25} {2:20} {3:<2}/ {4:<17} {5:<25} {6:<10} v.v (struct clk *)0x{7:<20x}\n".format(
            dbg_name, devid, conid, count, prepare_count, rate, cur_level, clk)
        # Bucket by enable count first, then prepare count.
        if count > 0:
            self.enabled_clocks.append(output)
        elif prepare_count > 0:
            self.prepared_clocks.append(output)
        else:
            self.disabled_clocks.append(output)

    def get_clk_providers(self):
        """Walk the common-clock-framework 'of_clk_providers' list."""
        clocks = self.ramdump.address_of('of_clk_providers')
        if clocks is None:
            self.output_file.write("NOTE: 'of_clk_providers' list not found to extract the clocks information")
            return
        # Reset the buckets: providers are reported separately from the
        # legacy 'clocks' list.
        self.enabled_clocks = []
        self.disabled_clocks = []
        self.prepared_clocks = []
        self.head = clocks
        head = self.ramdump.read_word(clocks, True)
        # NOTE(review): this uses the 'node' offset of struct clk_lookup to
        # walk struct of_clk_provider entries; it appears to rely on the
        # list field being at the start of both structs — confirm.
        node_offset = self.ramdump.field_offset('struct clk_lookup', 'node')
        clk_providers_walker = linux_list.ListWalker(self.ramdump, head, node_offset)
        clk_providers_walker.walk(head, self.clk_providers_walker)

    def print_clk_of_msm_provider_data(self, data):
        """Dump clocks from a struct of_msm_provider_data table
        (CONFIG_COMMON_CLK_MSM kernels)."""
        table_address = data + self.ramdump.field_offset('struct of_msm_provider_data', 'table')
        size_address = data + self.ramdump.field_offset('struct of_msm_provider_data', 'size')
        table = self.ramdump.read_word(table_address, True)
        size = self.ramdump.read_word(size_address, True)
        counter = 0
        while counter < size:
            clock_address = table + self.ramdump.field_offset('struct clk_lookup', 'clk')
            clk = self.ramdump.read_word(clock_address, True)
            dbg_name_address = clk + self.ramdump.field_offset('struct clk', 'dbg_name')
            dbg_name = self.ramdump.read_cstring(self.ramdump.read_word(dbg_name_address, True), 48)
            rate_address = clk + self.ramdump.field_offset('struct clk', 'rate')
            rate = self.ramdump.read_word(rate_address, True)
            count_address = clk + self.ramdump.field_offset('struct clk', 'count')
            count = self.ramdump.read_u32(count_address, True)
            prepare_count_address = clk + self.ramdump.field_offset('struct clk', 'prepare_count')
            prepare_count = self.ramdump.read_u32(prepare_count_address, True)
            vdd_class_address = clk + self.ramdump.field_offset('struct clk', 'vdd_class')
            vdd_class = self.ramdump.read_word(vdd_class_address, True)
            if vdd_class != 0:
                cur_level_address = vdd_class + self.ramdump.field_offset('struct clk_vdd_class', 'cur_level')
                cur_level = self.ramdump.read_word(cur_level_address, True)
            else:
                cur_level = "NULL"
            output = "{0:40} {1:<2}/ {2:<17} {3:<25} {4:<10} v.v (struct clk *)0x{5:<20x}\n".format(dbg_name, count, prepare_count, rate, cur_level, clk)
            if count > 0:
                self.enabled_clocks.append(output)
            elif prepare_count > 0:
                self.prepared_clocks.append(output)
            else:
                self.disabled_clocks.append(output)
            counter = counter + 1
            # Advance to the next clk_lookup entry in the table.
            table = table + self.ramdump.sizeof('struct clk_lookup')

    def dump_clock(self, clk_core, clk_name):
        """Format one struct clk_core and append it to the proper bucket."""
        offset_vdd_cur_level = self.ramdump.field_offset(
            'struct clk_vdd_class', 'cur_level')
        clk_prepare_count = self.ramdump.read_structure_field(
            clk_core, 'struct clk_core', 'prepare_count')
        clk_enable_count = self.ramdump.read_structure_field(
            clk_core, 'struct clk_core', 'enable_count')
        clk_rate = self.ramdump.read_structure_field(
            clk_core, 'struct clk_core', 'rate')
        vdd_class = self.ramdump.read_structure_field(
            clk_core, 'struct clk_core', 'vdd_class')
        clk_ops = self.ramdump.read_structure_field(
            clk_core, 'struct clk_core', 'ops')
        clk_ops = self.ramdump.unwind_lookup(clk_ops)
        # ops symbol not found — most likely provided by a loaded module.
        if clk_ops is None:
            clk_ops = ["dynamic module"]
        cur_level = 0
        if vdd_class != 0 and vdd_class is not None:
            cur_level_address = (vdd_class + offset_vdd_cur_level)
            cur_level = self.ramdump.read_word(cur_level_address, True)
        formatStr = "{0:40} {1:<2}/ {2:<17} {3:<25} {4:<10} " \
                    "v.v (struct clk_core *)0x{5:<20x} {6:<40}\n"
        output = formatStr.format(
            clk_name,
            clk_enable_count,
            clk_prepare_count,
            clk_rate, cur_level,
            clk_core, clk_ops[0])
        if clk_enable_count > 0:
            self.enabled_clocks.append(output)
        elif clk_prepare_count > 0:
            self.prepared_clocks.append(output)
        else:
            self.disabled_clocks.append(output)

    def print_clk_onecell_data(self, data):
        """Dump clocks from a struct clk_onecell_data provider."""
        offset_clk_onecell_data_clks = (
            self.ramdump.field_offset('struct clk_onecell_data', 'clks'))
        offset_clk_onecell_data_clknum = (
            self.ramdump.field_offset(
                'struct clk_onecell_data', 'clk_num'))
        clks = self.ramdump.read_word(data + offset_clk_onecell_data_clks)
        if (clks == 0 or clks == None):
            return
        size = self.ramdump.read_int(data + offset_clk_onecell_data_clknum)
        sizeof_clk_pointer = self.ramdump.sizeof('struct clk *')
        offset_vdd_cur_level = self.ramdump.field_offset(
            'struct clk_vdd_class', 'cur_level')
        counter = 0
        # Sanity cap: an absurd clk_num means the struct was misread.
        if size > 10000:
            return
        while counter < size:
            clk = self.ramdump.read_word(clks + (sizeof_clk_pointer * counter))
            if clk == 0 or clk is None:
                counter = counter + 1
                continue
            clk_core = self.ramdump.read_structure_field(
                clk, 'struct clk', 'core')
            if clk_core == 0 or clk_core is None:
                counter = counter + 1
                continue
            clk_name_addr = self.ramdump.read_structure_field(
                clk_core, 'struct clk_core', 'name')
            clk_name = self.ramdump.read_cstring(clk_name_addr, 48)
            if (clk_name == 0 or clk_name == None):
                break
            self.dump_clock(clk_core, clk_name)
            counter = counter + 1

    # qcom_cc_clk_hw_get clk is added in kernel 4.9
    def print_clk_qcom_cc_data(self, data):
        """Dump clocks registered through a struct qcom_cc provider."""
        size = self.ramdump.read_structure_field(
            data, 'struct qcom_cc', 'num_rclks')
        clks = self.ramdump.read_structure_field(
            data, 'struct qcom_cc', 'rclks')
        sizeof_clk_regmap = self.ramdump.sizeof('struct clk_regmap *')
        offset_vdd_cur_level = self.ramdump.field_offset(
            'struct clk_vdd_class', 'cur_level')
        counter = 0
        while counter < size:
            # rclks is an array of clk_regmap pointers.
            clk = self.ramdump.read_word(clks +
                                         (sizeof_clk_regmap * counter))
            clk_core = self.ramdump.read_structure_field(
                clk, 'struct clk_regmap', 'hw.core')
            if clk_core == 0 or clk_core is None:
                counter = counter + 1
                continue
            clk_name_addr = self.ramdump.read_structure_field(
                clk_core, 'struct clk_core', 'name')
            clk_name = self.ramdump.read_cstring(clk_name_addr, 48)
            if (clk_name == 0 or clk_name == None):
                break
            self.dump_clock(clk_core, clk_name)
            counter = counter + 1

    # spmi_pmic_div_clk_hw_get clk is added kernel 4.14
    def print_clk_spmi_pmic_data(self, data):
        """Dump clocks from a struct spmi_pmic_div_clk_cc provider."""
        size = self.ramdump.read_structure_field(
            data, 'struct spmi_pmic_div_clk_cc', 'nclks')
        clks = self.ramdump.field_offset(
            'struct spmi_pmic_div_clk_cc', 'clks')
        clks = data + clks
        # 'clks' here is an embedded array of struct clkdiv, not pointers.
        sizeof_clk_regmap = self.ramdump.sizeof('struct clkdiv')
        offset_vdd_cur_level = self.ramdump.field_offset(
            'struct clk_vdd_class', 'cur_level')
        counter = 0
        while counter < size:
            clk = clks + (sizeof_clk_regmap * counter)
            clk_core = self.ramdump.read_structure_field(
                clk, 'struct clkdiv', 'hw.core')
            if clk_core == 0 or clk_core is None:
                counter = counter + 1
                continue
            clk_name_addr = self.ramdump.read_structure_field(
                clk_core, 'struct clk_core', 'name')
            clk_name = self.ramdump.read_cstring(clk_name_addr, 48)
            if (clk_name == 0 or clk_name == None):
                break
            self.dump_clock(clk_core, clk_name)
            counter = counter + 1

    # of_clk_src_simple_get was added for quite a long time, but
    # hasn't been used until clk-dummy.ko is enabled by
    # CONFIG_COMMON_CLK_QCOM on Hypervisor based platforms
    def print_clk_simple(self, data):
        """Dump the single clock of an of_clk_src_simple_get provider."""
        clk = data
        clk_core = self.ramdump.read_structure_field(
            clk, 'struct clk', 'core')
        clk_name_addr = self.ramdump.read_structure_field(
            clk_core, 'struct clk_core', 'name')
        clk_name = self.ramdump.read_cstring(clk_name_addr, 48)
        if clk_name == 0 or clk_name is None:
            return
        self.dump_clock(clk_core, clk_name)

    # of_clk_hw_virtio_get clk was added since kernel 5.4 but
    # only on Hypervisor based platforms
    def print_clk_virtio(self, data):
        """Dump clocks from a struct virtio_clk provider."""
        size = self.ramdump.read_structure_field(
            data, 'struct virtio_clk', 'num_clks')
        clks = self.ramdump.read_structure_field(
            data, 'struct virtio_clk', 'clks')
        sizeof_clk_virtio = self.ramdump.sizeof('struct clk_virtio')
        for counter in range(size):
            clk = clks + (sizeof_clk_virtio * counter)
            clk_core = self.ramdump.read_structure_field(
                clk, 'struct clk_virtio', 'hw.core')
            if clk_core == 0 or clk_core is None:
                continue
            clk_name_addr = self.ramdump.read_structure_field(
                clk_core, 'struct clk_core', 'name')
            clk_name = self.ramdump.read_cstring(clk_name_addr, 48)
            if clk_name == 0 or clk_name is None:
                break
            self.dump_clock(clk_core, clk_name)

    def clk_providers_walker(self, node):
        """Per-node callback for get_clk_providers(): dispatch to the
        printer matching the provider's get/get_hw callback symbol."""
        if node == self.head:
            return
        data_address = node + self.ramdump.field_offset(
            'struct of_clk_provider', 'data')
        data = self.ramdump.read_word(data_address, True)
        getfunc = self.ramdump.read_structure_field(
            node, 'struct of_clk_provider', 'get')
        if getfunc == 0:
            # No legacy .get callback: identify the provider via .get_hw.
            getfunchw = self.ramdump.read_structure_field(
                node, 'struct of_clk_provider', 'get_hw')
            getfunchw = self.ramdump.unwind_lookup(getfunchw)
            if "spmi_pmic_div_clk_hw_get" in getfunchw[0]:
                self.print_clk_spmi_pmic_data(data)
                return
            elif "qcom_cc_clk_hw_get" in getfunchw[0]:
                self.print_clk_qcom_cc_data(data)
                return
            elif "of_clk_hw_virtio_get" in getfunchw[0]:
                self.print_clk_virtio(data)
                return
            else:
                # Unknown provider type: skip it.
                return
        getfunc = self.ramdump.unwind_lookup(getfunc)
        if "of_clk_src_simple_get" in getfunc[0]:
            self.print_clk_simple(data)
        elif self.ramdump.is_config_defined('CONFIG_COMMON_CLK_MSM'):
            self.print_clk_of_msm_provider_data(data)
        else:
            self.print_clk_onecell_data(data)

    def print_a7_cpu_frequency(self):
        """Report the rate of the clock referenced by 'cpu_clks_hws'.

        NOTE(review): treats the symbol value as a 32-bit pointer chain
        (hws[1]->core) — presumably a 32-bit target; confirm.
        """
        cpu_clks_hws = self.ramdump.read_u32('cpu_clks_hws')
        if cpu_clks_hws is not None:
            clk_offset = self.ramdump.read_u32(cpu_clks_hws + 0x4)
            core_offset = self.ramdump.read_u32(clk_offset)
            clk_rate_offset = self.ramdump.field_offset('struct clk_core',
                                                        'rate')
            dbg_name_address = core_offset + self.ramdump.field_offset('struct clk_core', 'name')
            dbg_name = self.ramdump.read_cstring(self.ramdump.read_word(dbg_name_address, True), 48)
            cpu_frequency = self.ramdump.read_u32(core_offset + clk_rate_offset)
            self.output_file.write("{0} = {1} \n".format(dbg_name, cpu_frequency))

    def parse(self):
        """Parser entry point: write ClockDumps.txt."""
        self.output_file = self.ramdump.open_file('ClockDumps.txt')
        # The legacy 'clocks' list only exists on pre-4.9 kernels.
        if (self.ramdump.kernel_version < (4, 9, 0)):
            self.get_clocks()
            self.printclocks('CLOCKS')
        self.get_clk_providers()
        self.printclocks('CLK_PROVIDERS')
        self.print_a7_cpu_frequency()
        self.output_file.close()
        print_out_str("--- Wrote the output to ClockDumps.txt")

View File

@@ -0,0 +1,95 @@
# Copyright (c) 2021 The Linux Foundation. All rights reserved.
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import re
import os
import struct
import string
import glob
from parser_util import register_parser, RamParser
@register_parser('--print-svm-vcpu-ctx', 'Print svm vcpu context')
class svm_vcpu_context_parse(RamParser):
    """Extract SVM / OEM-VM vCPU register context from per-core T32 .cmm
    scripts and write symbolized PC/LR plus an unwound backtrace per core."""

    def svm_vcpu_context_parse(self, fop):
        """Parse corevcpu<N>_vm_*.cmm files (one directory above outdir)
        and write the per-core context to *fop*."""
        opfile = fop
        for core in range(0, 2):
            input_file = "corevcpu{0}_vm_*.cmm".format(core)
            # The .cmm scripts live one directory above the output
            # directory; os.path.join keeps this portable (the previous
            # "\.." concatenation only worked on Windows).
            file_path_list = glob.glob(
                os.path.join(self.ramdump.outdir, "..", input_file))
            for each_file in file_path_list:
                input_file_cmm = each_file
                # SVM dumps use VM id 45, OEM-VM dumps use VM id 49 —
                # skip files for any other VM.
                if "svm" in self.ramdump.hw_id and "vm_45" not in each_file:
                    continue
                if "oemvm" in self.ramdump.hw_id and "vm_49" not in each_file:
                    continue
                if not os.path.exists(input_file_cmm):
                    print("{0} does not exist".format(input_file_cmm))
                    continue
                fp = 0
                sp = 0
                lr = 0
                pc = 0
                pc_flag = False
                lr_flag = False
                sp_flag = False
                fp_flag = False
                # Take the FIRST occurrence of each register-set command.
                # 'with' guarantees the script file is closed (the old
                # code leaked the handle).
                with open(input_file_cmm, "r") as fd:
                    for line in fd:
                        columns = line.split()
                        if "r.s pc" in line and pc_flag is False:
                            pc_flag = True
                            pc = int(columns[-1], 16)
                        if "r.s x30" in line and lr_flag is False:
                            lr_flag = True
                            lr = int(columns[-1], 16)
                        if "r.s x29" in line and fp_flag is False:
                            fp_flag = True
                            fp = int(columns[-1], 16)
                        if "r.s sp_el1" in line and sp_flag is False:
                            sp_flag = True
                            sp = int(columns[-1], 16)
                opfile.write("Core {0} context\n".format(core))
                # Symbolize PC, falling back to UNKNOWN for unmapped values.
                a = self.ramdump.unwind_lookup(pc)
                if a is not None:
                    symname, offset = a
                else:
                    symname = 'UNKNOWN'
                    offset = 0
                opfile.write(
                    'Core {3} PC: {0}+{1:x} <0x{2:x}>'.format(symname, offset,
                                                              pc, core))
                opfile.write("\n")
                # Symbolize LR the same way.
                a = self.ramdump.unwind_lookup(lr)
                if a is not None:
                    symname, offset = a
                else:
                    symname = 'UNKNOWN'
                    offset = 0
                opfile.write(
                    'Core {3} LR: {0}+{1:x} <0x{2:x}>'.format(symname, offset,
                                                              lr, core))
                opfile.write("\n")
                opfile.write('')
                self.ramdump.unwind.unwind_backtrace(sp, fp, pc, lr, '', opfile)
                opfile.write('')
                opfile.write("\n")
        opfile.close()

    def parse(self):
        """Parser entry point: write vm_vcpu_context.txt."""
        with self.ramdump.open_file('vm_vcpu_context.txt') as fop:
            self.svm_vcpu_context_parse(fop)
        return

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,630 @@
# Copyright (c) 2015-2017, 2020 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import linux_list
from print_out import print_out_str
from parser_util import register_parser, RamParser
from collections import defaultdict
# ctrl_type value identifying a CPRh (hardware closed-loop) controller;
# compared against struct cpr3_controller.ctrl_type throughout this parser.
CPRH_CTRL_TYPE = 2
@register_parser('--cpr3-info', 'Print CPR3 information')
class CPR3Info(RamParser):
def __init__(self, *args):
    """Initialize report buckets and the field lists used while dumping."""
    super(CPR3Info, self).__init__(*args)
    # Address of the list head being walked; walkers stop when they
    # loop back to it.
    self.head = ''
    # Scalar fields of interest read from the CPR structures.
    self.cprinfo_fields = ['speed_bin', 'cpr_fuse_revision',
                           'cpr_fuse_map_match', 'num_fuse_corners',
                           'num_corners', 'corner']
    # Per-corner voltage fields, in display order.
    self.voltages = ['ceiling_volt', 'open_loop_volt',
                     'last_volt', 'floor_volt']
    # Per-corner quotient/mapping fields.
    self.corner_info = ['cpr_fuse_target_quot',
                        'quot_adjust', 'corner_map']
    self.value_list = defaultdict(list)
    self.attr_list = defaultdict(list)
    # Accumulated report lines, written out by the caller.
    self.output = []
    self.consumer_head = ''
def get_cpr(self):
    """Walk the global cpr3_controller_list and dump each controller.

    Writes a note to the output file and returns when the symbol is not
    present in this kernel.
    """
    # Return if the cpr3_controller_list is not available
    cpr = self.ramdump.address_of('cpr3_controller_list')
    if cpr is None:
        # Fixed: the note used to name 'cpr3_regulator_list', which is
        # not the symbol actually looked up above.
        self.output_file.write(
            "NOTE: 'cpr3_controller_list' list " +
            "not found to extract cpr information")
        return
    head = self.ramdump.read_word(cpr)
    self.head = cpr
    node_offset = self.ramdump.field_offset('struct cpr3_controller',
                                            'list')
    c_w = linux_list.ListWalker(self.ramdump, head, node_offset)
    c_w.walk(head, self.cpr_walker)
def get_kryo(self):
    """Walk kryo_regulator_list and report each Kryo LDO/BHS regulator."""
    list_addr = self.ramdump.address_of('kryo_regulator_list')
    if list_addr is None:
        print_out_str(
            "NOTE: 'kryo_regulator_list' list "
            "not found to extract kryo_addr information")
        return
    first_node = self.ramdump.read_word(list_addr)
    self.head = list_addr
    link_offset = self.ramdump.field_offset('struct kryo_regulator',
                                            'link')
    walker = linux_list.ListWalker(self.ramdump, first_node, link_offset)
    # Section banner written once before the per-regulator entries.
    banner = "=" * 80 + "\n"
    self.output.append(
        banner + "Kryo Regulator (LDO/BHS management)\n" + banner)
    walker.walk(first_node, self.kryo_walker)
def dump_cpr3_regulator_voltages(self, vreg_addr):
    """Tabulate floor/open-loop/ceiling/cached-closed-loop voltages and
    target quotients for every corner of one cpr3_regulator."""
    corner_count = self.ramdump.read_int(
        vreg_addr +
        self.ramdump.field_offset(
            'struct cpr3_regulator',
            'corner_count'))
    tmp = 'CPR voltages(uV) and Target Quotients\n'
    tmp += "%-7s%-3s%-10s%-10s%-10s%-21s%-40s\n" % (
        "Corner", "-", "Floor", "Open-Loop", "Ceiling",
        "Cached closed-loop",
        "Target Quotients")
    self.output.append(tmp)
    base_addr = self.ramdump.read_word(
        vreg_addr +
        self.ramdump.field_offset('struct cpr3_regulator', 'corner'))
    size = self.ramdump.sizeof('struct cpr3_corner')
    for i in range(corner_count):
        corner_addr = base_addr + size * i
        try:
            self.dump_cpr3_corner_info(corner_addr, i + 1, 1, 1)
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any parse failure aborts the table.
        except Exception:
            self.output.append(
                "Note: Failed to dump cpr3 regulator voltage\n")
            return
def dump_cpr3_corner_info(self, aggr_corner_addr, corner_num,
                          print_quots, list_format):
    """Append one cpr3_corner's voltages (and optionally its target
    quotients) to self.output.

    list_format == 0 emits a labelled key/value paragraph; any other
    value emits one row of the tabular listing.  print_quots == 1 also
    reads the corner's target_quot array.
    """
    if aggr_corner_addr is None:
        return
    ceiling = self.ramdump.read_int(
        aggr_corner_addr +
        self.ramdump.field_offset(
            'struct cpr3_corner', 'ceiling_volt'))
    open_loop = self.ramdump.read_int(
        aggr_corner_addr +
        self.ramdump.field_offset(
            'struct cpr3_corner', 'open_loop_volt'))
    last = self.ramdump.read_int(
        aggr_corner_addr +
        self.ramdump.field_offset(
            'struct cpr3_corner', 'last_volt'))
    floor = self.ramdump.read_int(
        aggr_corner_addr +
        self.ramdump.field_offset(
            'struct cpr3_corner', 'floor_volt'))
    quots = ""
    if print_quots == 1:
        t0 = aggr_corner_addr + self.ramdump.field_offset(
            'struct cpr3_corner', 'target_quot')
        size = self.ramdump.sizeof('u32')
        # Number of array elements = total array size / element size.
        num = self.ramdump.sizeof('((struct cpr3_corner*)0)->target_quot') \
            // size
        for i in range(num):
            quot = self.ramdump.read_u32(t0 + i * size)
            quots = quots + " " + str(quot)
    if list_format == 0:
        tmp = '%-30s = %d uV\n' % ("Ceiling volt", ceiling)
        tmp += '%-30s = %d uV\n' % ("Open-loop", open_loop)
        tmp += '%-30s = %d uV\n' % ("Cached closed-loop", last)
        tmp += '%-30s = %d uV\n' % ("Floor", floor)
        self.output.append(tmp)
        if print_quots == 1:
            tmp = "\n%-30s = %s\n" % ("Target quotients", quots)
            self.output.append(tmp)
        self.output.append("\n")
    else:
        tmp = "%-7d%-3s%-10d%-10d%-10d%-20d" % (
            corner_num, "-", floor, open_loop, ceiling, last)
        tmp += quots
        tmp += "\n"
        self.output.append(tmp)
def dump_vdd_regulator(self, ctrl_addr):
    """Report the PMIC supply backing one cpr3_controller's vdd_regulator,
    including the currently-programmed voltage for QPNP/SPM regulators."""
    tmp = ""
    vdd_reg_addr = self.ramdump.read_word(
        ctrl_addr +
        self.ramdump.field_offset(
            'struct cpr3_controller', 'vdd_regulator'))
    if vdd_reg_addr is None:
        return
    rdev_addr = self.ramdump.read_word(
        vdd_reg_addr +
        self.ramdump.field_offset(
            'struct regulator',
            'rdev'))
    reg_data_addr = self.ramdump.read_word(
        rdev_addr +
        self.ramdump.field_offset('struct regulator_dev', 'reg_data'))
    desc_addr = self.ramdump.read_word(
        rdev_addr +
        self.ramdump.field_offset('struct regulator_dev', 'desc'))
    desc_ops_addr = self.ramdump.read_word(
        desc_addr +
        self.ramdump.field_offset('struct regulator_desc', 'ops'))
    name_addr = self.ramdump.read_word(
        desc_addr +
        self.ramdump.field_offset('struct regulator_desc', 'name'))
    name = self.ramdump.read_cstring(name_addr, 48)
    tmp += '\n%-30s = %s\n' % ("PMIC supply", name)
    # Identify the regulator driver by the symbol name of its ops table.
    duple = self.ramdump.unwind_lookup(desc_ops_addr)
    function_name = duple[0]
    if "qpnp" in function_name:
        # QPNP-regulator: voltage = selector * step + min of its range.
        set_points_addr = self.ramdump.read_word(
            reg_data_addr + self.ramdump.field_offset(
                'struct qpnp_regulator',
                'set_points'))
        range_addr = self.ramdump.read_word(
            set_points_addr +
            self.ramdump.field_offset(
                'struct qpnp_voltage_set_points', 'range'))
        step_uV = self.ramdump.read_int(
            range_addr +
            self.ramdump.field_offset(
                'struct qpnp_voltage_range',
                'step_uV'))
        min_uV = self.ramdump.read_int(
            range_addr +
            self.ramdump.field_offset(
                'struct qpnp_voltage_range', 'min_uV'))
        # NOTE(review): reads the byte at ctrl_reg offset + 1 as the
        # voltage selector — presumably the second cached control
        # register; confirm against the qpnp-regulator driver layout.
        volt_sel = self.ramdump.read_byte(
            reg_data_addr + 1 +
            self.ramdump.field_offset(
                'struct qpnp_regulator', 'ctrl_reg'))
        volt = (volt_sel * step_uV) + min_uV
        tmp += "%-30s = %d uV\n" % ("PMIC voltage", volt)
    if "spm" in function_name:
        last_set_volt = self.ramdump.read_int(
            reg_data_addr + self.ramdump.field_offset(
                'struct spm_vreg', 'last_set_uV'))
        tmp += "%-30s = %d uV\n" % ("PMIC last set voltage",
                                    last_set_volt)
    self.output.append(tmp)
def get_apm_threshold(self, addr_ctrl):
    """Report the controller's APM threshold voltage and, when an APM
    controller is attached, which supply (APCC/MX) it selected."""
    apm_addr = self.ramdump.read_word(
        addr_ctrl + self.ramdump.field_offset('struct cpr3_controller',
                                              'apm'))
    if apm_addr is None:
        return
    apm_thresh_volt = self.ramdump.read_int(
        addr_ctrl + self.ramdump.field_offset(
            'struct cpr3_controller', 'apm_threshold_volt'))
    # A zero threshold means APM switching is not configured.
    if apm_thresh_volt == 0:
        return
    tmp = '%-30s = %d uV\n' % ("APM threshold", apm_thresh_volt)
    # NULL apm pointer: threshold is known but no controller to query.
    if apm_addr == 0:
        self.output.append(tmp)
        return
    apm_supply = self.ramdump.read_int(
        apm_addr + self.ramdump.field_offset('struct msm_apm_ctrl_dev',
                                             'supply'))
    if apm_supply is None:
        print_out_str("could not read APM supply")
    elif apm_supply == 0:
        tmp += '%-30s = %s\n' % ("APM supply", "APCC")
    elif apm_supply == 1:
        tmp += '%-30s = %s\n' % ("APM supply", "MX")
    self.output.append(tmp)
def get_aging_info(self, ctrl_addr):
    """Report the CPR aging-measurement status for one controller.

    Appends a (possibly empty) status string to self.output; returns
    immediately on kernels whose struct cpr3_controller has no aging
    fields at all.
    """
    # Older kernels lack the aging fields entirely.
    if self.ramdump.field_offset(
            'struct cpr3_controller', 'aging_required') is None:
        return

    def read_flag(field):
        # Read one boolean member of this struct cpr3_controller.
        return self.ramdump.read_bool(
            ctrl_addr +
            self.ramdump.field_offset('struct cpr3_controller', field))

    required = read_flag('aging_required')
    succeeded = read_flag('aging_succeeded')
    failed = read_flag('aging_failed')
    lines = ""
    if required or succeeded or failed:
        if succeeded:
            adjust_volt = self.ramdump.read_int(
                ctrl_addr +
                self.ramdump.field_offset(
                    'struct cpr3_controller', 'aging_ref_adjust_volt'))
            lines += '%-30s = %s\n' % ("Aging measurement", "succeeded")
            lines += '%-30s = %d uV\n' % ("Aging adjustment voltage",
                                          adjust_volt)
        elif failed:
            lines += '%-30s = %s\n' % ("Aging measurement", "failed")
        else:
            lines += '%-30s = %s\n' % ("Aging measurement",
                                       "not yet executed")
    self.output.append(lines)
def dump_cpr3_regulator_state(self, vreg_addr, ctrl_type):
    """Dump the full state of one cpr3_regulator: fuses, current corner,
    enable/LDO state, per-corner voltages, and (non-CPRh) consumers."""
    tmp = ""
    if vreg_addr is None:
        return
    name_addr = self.ramdump.read_word(
        vreg_addr + self.ramdump.field_offset(
            'struct cpr3_regulator', 'name'))
    name = self.ramdump.read_cstring(name_addr, 48)
    tmp += "-" * 80 + "\n"
    tmp += "Regulator: %s\n" % name
    tmp += "-" * 80 + "\n"
    vreg_enabled = self.ramdump.read_bool(
        vreg_addr + self.ramdump.field_offset(
            'struct cpr3_regulator',
            'vreg_enabled'))
    current_corner = self.ramdump.read_int(
        vreg_addr + self.ramdump.field_offset(
            'struct cpr3_regulator', 'current_corner'))
    corner_count = self.ramdump.read_int(
        vreg_addr + self.ramdump.field_offset(
            'struct cpr3_regulator',
            'corner_count'))
    ldo_regulator_addr = self.ramdump.read_word(
        vreg_addr + self.ramdump.field_offset(
            'struct cpr3_regulator', 'ldo_regulator'))
    ldo_mode_allowed = self.ramdump.read_bool(
        vreg_addr + self.ramdump.field_offset(
            'struct cpr3_regulator', 'ldo_mode_allowed'))
    cpr_rev_fuse = self.ramdump.read_int(
        vreg_addr + self.ramdump.field_offset(
            'struct cpr3_regulator', 'cpr_rev_fuse'))
    # ldo_mode is only defined (and only used below) when the regulator
    # actually has an LDO regulator attached.
    if ldo_regulator_addr != 0:
        ldo_mode_bool = self.ramdump.read_bool(
            vreg_addr + self.ramdump.field_offset(
                'struct cpr3_regulator',
                'ldo_regulator_bypass'))
        if ldo_mode_bool:
            ldo_mode = "LDO"
        else:
            ldo_mode = "BHS"
    tmp += "%-30s = %d\n" % ("CPR fuse revision", cpr_rev_fuse)
    fuse_combo = self.ramdump.read_int(
        vreg_addr +
        self.ramdump.field_offset(
            'struct cpr3_regulator', 'fuse_combo'))
    tmp += "%-30s = %d\n" % ("CPR fuse combo", fuse_combo)
    speed_bin_fuse = self.ramdump.read_int(
        vreg_addr +
        self.ramdump.field_offset(
            'struct cpr3_regulator',
            'speed_bin_fuse'))
    tmp += "%-30s = %d\n" % ("Speed-bin fuse", speed_bin_fuse)
    # CPRh controllers manage the corner in hardware; current_corner is
    # only meaningful for the other controller types.
    if ctrl_type != CPRH_CTRL_TYPE:
        tmp += "\n%-30s = %d/%d\n" % ("CPR corner", current_corner + 1,
                                      corner_count)
    if vreg_enabled is True:
        vreg_enabled = 1
    else:
        vreg_enabled = 0
    tmp += "%-30s = %d\n" % ("Enabled", vreg_enabled)
    if ldo_regulator_addr != 0:
        if ldo_mode_allowed is True:
            ldo_mode_allowed = 1
        else:
            ldo_mode_allowed = 0
        tmp += "\n%-30s = %d\n" % ("LDO mode allowed", ldo_mode_allowed)
        tmp += "%-30s = %s\n" % ("LDO/BHS mode", ldo_mode)
    tmp += "\nCurrent CPR voltages:\n"
    self.output.append(tmp)
    tmp = ""
    corner_addr = self.ramdump.read_word(
        vreg_addr + self.ramdump.field_offset(
            'struct cpr3_regulator', 'corner'))
    size = self.ramdump.sizeof("struct cpr3_corner")
    # For non-CPRh controllers, index into the corner array at the
    # currently-selected corner.
    if ctrl_type != CPRH_CTRL_TYPE:
        corner_addr = corner_addr + current_corner * size
    self.dump_cpr3_corner_info(corner_addr, 0, 1, 0)
    self.dump_cpr3_regulator_voltages(vreg_addr)
    rdev_addr = self.ramdump.read_word(
        vreg_addr + self.ramdump.field_offset('struct cpr3_regulator',
                                              'rdev'))
    offset = self.ramdump.field_offset('struct regulator_dev',
                                       'consumer_list')
    if ctrl_type != CPRH_CTRL_TYPE:
        self.dump_consumer(rdev_addr + offset)
def dump_cpr3_thread_state(self, thread_addr, ctrl_type):
    """Dump one cpr3_thread: its id, aggregated corner voltages (for
    non-CPRh controllers) and the state of each attached regulator."""
    tmp = ""
    thread_id = self.ramdump.read_u32(
        thread_addr + self.ramdump.field_offset(
            'struct cpr3_thread', 'thread_id'))
    aggr_corner_addr = thread_addr + self.ramdump.field_offset(
        'struct cpr3_thread', 'aggr_corner')
    tmp += "-" * 80 + "\n"
    tmp += "Thread: %d\n" % thread_id
    tmp += "-" * 80 + "\n"
    # Aggregated voltages only apply to non-CPRh controller types.
    if ctrl_type != CPRH_CTRL_TYPE:
        tmp += "CPR aggregated voltages:\n"
    self.output.append(tmp)
    if ctrl_type != CPRH_CTRL_TYPE:
        self.dump_cpr3_corner_info(aggr_corner_addr, 0, 1, 0)
    vreg_addr = self.ramdump.read_word(
        thread_addr +
        self.ramdump.field_offset('struct cpr3_thread', 'vreg'))
    vreg_count = self.ramdump.read_int(
        thread_addr +
        self.ramdump.field_offset(
            'struct cpr3_thread', 'vreg_count'))
    size_reg = self.ramdump.sizeof('struct cpr3_regulator')
    # vreg points at an array of cpr3_regulator; dump each element.
    for i in range(vreg_count):
        self.dump_cpr3_regulator_state(vreg_addr + i * size_reg, ctrl_type)
    def cpr_walker(self, ctrl_addr):
        """linux_list walker callback: dump one cpr3_controller's state.

        Appends the controller banner, derived CPR operating mode,
        aggregated-corner voltages and every thread's state to self.output.
        Returns early (dumping nothing) when handed the list head, when the
        thread pointer is unreadable, or when the first thread does not point
        back at this controller (a basic sanity check).
        """
        # The list walker hands back the head node at the end; skip it.
        if ctrl_addr == self.head:
            return
        cpr_controller_name_addr = self.ramdump.read_word(
            ctrl_addr + self.ramdump.field_offset('struct cpr3_controller',
                                                  'name'))
        cpr_controller_name = self.ramdump.read_cstring(
            cpr_controller_name_addr, 48)
        supports_hw_closed_loop = self.ramdump.read_bool(
            ctrl_addr + self.ramdump.field_offset(
                'struct cpr3_controller',
                'supports_hw_closed_loop'))
        use_hw_closed_loop = self.ramdump.read_bool(
            ctrl_addr + self.ramdump.field_offset(
                'struct cpr3_controller',
                'use_hw_closed_loop'))
        cpr_allowed_sw = self.ramdump.read_bool(
            ctrl_addr + self.ramdump.field_offset(
                'struct cpr3_controller',
                'cpr_allowed_sw'))
        cpr_allowed_hw = self.ramdump.read_bool(
            ctrl_addr + self.ramdump.field_offset(
                'struct cpr3_controller',
                'cpr_allowed_hw'))
        cpr_enabled = self.ramdump.read_bool(
            ctrl_addr + self.ramdump.field_offset(
                'struct cpr3_controller', 'cpr_enabled'))
        ctrl_type = self.ramdump.read_int(
            ctrl_addr + self.ramdump.field_offset(
                'struct cpr3_controller',
                'ctrl_type'))
        # Derive a human-readable operating-mode label from the flag
        # combination; CPRH controllers report their modes differently.
        if cpr_allowed_sw == 0 or cpr_allowed_hw == 0:
            cpr_mode = "open-loop"
        elif supports_hw_closed_loop == 1 and ctrl_type != CPRH_CTRL_TYPE:
            if use_hw_closed_loop == 0:
                cpr_mode = "SW closed-loop"
            else:
                cpr_mode = "HW closed-loop"
        elif supports_hw_closed_loop == 1 and ctrl_type == CPRH_CTRL_TYPE:
            if use_hw_closed_loop == 0:
                cpr_mode = "open-loop"
            else:
                cpr_mode = "full HW closed-loop"
        else:
            cpr_mode = "closed-loop"
        thread_addr = self.ramdump.read_word(
            ctrl_addr +
            self.ramdump.field_offset(
                'struct cpr3_controller', 'thread'))
        if thread_addr is None:
            return
        # Sanity check: the first thread must point back at this controller.
        thread_ctrl_addr = self.ramdump.read_word(
            thread_addr +
            self.ramdump.field_offset('struct cpr3_thread', 'ctrl'))
        if cpr_controller_name is None or thread_ctrl_addr != ctrl_addr:
            return
        tmp = ""
        tmp += "=" * 80 + "\n"
        tmp += 'CPR3 controller state: %s\n' % cpr_controller_name
        tmp += "=" * 80 + "\n"
        tmp += '%-30s = %s\n' % ("CPR mode", cpr_mode)
        tmp += '%-30s = %d\n' % ("CPR loop currently operating",
                                 cpr_enabled)
        self.output.append(tmp)
        tmp = ""
        self.get_apm_threshold(ctrl_addr)
        self.get_aging_info(ctrl_addr)
        if ctrl_type != CPRH_CTRL_TYPE:
            self.dump_vdd_regulator(ctrl_addr)
        # Explanatory footnote about how to interpret the PMIC voltage when a
        # HW closed loop is in control.  Note tmp stays "" when neither
        # branch matches, so an empty string is appended in that case.
        if cpr_allowed_sw == 1 and use_hw_closed_loop == 1 and ctrl_type != CPRH_CTRL_TYPE:
            tmp = "* The actual voltage at the PMIC may be anywhere " \
                  "between the aggregated ceiling and floor voltage when"\
                  " using CPR HW closed-loop mode.\n"
        elif ctrl_type == CPRH_CTRL_TYPE:
            tmp = "* With full HW closed-loop operation, the expected PMIC " \
                  "voltage can be checked via the CPRH_STATUS and " \
                  "L2_SAW4_PMIC_STS registers in the DCC register dump.\n"
        self.output.append(tmp)
        tmp = ""
        if ctrl_type != CPRH_CTRL_TYPE:
            aggr_corner_addr = ctrl_addr + self.ramdump.field_offset(
                'struct cpr3_controller', 'aggr_corner')
            self.output.append("\nCPR aggregated voltages:\n")
            self.dump_cpr3_corner_info(aggr_corner_addr, 0, 0, 0)
        thread_count = self.ramdump.read_int(
            ctrl_addr +
            self.ramdump.field_offset(
                'struct cpr3_controller', 'thread_count'))
        size_thr = self.ramdump.sizeof('struct cpr3_thread')
        # Threads live in a contiguous array starting at thread_addr.
        for i in range(thread_count):
            self.dump_cpr3_thread_state(thread_addr + i * size_thr, ctrl_type)
        # print new line for each regulator struct
        tmp += '\n'
        self.output.append(tmp)
def kryo_walker(self, kryo_addr):
if kryo_addr == self.head:
return
retention_mode = self.ramdump.read_int(
kryo_addr +
self.ramdump.field_offset(
'struct kryo_regulator', 'retention_mode'))
mode = self.ramdump.read_int(
kryo_addr + self.ramdump.field_offset('struct kryo_regulator',
'mode'))
if mode == 0:
mode = "BHS"
else:
mode = "LDO"
if retention_mode == 0:
retention_mode = "BHS"
else:
retention_mode = "LDO"
volt = self.ramdump.read_int(kryo_addr + self.ramdump.field_offset(
'struct kryo_regulator', 'volt'))
retention_volt = self.ramdump.read_int(
kryo_addr +
self.ramdump.field_offset(
'struct kryo_regulator',
'retention_volt'))
vreg_en = self.ramdump.read_bool(
kryo_addr +
self.ramdump.field_offset(
'struct kryo_regulator',
'vreg_en'))
vref_func_step_volt = self.ramdump.read_int(
kryo_addr +
self.ramdump.field_offset(
'struct kryo_regulator',
'vref_func_step_volt'))
vref_func_min_volt = self.ramdump.read_int(
kryo_addr +
self.ramdump.field_offset(
'struct kryo_regulator',
'vref_func_min_volt'))
vref_func_max_volt = self.ramdump.read_int(
kryo_addr + self.ramdump.field_offset(
'struct kryo_regulator', 'vref_func_max_volt'))
vref_ret_step_volt = self.ramdump.read_int(
kryo_addr + self.ramdump.field_offset(
'struct kryo_regulator', 'vref_ret_step_volt'))
vref_ret_min_volt = self.ramdump.read_int(
kryo_addr + self.ramdump.field_offset(
'struct kryo_regulator', 'vref_ret_min_volt'))
vref_ret_max_volt = self.ramdump.read_int(
kryo_addr + self.ramdump.field_offset(
'struct kryo_regulator', 'vref_ret_max_volt'))
name_addr = self.ramdump.read_word(
kryo_addr +
self.ramdump.field_offset('struct kryo_regulator', 'name'))
name = self.ramdump.read_cstring(name_addr, 48)
tmp = ""
tmp += "-" * 80 + "\n"
tmp += "Regulator: %s\n" % name
tmp += "-" * 80 + "\n"
tmp += "%-30s = %d\n" % ("Enabled", vreg_en)
tmp += "%-30s = %s\n" % ("Mode", mode)
tmp += "%-30s = %d uV\n" % ("Voltage", volt)
tmp += "%-30s = %s\n" % ("Retention Mode", retention_mode)
tmp += "%-30s = %d uV\n" % ("Retention Voltage", retention_volt)
tmp += "%-30s = %d uV\n" % ("Vref Functional Step Voltage",
vref_func_step_volt)
tmp += "%-30s = %d uV\n" % ("Vref Functional Min Voltage",
vref_func_min_volt)
tmp += "%-30s = %d uV\n" % ("Vref Functional Max Voltage",
vref_func_max_volt)
tmp += "%-30s = %d uV\n" % ("Vref Retention Step Voltage",
vref_ret_step_volt)
tmp += "%-30s = %d uV\n" % ("Vref Retention Min Voltage",
vref_ret_min_volt)
tmp += "%-30s = %d uV\n" % ("Vref Retention Max Voltage",
vref_ret_max_volt)
self.output.append(tmp)
rdev_addr = self.ramdump.read_word(
kryo_addr + self.ramdump.field_offset('struct kryo_regulator',
'rdev'))
offset = self.ramdump.field_offset('struct regulator_dev',
'consumer_list')
self.dump_consumer(rdev_addr + offset)
def dump_consumer(self, consumer_head):
tmp = ""
tmp += "\nConsumers:\n"
tmp += "%-48s%-10s%-10s%-10s\n" % ("Device-Supply", "EN", "Min_Uv",
"Max_Uv")
self.output.append(tmp)
node_offset = self.ramdump.field_offset('struct regulator', 'list')
self.consumer_head = consumer_head
c_w = linux_list.ListWalker(self.ramdump, consumer_head, node_offset)
c_w.walk(consumer_head, self.consumer_walker)
self.output.append("\n")
def consumer_walker(self, reg_addr):
if reg_addr + self.ramdump.field_offset('struct regulator', 'list') \
== self.consumer_head:
return
min_uV = self.ramdump.read_int(
reg_addr +
self.ramdump.field_offset('struct regulator', 'min_uV'))
max_uV = self.ramdump.read_int(
reg_addr +
self.ramdump.field_offset('struct regulator', 'max_uV'))
enabled = self.ramdump.read_int(
reg_addr + self.ramdump.field_offset('struct regulator',
'enabled'))
if enabled == 1:
enabled = 'Y'
else:
enabled = 'N'
name_addr = self.ramdump.read_word(
reg_addr +
self.ramdump.field_offset('struct regulator', 'supply_name'))
name = self.ramdump.read_cstring(name_addr, 64)
tmp = "%-48s%-10s%-10d%-10d\n" % (name, enabled, min_uV, max_uV)
self.output.append(tmp)
def parse(self):
self.output_file = self.ramdump.open_file('cpr3_info.txt')
self.get_cpr()
self.get_kryo()
for i in self.output:
self.output_file.write(i)
print_out_str("--- Wrote the output to cpr3_info.txt")
self.output_file.close()

View File

@@ -0,0 +1,145 @@
# Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import linux_list
from print_out import print_out_str
from parser_util import register_parser, RamParser, cleanupString
from collections import defaultdict
@register_parser('--cpr-info', 'Print CPR information')
class CPRInfo(RamParser):
    """Dump CPR (Core Power Reduction) regulator state from a ramdump.

    Walks the kernel's cpr_regulator_list and, for each registered
    regulator, writes its scalar attributes, per-corner voltage tables and
    computed target quotients to cprinfo.txt.
    """

    def __init__(self, *args):
        super(CPRInfo, self).__init__(*args)
        self.head = ''
        # Scalar struct cpr_regulator fields printed one per line.
        self.cprinfo_fields = ['speed_bin', 'cpr_fuse_revision', 'cpr_fuse_map_match', 'num_fuse_corners', 'num_corners', 'corner']
        # Per-corner int arrays holding voltages in uV.
        self.voltages = ['ceiling_volt', 'open_loop_volt', 'last_volt', 'floor_volt']
        # Per-corner arrays used to compute target quotients.
        self.corner_info = ['cpr_fuse_target_quot', 'quot_adjust', 'corner_map']
        self.value_list = defaultdict(list)   # array values, keyed by field
        self.attr_list = defaultdict(list)    # scalar values, keyed by field
        self.output = []

    def print_cpr_target_quot(self):
        """Append one 'Target quotient' row, computed per virtual corner."""
        tmp = '{0:20}'.format('Target quotient')
        for i in range(self.attr_list['num_corners']):
            # corner_map translates a virtual corner to its fuse corner
            # (1-indexed), whose fuse quotient is then adjusted.
            a = self.value_list['corner_map'][i]
            b = self.value_list['cpr_fuse_target_quot'][a-1] - self.value_list['quot_adjust'][i]
            tmp += '{0:10} '.format(b)
        tmp += '\n'
        self.output.append(tmp)

    def print_cpr_info(self):
        """Append the RO-select values and the per-corner voltage table."""
        tmp = ''
        # Print RO_SEL value
        num_fuse_corn = self.attr_list['num_fuse_corners']
        if num_fuse_corn is not None:
            self.output.append('{:40}{:10d}\n'.format('ro_sel', self.value_list['cpr_fuse_ro_sel'][num_fuse_corn-1]))
        # Print all available RO_SEL values
        tmp += '{:40}'.format('cpr_fuse_ro_sel')
        for ro_sel in self.value_list['cpr_fuse_ro_sel']:
            tmp += '{:10} '.format(ro_sel)
        tmp += '\n\n'
        self.output.append(tmp)
        tmp = ''
        self.output.append('{:20}'.format('Corner'))
        for i in range(self.attr_list['num_corners']):
            tmp += '{:10} '.format(i + 1)
        tmp += '\n'
        for volt in self.voltages:
            tmp += '{:20}'.format(volt)
            for i in self.value_list[volt]:
                tmp += '{:10} '.format(i)
            tmp += '\n'
        self.output.append(tmp)

    def get_cpr(self):
        """Walk cpr_regulator_list, dumping every registered regulator."""
        # Return if the cpr_regulator_list is not available
        cpr = self.ramdump.address_of('cpr_regulator_list')
        if cpr is None:
            self.output_file.write("NOTE: 'cpr_regulator_list' list not found to extract cpr information")
            return
        head = self.ramdump.read_word(cpr)
        self.head = cpr
        node_offset = self.ramdump.field_offset('struct cpr_regulator', 'list')
        cpr_walker = linux_list.ListWalker(self.ramdump, head, node_offset)
        cpr_walker.walk(head, self.cpr_walker)

    def get_cpr_fuse_ro_sel(self, node):
        """Collect the per-fuse-corner ring-oscillator select values."""
        entry_offset = self.ramdump.sibling_field_addr(node, 'struct cpr_regulator', 'list', 'cpr_fuse_ro_sel')
        entry_addr = self.ramdump.read_word(entry_offset)
        # Corner arrays are 1-indexed; element 0 is unused.
        i = 1
        while i <= self.attr_list['num_fuse_corners']:
            value = self.ramdump.read_int(self.ramdump.array_index(entry_addr, "int", i))
            self.value_list['cpr_fuse_ro_sel'].append(value)
            i += 1

    def get_cpr_volts(self, node, listing):
        """Read num_corners entries from each per-corner int array named in
        *listing* into self.value_list."""
        num_corn = self.attr_list['num_corners']
        # (A no-op "i = 1; while i <= num_corn: i += 1" loop that preceded
        # this block in the original has been removed — it had no effect.)
        for entry in listing:
            entry_offset = self.ramdump.sibling_field_addr(node, 'struct cpr_regulator', 'list', entry)
            entry_addr = self.ramdump.read_word(entry_offset)
            # Corner arrays are 1-indexed; element 0 is unused.
            i = 1
            while i <= num_corn:
                value = self.ramdump.read_int(self.ramdump.array_index(entry_addr, "int", i))
                self.value_list[entry].append(value)
                i += 1

    def get_cpr_attrs(self, node):
        """Print the scalar attributes and remember them in attr_list."""
        for attr in self.cprinfo_fields:
            attr_offset = self.ramdump.field_offset('struct cpr_regulator', attr)
            if attr_offset is not None:
                value = self.ramdump.read_s32(node + attr_offset)
                self.attr_list[attr] = value
                tmp = '{:40}{:10}\n'.format(attr, value)
                self.output.append(tmp)
        attr_offset = self.ramdump.field_offset('struct cpr_regulator', 'cpr_fuse_redundant')
        if attr_offset is not None:
            value = self.ramdump.read_bool(node + attr_offset)
            # add an extra line here as this is the last attribute before the corner table
            tmp = '{:40} {:10}\n'.format('cpr_fuse_redundant', int(value))
            self.output.append(tmp)

    def cpr_walker(self, node):
        """linux_list walker callback: dump one struct cpr_regulator."""
        # The walker hands back the list head at the end; skip it.
        if node == self.head:
            return
        rdesc_addr = self.ramdump.sibling_field_addr(node, 'struct cpr_regulator', 'list', 'rdesc')
        rdesc_ptr = self.ramdump.read_word(rdesc_addr + self.ramdump.field_offset('struct regulator_desc', 'name'))
        cpr_name = self.ramdump.read_cstring(rdesc_ptr, 48)
        cpr_enable = self.ramdump.read_u32(node + self.ramdump.field_offset('struct cpr_regulator', 'enable'))
        vdd_apc_addr = self.ramdump.read_word(self.ramdump.sibling_field_addr(node, 'struct cpr_regulator', 'list', 'vdd_apc'))
        vdd_apc_uv = self.ramdump.read_u32(vdd_apc_addr + self.ramdump.field_offset('struct regulator', 'min_uV'))
        self.output.append("{:40}{:10s}\n".format('CPR Regulator', cpr_name))
        self.output.append("{:40}{:10}\n".format('CPR Enabled', cpr_enable))
        self.output.append("{:40}{:10d}\n".format('Current Voltage', vdd_apc_uv))
        self.get_cpr_attrs(node)
        self.get_cpr_volts(node, self.voltages)
        self.get_cpr_volts(node, self.corner_info)
        self.get_cpr_fuse_ro_sel(node)
        self.print_cpr_info()
        self.print_cpr_target_quot()
        # print new line for each regulator struct
        self.output.append('\n')
        # Reset per-regulator state before the next list node.
        self.attr_list.clear()
        self.value_list.clear()

    def parse(self):
        """Entry point registered for --cpr-info."""
        self.output_file = self.ramdump.open_file('cprinfo.txt')
        self.get_cpr()
        for i in self.output:
            self.output_file.write(i)
        self.output_file.close()

View File

@@ -0,0 +1,65 @@
# Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from itertools import cycle
from parser_util import register_parser, RamParser
from print_out import print_out_str
@register_parser('--cpu-state', "Reads register values of non-panic'ing CPUs")
class CpuState(RamParser):
    """Dump the pt_regs snapshot saved in regs_before_stop for every CPU."""

    def parse(self):
        """Print the saved register set of each CPU, four registers per line."""
        regs_base = self.ramdump.address_of('regs_before_stop')
        if regs_base is None:
            print_out_str('regs_before_stop not found. Nothing to do.')
            return
        # Register order mirrors pt_regs and its #defines in
        # arch/arm/include/asm/ptrace.h.
        reg_names = (
            'r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9',
            'r10', 'fp', 'ip', 'sp', 'lr', 'pc', 'cpsr',
        )
        width = max(len(n) for n in reg_names)
        for cpu in self.ramdump.iter_cpus():
            print_out_str('CPU %d' % cpu)
            pieces = []
            for idx, nm in enumerate(reg_names):
                slot = self.ramdump.array_index(
                    regs_base, 'unsigned long', idx)
                val = self.ramdump.read_word(slot, cpu=cpu)
                pieces.append(
                    ' {0:{width}} = 0x{1:x}'.format(nm, val, width=width))
            # Join with ', ' and break the line after every fourth register.
            text = ''
            for idx, piece in enumerate(pieces):
                text += piece + ('\n' if idx % 4 == 3 else ', ')
            print_out_str(text)

View File

@@ -0,0 +1,175 @@
# Copyright (c) 2014-2015, 2017, 2019-2020, The Linux Foundation. All rights reserved.
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import struct
import re
import sys
import subprocess
from print_out import print_out_str
from parser_util import register_parser, RamParser
@register_parser('--ddr-compare', 'Sanity check the DDR data to find possible corruptions')
class DDRCompare(RamParser) :
    """Heuristic DDR-corruption checker.

    Two checks are run and their results written to DDRCacheCompare.txt:
      * statically initialised spinlock magic values from the vmlinux .data
        section are compared against the same addresses in the dump;
      * the kernel task list is walked looking for broken pointers,
        bad CPU numbers and mismatched sched_class pointers.
    """

    def compare_magic(self):
        """Diff the spinlock magic words (dead4ead/deaf1eed) found in the
        vmlinux .data disassembly against the values in the dump."""
        self.output_file.write("----------------------------------------------------------------------------------------\n")
        self.output_file.write("Comparing statically initialized lock values from vmlinux and ramdumps\n")
        self.output_file.write("----------------------------------------------------------------------------------------\n")
        # The magic words only exist when spinlock debugging is compiled in.
        if not self.ramdump.is_config_defined('CONFIG_DEBUG_SPINLOCK'):
            self.output_file.write('Kernel Configuration for debug spinlocks is not enabled, cannot comapre the magic values!!\n\n')
            return
        elif self.ramdump.objdump_path is None:
            self.output_file.write("!!! Objdump path is not set, please use --objdump-path option to specify the path\n\n")
            return
        # Disassemble only the .data section to find initialised lock magics.
        cmdarr = [self.ramdump.objdump_path, "-D", "-j.data", self.ramdump.vmlinux]
        p = subprocess.Popen(cmdarr, stdout=subprocess.PIPE, universal_newlines=True)
        output = p.communicate()[0]
        foundcorruption = 0;
        for line in output.split('\n'):
            # Expects lines of the form "<addr>: <magic> .word 0x<magic>".
            # NOTE(review): the '\\2' backreference and the trailing '?'
            # quantifiers look fragile — verify against the objdump version
            # actually in use.
            m = re.search("^(.*?):\s+(dead4ead|deaf1eed?)\s+\.word\s+(0x\\2?)", line)
            if m:
                virtual = m.group(1)
                virtual = int(m.group(1), 16)
                # Word-align the virtual address before translation.
                bitcheck = virtual & 0x3
                if bitcheck:
                    virtual = virtual - bitcheck
                physical = self.ramdump.virt_to_phys(virtual + self.ramdump.get_kaslr_offset())
                # Normalise the dump value to 8 lowercase hex digits for a
                # string comparison with the objdump text.
                magic = hex(self.ramdump.read_u32(physical, False)).rstrip("L").lstrip("0x").zfill(8)
                if (m.group(2) != magic):
                    foundcorruption = 1;
                    self.output_file.write("Magic didn't match for virtual address {0}\n".format("0x"+m.group(1)))
                    # Dump the two words on either side of the mismatching
                    # word for context (preceding words first).
                    for i in range(2):
                        physical = physical - 4
                        dumpvalue = hex(self.ramdump.read_u32(physical, False)).rstrip("L").lstrip("0x").zfill(8)
                        self.output_file.write("{0}\n".format(dumpvalue))
                    physical = physical + 8
                    self.output_file.write("{0}\n".format(magic))
                    for i in range(2):
                        physical = physical + 4
                        dumpvalue = hex(self.ramdump.read_u32(physical, False)).rstrip("L").lstrip("0x").zfill(8)
                        self.output_file.write("{0}\n".format(dumpvalue))
        if (foundcorruption == 0):
            self.output_file.write("No Corruption found in the lock values\n\n")

    def validate_sched_class(self, address):
        """Return -1 if the task's sched_class pointer is not one of the
        known scheduler classes; implicitly returns None otherwise."""
        sc_stop = self.ramdump.address_of('stop_sched_class')
        sc_rt = self.ramdump.address_of('rt_sched_class')
        sc_idle = self.ramdump.address_of('idle_sched_class')
        sc_fair = self.ramdump.address_of('fair_sched_class')
        sched_class_offset = address + self.ramdump.field_offset('struct task_struct', 'sched_class');
        sched_class_pointer = self.ramdump.read_word(sched_class_offset, True)
        if not ((sched_class_pointer == sc_stop) or (sched_class_pointer == sc_rt) or (sched_class_pointer == sc_idle) or (sched_class_pointer == sc_fair)):
            self.output_file.write(hex(address) + " seems to be corrupted! sched_class doesn't match with the defined ones\n")
            return -1;

    def validate_task_struct(self, address):
        """Return -1 if the task_struct/thread_info cross-links or the CPU
        number look corrupted; implicitly returns None otherwise."""
        thread_info_address = self.ramdump.get_thread_info_addr(address)
        if self.ramdump.is_thread_info_in_task():
            # Newer kernels embed thread_info in task_struct itself.
            task_struct = address
        else:
            task_address = thread_info_address + self.ramdump.field_offset('struct thread_info', 'task');
            task_struct = self.ramdump.read_word(task_address, True)
        cpu_number = self.ramdump.get_task_cpu(task_struct, thread_info_address)
        if((address != task_struct) or (thread_info_address == 0x0)):
            self.output_file.write(hex(address) + " seems to be corrupted! Please check task_struct and thread_info to find corruptions\n")
            return -1
        if((cpu_number < 0) or (cpu_number >= self.ramdump.get_num_cpus())):
            self.output_file.write(hex(address) + " seems to be corrupted! CPU number " + str(int(cpu_number)) + " seems to be corrupted\n")
            return -1

    def check_thread_group(self, address, comm_offset_index):
        """Walk the thread group of *address*, logging each member.

        NOTE(review): this helper always returns None, so the caller's
        '== -1' check in corruptionchecker() can never fire for it.
        """
        output_str = ""
        threads_count = 0
        for task_addr in self.ramdump.for_each_thread(address):
            threads_count += 1
            if(task_addr <= 0x0):
                output_str += "task_struct " + hex(task_addr) + " was corrupted\n"
                break
            comm_offset = task_addr + comm_offset_index
            comm = self.ramdump.read_cstring(comm_offset, 16, True)
            output_str += "Next = {0} ({1})\n".format(hex(task_addr).rstrip("L"), comm)
        # Only report groups that actually contain more than one thread.
        if threads_count > 1:
            self.output_file.write("-----------------------------------\n")
            self.output_file.write("Threads of 0x{0:x}\n".format(address))
            self.output_file.write("-----------------------------------\n")
            self.output_file.write(output_str)

    def corruptionchecker(self):
        """Walk init_task's circular 'tasks' list end-to-end, validating
        every entry, and report whether any corruption was found."""
        self.output_file.write("----------------------------------------------------------------------------------------\n")
        self.output_file.write("Checking for task list corruption.\n")
        self.output_file.write("----------------------------------------------------------------------------------------\n")
        init_task = self.ramdump.address_of('init_task')
        self.output_file.write("Init Task Address = {0}\n".format(hex(init_task)))
        tasks_offset = self.ramdump.field_offset('struct task_struct', 'tasks')
        self.output_file.write("Task Offset {0}\n".format(hex(tasks_offset).rstrip("L")))
        comm_offset = self.ramdump.field_offset('struct task_struct', 'comm')
        self.output_file.write("Comm Offset {0}\n\n".format(hex(comm_offset).rstrip("L")))
        seen_task = []
        next = init_task;
        found_corruption = 0
        while 1:
            # Follow the circular list; a NULL pointer means it is broken.
            tasks_pointer = self.ramdump.read_word(next + tasks_offset, True)
            if(tasks_pointer == 0x0):
                found_corruption = 1
                break
            # Convert list node address back to the enclosing task_struct.
            task_struct = tasks_pointer - tasks_offset
            comm = self.ramdump.read_cstring(task_struct + comm_offset, 16, True)
            if (self.validate_task_struct(task_struct) == -1):
                found_corruption = 1
                break
            if (self.validate_sched_class(task_struct) == -1):
                found_corruption = 1
                break
            if (self.check_thread_group(task_struct, comm_offset) == -1):
                found_corruption = 1
                break
            if task_struct in seen_task:
                self.output_file.write("!!!! Cycle in task group! The list is corrupt!\n")
                break
            self.output_file.write("Next = {0} ({1})\n".format(hex(task_struct).rstrip("L"), comm))
            seen_task.append(task_struct)
            next = task_struct;
            # Wrapping back around to init_task means the walk completed.
            if (next == init_task):
                break
        if(found_corruption):
            self.output_file.write("----------------------------------------\n")
            self.output_file.write("RESULT: Corruption found in the task list\n")
            self.output_file.write("----------------------------------------\n")
        else:
            self.output_file.write("----------------------------------------\n")
            self.output_file.write("RESULT: No issues found in the task list\n")
            self.output_file.write("----------------------------------------\n")

    def parse(self):
        """Entry point registered for --ddr-compare."""
        self.output_file = self.ramdump.open_file('DDRCacheCompare.txt')
        self.compare_magic()
        self.corruptionchecker()
        self.output_file.close()
        print_out_str("--- Wrote the output to DDRCacheCompare.txt")

View File

@@ -0,0 +1,211 @@
# Copyright (c) 2012-2015, 2020 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import struct
from parser_util import register_parser, RamParser
from print_out import print_out_str
from qdss import QDSSDump
from parsers.cachedump import save_l1_dump, parse_cache_dump
from parsers.watchdog import TZRegDump
from debug_image_v2 import DebugImage_v2
# Magic value expected at the start of every QDSS trace dump region.
QDSS_MAGIC = 0x5D1DB1Bf

# Maps a dump_client_type enum name to the DebugImage method that knows how
# to parse that client's memory region.
print_table = {
    'MSM_CPU_CTXT': 'parse_cpu_ctx',
    'MSM_L1_CACHE': 'parse_l1_cache',
    'MSM_L2_CACHE': 'parse_l2_cache',
    'MSM_OCMEM': 'parse_ocmem',
    'MSM_TMC0_REG': 'parse_qdss_common',
    'MSM_TMC_ETFETB': 'parse_qdss_common',
    'MSM_TMC1_REG': 'parse_qdss_common',
    'MSM_ETM0_REG': 'parse_qdss_common',
    'MSM_ETM1_REG': 'parse_qdss_common',
    'MSM_ETM2_REG': 'parse_qdss_common',
    'MSM_ETM3_REG': 'parse_qdss_common',
}

# Maps a QDSS client tag to the QDSSDump attribute that records where that
# client's trace data begins.
tag_to_field_name = {
    'MSM_TMC0_REG': 'tmc_etr_start',
    'MSM_TMC_ETFETB': 'etf_start',
    'MSM_TMC1_REG': 'tmc_etf_start',
    'MSM_ETM0_REG': 'etm_regs0',
    'MSM_ETM1_REG': 'etm_regs1',
    'MSM_ETM2_REG': 'etm_regs2',
    'MSM_ETM3_REG': 'etm_regs3',
}
@register_parser('--parse-debug-image', 'Parse the debug image and associated information')
class DebugImage(RamParser):
    """Parse the legacy (v1) debug-image dump table and its client regions.

    Each client entry in the msm_memory_dump table is dispatched to the
    parser method named in the module-level print_table mapping.
    """

    def __init__(self, *args):
        super(DebugImage, self).__init__(*args)
        self.qdss = QDSSDump()
        self.name_lookup_table = []

    def parse_cpu_ctx(self, start, end, tag):
        """Parse a dumped CPU context region and print per-core registers."""
        print_out_str(
            'Parsing CPU context start {0:x} end {1:x}'.format(start, end))
        # For historical reasons, we can't rely on the magic number to indicate if there
        # is context dumped. Check the magic number here instead
        magic = self.ramdump.read_word(start, False)
        if magic is None:
            print_out_str(
                "!!! Address {0:x} is bogus! Can't parse!".format(start))
            return
        if magic != 0x44434151:
            print_out_str(
                "!!! Magic {0:x} doesn't match! No context was dumped!".format(magic))
            return
        regs = TZRegDump(self.ramdump)
        regs.init_regs(start)
        for i in range(regs.ncores):
            regs.dump_core_pc(i)
        regs.dump_all_regs()

    def parse_l2_cache(self, start, end, tag):
        """Parse a dumped L2 cache region after checking its magic."""
        print_out_str(
            'Parsing L2 cache context start {0:x} end {1:x}'.format(start, end))
        magic = self.ramdump.read_word(start, False)
        if magic is None:
            print_out_str(
                "!!! Address {0:x} is bogus! Can't parse!".format(start))
            return
        if magic != 0xcac1ecac:
            print_out_str(
                "!!! Magic {0:x} doesn't match! No cache was dumped!".format(magic))
            return
        parse_cache_dump(self.ramdump, start)

    def parse_l1_cache(self, start, end, tag):
        """Save a dumped L1 cache region to disk after checking its magic."""
        print_out_str(
            'Parsing L1 cache context start {0:x} end {1:x}'.format(start, end))
        magic = self.ramdump.read_word(start, False)
        if magic is None:
            print_out_str(
                "!!! Address {0:x} is bogus! Can't parse!".format(start))
            return
        if magic != 0x314C4151:
            print_out_str(
                "!!! Magic {0:X} doesn't match! No cache was dumped!".format(magic))
            return
        print_out_str('Saving L1 cache')
        save_l1_dump(self.ramdump, start, end - start)

    def parse_ocmem(self, start, end, tag):
        """Placeholder: OCMEM region parsing is not implemented."""
        print_out_str(
            '[!!!] Parsing not implemented yet start {0:x} end {1:x}'.format(start, end))

    def parse_qdss_common(self, start, end, tag):
        """Record the data start address of a QDSS trace region on self.qdss."""
        print_out_str(
            'Parsing {0} context start {1:x} end {2:x}'.format(tag, start, end))
        magic = self.ramdump.read_word(start, False)
        if magic is None:
            print_out_str(
                "!!! Address {0:x} is bogus! Can't parse!".format(start))
            return
        if magic != QDSS_MAGIC:
            print_out_str(
                "!!! Magic {0:X} doesn't match! Tracing was not dumped!".format(magic))
            return
        # Trace data begins 4 KB past the region header.
        setattr(self.qdss, tag_to_field_name[tag], start + 4096)

    def parse_dump(self):
        """Walk the msm_memory_dump client table, dispatching every entry
        to the parser method named in print_table, then save QDSS data."""
        self.name_lookup_table = self.ramdump.gdbmi.get_enum_lookup_table(
            'dump_client_type', 32)
        dump_table_ptr_offset = self.ramdump.field_offset(
            'struct msm_memory_dump', 'dump_table_ptr')
        version_offset = self.ramdump.field_offset(
            'struct msm_dump_table', 'version')
        num_entries_offset = self.ramdump.field_offset(
            'struct msm_dump_table', 'num_entries')
        client_entries_offset = self.ramdump.field_offset(
            'struct msm_dump_table', 'client_entries')
        id_offset = self.ramdump.field_offset('struct msm_client_dump', 'id')
        start_addr_offset = self.ramdump.field_offset(
            'struct msm_client_dump', 'start_addr')
        end_addr_offset = self.ramdump.field_offset(
            'struct msm_client_dump', 'end_addr')
        client_dump_entry_size = self.ramdump.sizeof('struct msm_client_dump')
        mem_dump_data = self.ramdump.address_of('mem_dump_data')
        dump_table = self.ramdump.read_word(
            mem_dump_data + dump_table_ptr_offset)
        version = self.ramdump.read_word(dump_table + version_offset)
        if version is None:
            print_out_str('Version is bogus! Can\'t parse debug image')
            return
        num_entries = self.ramdump.read_word(dump_table + num_entries_offset)
        # A wildly large entry count indicates a corrupt table.
        if num_entries is None or num_entries > 100:
            print_out_str('num_entries is bogus! Can\'t parse debug image')
            return
        print_out_str('\nDebug image version: {0}.{1} Number of entries {2}'.format(
            version >> 20, version & 0xFFFFF, num_entries))
        print_out_str('--------')
        for i in range(0, num_entries):
            this_client = dump_table + client_entries_offset + \
                i * client_dump_entry_size
            client_id = self.ramdump.read_word(this_client + id_offset)
            client_start = self.ramdump.read_word(
                this_client + start_addr_offset)
            client_end = self.ramdump.read_word(this_client + end_addr_offset)
            # Bug fix: use >= so that client_id == len(name_lookup_table) is
            # rejected here instead of raising IndexError on the lookup below
            # (the original used '>').
            if client_id < 0 or client_id >= len(self.name_lookup_table):
                print_out_str(
                    '!!! Invalid client id found {0:x}'.format(client_id))
                continue
            client_name = self.name_lookup_table[client_id]
            if client_name not in print_table:
                print_out_str(
                    '!!! {0} Does not have an associated function. The parser needs to be updated!'.format(client_name))
            else:
                print_out_str(
                    'Parsing debug information for {0}'.format(client_name))
                func = print_table[client_name]
                getattr(DebugImage, func)(self, client_start,
                                          client_end, client_name)
        print_out_str('--------')
        self.qdss.dump_standard(self.ramdump)
        if not self.ramdump.skip_qdss_bin:
            self.qdss.save_etf_bin(self.ramdump)
            self.qdss.save_etr_bin(self.ramdump)

    def parse(self):
        """Entry point: choose v1 or v2 debug-image parsing based on which
        kernel symbol exists in the dump."""
        # use the mem_dump_data variable to detect if debug image feature was compiled in,
        # and memdump data variable for debug image v2 feature, rather than relying on
        # configuration option.
        if self.ramdump.address_of('mem_dump_data'):
            self.parse_dump()
        elif self.ramdump.address_of('memdump'):
            regs = DebugImage_v2(self.ramdump)
            regs.parse_dump_v2(self.ramdump)
        else:
            print_out_str(
                '!!! Debug image was not enabled. No debug dump will be provided')
            return

View File

@@ -0,0 +1,20 @@
# Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser
import print_out
import dmesglib
@register_parser('--dmesg', 'Print the dmesg', shortopt='-d')
class Dmesg(RamParser):
    """Parser plugin that extracts the kernel log (dmesg) from the dump."""

    def parse(self):
        # Delegate extraction to dmesglib, writing into the shared
        # print_out output file.
        dmesglib.DmesgLib(self.ramdump, print_out.out_file).extract_dmesg()

View File

@@ -0,0 +1,54 @@
"""
Copyright (c) 2020 The Linux Foundation. All rights reserved.
SPDX-License-Identifier: GPL-2.0-only
Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 and
only version 2 as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import os,sys
import struct
import subprocess
from print_out import print_out_str
from parser_util import register_parser, RamParser
import local_settings
@register_parser('--dtb', 'Dump the devicetree blob information')
class dtb_parsing(RamParser):
    """Extract the flattened devicetree blob (FDT) from the dump.

    Writes devicetree.dtb to the output directory and, if a dtc binary is
    configured in local_settings, decompiles it into dts.txt.
    """

    def __init__(self, *args):
        super(dtb_parsing, self).__init__(*args)

    def dtb_parse(self, ram_dump):
        """Locate the FDT via the kernel's initial_boot_params pointer and
        save the raw blob as <outdir>/devicetree.dtb."""
        initial_boot_params_addr = ram_dump.address_of('initial_boot_params')
        if not initial_boot_params_addr:
            return
        initial_boot_params = ram_dump.read_u64(initial_boot_params_addr)
        # The FDT header magic is big-endian 0xD00DFEED, so a little-endian
        # u32 read yields 0xEDFE0DD0.
        magic = ram_dump.read_u32(initial_boot_params)
        if magic != 0xEDFE0DD0:
            return
        # totalsize is a big-endian u32 at header offset 4; byte-swap the
        # little-endian read instead of the original hand-rolled shifting.
        raw_size = ram_dump.read_u32(initial_boot_params + 0x4)
        dtbsize = int.from_bytes(raw_size.to_bytes(4, 'little'), 'big')
        dtb_path = os.path.join(ram_dump.outdir, "devicetree.dtb")
        dtb_data = ram_dump.read_physical(
            ram_dump.virt_to_phys(initial_boot_params), dtbsize)
        # 'with' guarantees the handle is closed even if the write fails
        # (the original used a bare open/close pair).
        with open(dtb_path, 'wb') as dtb_fd:
            dtb_fd.write(dtb_data)

    def parse(self):
        """Entry point: dump the blob, then run dtc to produce dts.txt."""
        self.dtb_parse(self.ramdump)
        dts_output_file = "{0}/{1}".format(self.ramdump.outdir, "dts.txt")
        with open(dts_output_file, 'w') as dts_out:
            devicetree_dtb = os.path.join(self.ramdump.outdir, "devicetree.dtb")
            if os.path.exists(devicetree_dtb):
                try:
                    # subprocess.call waits for dtc to finish so dts.txt is
                    # complete before we return (the original launched Popen
                    # and never collected the child).
                    subprocess.call(
                        [local_settings.dtc_path, '-f', '-I', 'dtb',
                         '-O', 'dts', devicetree_dtb],
                        stdout=dts_out, stderr=dts_out, shell=False)
                except OSError as e:
                    print_out_str("exception is {0} dtc used {1}".format(
                        str(e), local_settings.dtc_path))

View File

@@ -0,0 +1,462 @@
# Copyright (c) 2021 The Linux Foundation. All rights reserved.
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser, cleanupString
from print_out import print_out_str
from struct_print import struct_print_class
def parse_mountpoint(ramdump, dentry):
    """Rebuild the absolute mount path of *dentry* by walking d_parent links.

    The walk terminates at the root dentry, which is its own parent.
    """
    iname_off = ramdump.field_offset('struct dentry', 'd_iname')
    components = [cleanupString(ramdump.read_cstring(dentry + iname_off, 40))]
    parent = ramdump.read_structure_field(dentry, 'struct dentry', 'd_parent')
    previous = 0
    while parent != previous:
        name = cleanupString(ramdump.read_cstring(parent + iname_off, 40))
        # The root component "/" is implied by the leading slash below.
        if name != "/":
            components.insert(0, name)
        previous = parent
        parent = ramdump.read_structure_field(parent, 'struct dentry', 'd_parent')
    return "/" + "/".join(components)
def print_f2fs_data(ramdump):
    """Find the f2fs superblock mounted at /data and dump its state.

    Walks the mount list of init_nsproxy's mount namespace; for the f2fs
    mount at /data it prints a summary line and then dumps the major f2fs
    control structures (f2fs_sb_info, raw super block, node/segment
    managers, SIT/free/dirty segment info, checkpoint, mount options and
    the GC thread) to f2fs_info.txt.
    """
    output_file = ramdump.open_file("f2fs_info.txt")
    init_nsproxy_addr = ramdump.address_of('init_nsproxy')
    mnt_ns = ramdump.read_structure_field(init_nsproxy_addr, 'struct nsproxy', 'mnt_ns')
    mnt_ns_list_offset = ramdump.field_offset('struct mnt_namespace', 'list')
    mnt_ns_list_head = mnt_ns + mnt_ns_list_offset
    next_offset = ramdump.field_offset('struct list_head', 'next')
    mnt_ns_entry = ramdump.read_word(mnt_ns_list_head + next_offset)
    # Pre-compute every struct field offset used inside the loop.
    mount_list_offset = ramdump.field_offset('struct mount', 'mnt_list')
    mount_mnt_o = ramdump.field_offset('struct mount', 'mnt')
    mount_mnt_rcu_o = ramdump.field_offset('struct mount', 'mnt_rcu')
    vfsmount_size = ramdump.sizeof('struct vfsmount')
    mnt_sb_o = ramdump.field_offset('struct vfsmount', 'mnt_sb')
    mnt_o = ramdump.field_offset('struct mount', 'mnt')
    s_writers_o = ramdump.field_offset('struct super_block', 's_writers')
    frozen_o = ramdump.field_offset('struct sb_writers', 'frozen')
    s_umount_o = ramdump.field_offset('struct super_block', 's_umount')
    count_o = ramdump.field_offset('struct rw_semaphore', 'count')
    s_id_o = ramdump.field_offset('struct super_block', 's_id')
    d_iname_offset = ramdump.field_offset('struct dentry', 'd_iname')
    dentry_d_child_o = ramdump.field_offset('struct dentry', 'd_child')
    output_file.write("(struct mount *) (struct super_block *) [FS_TYPE] \t\t[DEVNAME] [DIRNAME] [SB_ID] [umount_RW_LOCK_count] [READ_ONLY] [writer FROZEN (1=YES,generally)]\n")
    # Walk every mount in the initial namespace until the f2fs /data
    # mount is found.
    while mnt_ns_entry != mnt_ns_list_head:
        # mnt_list is embedded in struct mount; back up to the container.
        root = mnt_ns_entry - mount_list_offset
        root_mnt_devname = ramdump.read_structure_cstring(root, 'struct mount', 'mnt_devname', 40)
        root_mnt_sb = ramdump.read_word(root + mnt_o + mnt_sb_o)
        sb_s_type = ramdump.read_structure_field(root_mnt_sb, 'struct super_block', 's_type')
        s_type_name = ramdump.read_structure_cstring(sb_s_type, 'struct file_system_type', 'name', 40)
        sb_s_id = cleanupString(ramdump.read_cstring(root_mnt_sb + s_id_o, 40))
        sb_s_flags = ramdump.read_structure_field(root_mnt_sb, 'struct super_block', 's_flags')
        sb_s_flags = sb_s_flags & 0x1  #MS_RDONLY=0x1
        sb_s_writers_frozen = ramdump.read_int(root_mnt_sb + s_writers_o + frozen_o)
        # NOTE(review): this maps a frozen level of 0 (SB_UNFROZEN) to 1,
        # which looks inverted relative to the column header -- confirm.
        if sb_s_writers_frozen == 0:
            sb_s_writers_frozen = 0x1
        sb_s_umount_count = ramdump.read_word(root_mnt_sb + s_umount_o + count_o)
        root_mnt_mountpoint = ramdump.read_structure_field(root, 'struct mount', 'mnt_mountpoint')
        mountpoint = parse_mountpoint(ramdump, root_mnt_mountpoint)
        if s_type_name == "f2fs" and mountpoint == "/data":
            output_file.write("0x{0:16X}\t 0x{1:16X}\t\t{2:12}\t{3:30}\t\t\t{4:30}\t{5:15}\t{6:016X}\t\t{7:3}\t\t\t\t\t{8:2}\n".format(root, root_mnt_sb,
                s_type_name, root_mnt_devname, mountpoint, sb_s_id, sb_s_umount_count, sb_s_flags, sb_s_writers_frozen))
            f2fs_sb_info = ramdump.read_structure_field(root_mnt_sb, 'struct super_block', 's_fs_info')
            f2fs_sb = ramdump.read_structure_field(f2fs_sb_info, 'struct f2fs_sb_info', 'sb')
            nm_info = ramdump.read_structure_field(f2fs_sb_info, 'struct f2fs_sb_info', 'nm_info')
            sm_info = ramdump.read_structure_field(f2fs_sb_info, 'struct f2fs_sb_info', 'sm_info')
            ckpt_info = ramdump.read_structure_field(f2fs_sb_info, 'struct f2fs_sb_info', 'ckpt')
            # Dump struct f2fs_sb_info.  Fields that were moved/removed
            # after v5.4 are guarded by kernel-version checks.
            sbi = struct_print_class(ramdump, 'f2fs_sb_info', f2fs_sb_info, output_file)
            sbi.append('sb_lock', 'rw_semaphore')
            sbi.append('valid_super_block', 'u32')
            sbi.append('s_flag', 'u32')
            sbi.append('writepages', 'mutex')
            sbi.append('io_order_lock', 'rw_semaphore')
            sbi.append('cur_cp_pack', 'u32')
            sbi.append('cp_global_sem', 'rw_semaphore')
            sbi.append('cp_rwsem', 'rw_semaphore')
            sbi.append('node_write', 'rw_semaphore')
            sbi.append('node_change', 'rw_semaphore')
            sbi.append('fsync_seg_id', 'u32')
            sbi.append('fsync_node_num', 'u32')
            sbi.append('max_orphans', 'u32')
            sbi.append('flush_lock', 'mutex')
            if (ramdump.kernel_version <= (5, 4, 0)):
                sbi.append('extent_tree_lock', 'mutex')
            #basic filesystem units
            sbi.append('log_sectors_per_block', 'u32')
            sbi.append('log_blocksize', 'u32')
            sbi.append('blocksize', 'u32')
            sbi.append('root_ino_num', 'u32')
            sbi.append('node_ino_num', 'u32')
            sbi.append('meta_ino_num', 'u32')
            sbi.append('log_blocks_per_seg', 'u32')
            sbi.append('blocks_per_seg', 'u32')
            sbi.append('segs_per_sec', 'u32')
            sbi.append('secs_per_zone', 'u32')
            sbi.append('total_sections', 'u32')
            sbi.append('total_node_count', 'u32')
            sbi.append('total_valid_node_count', 'u32')
            if (ramdump.kernel_version <= (5, 4, 0)):
                sbi.append('max_file_blocks', 'u32')
            sbi.append('dir_level', 'u32')
            sbi.append('readdir_ra', 'u32')
            sbi.append('user_block_count', 'u32')
            sbi.append('total_valid_block_count', 'u32')
            sbi.append('discard_blks', 'u32')
            sbi.append('last_valid_block_count', 'u32')
            sbi.append('reserved_blocks', 'u32')
            sbi.append('current_reserved_blocks', 'u32')
            sbi.append('unusable_block_count', 'u32')
            sbi.append('nquota_files', 'u32')
            sbi.append('quota_sem', 'rw_semaphore')
            sbi.append('nr_pages[0]', 'u32')
            sbi.append('nr_pages[1]', 'u32')
            sbi.append('nr_pages[2]', 'u32')
            sbi.append('nr_pages[3]', 'u32')
            sbi.append('nr_pages[4]', 'u32')
            sbi.append('nr_pages[5]', 'u32')
            sbi.append('nr_pages[6]', 'u32')
            sbi.append('nr_pages[7]', 'u32')
            sbi.append('nr_pages[8]', 'u32')
            sbi.append('nr_pages[9]', 'u32')
            sbi.append('nr_pages[10]', 'u32')
            sbi.append('nr_pages[11]', 'u32')
            sbi.append('nr_pages[12]', 'u32')
            sbi.append('nr_pages[13]', 'u32')
            sbi.append('wb_sync_req[0]', 'u32')
            sbi.append('wb_sync_req[1]', 'u32')
            sbi.append('gc_lock', 'rw_semaphore')
            sbi.append('cur_victim_sec', 'u32')
            sbi.append('gc_mode', 'u32')
            sbi.append('next_victim_seg[0]', 'u32')
            sbi.append('next_victim_seg[1]', 'u32')
            sbi.append('atomic_files', 'u32')
            if (ramdump.kernel_version <= (5, 4, 0)):
                sbi.append('skipped_atomic_files[0]', 'u64')
                sbi.append('skipped_atomic_files[1]', 'u64')
                sbi.append('skipped_gc_rwsem', 'u64')
            sbi.append('gc_pin_file_threshold', 'u64')
            sbi.append('pin_sem', 'rw_semaphore')
            sbi.append('max_victim_search', 'u32')
            sbi.append('migration_granularity', 'u32')
            # Statistics counters exist only with CONFIG_F2FS_STAT_FS.
            if (ramdump.is_config_defined('CONFIG_F2FS_STAT_FS')):
                sbi.append('meta_count', 'u32')
                sbi.append('segment_count[0]', 'u32')
                sbi.append('segment_count[1]', 'u32')
                sbi.append('block_count[0]', 'u32')
                sbi.append('block_count[1]', 'u32')
                sbi.append('inplace_count', 'u32')
                sbi.append('total_hit_ext', 'u64')
                sbi.append('read_hit_rbtree', 'u64')
                sbi.append('read_hit_largest', 'u64')
                sbi.append('read_hit_cached', 'u64')
                sbi.append('inline_xattr', 'u32')
                sbi.append('inline_inode', 'u32')
                sbi.append('inline_dir', 'u32')
                sbi.append('compr_inode', 'u32')
                sbi.append('compr_blocks', 'u32')
                if (ramdump.kernel_version <= (5, 4, 0)):
                    sbi.append('vw_cnt', 'u32')
                    sbi.append('max_vw_cnt', 'u32')
                sbi.append('max_aw_cnt', 'u32')
                sbi.append('io_skip_bggc', 'u32')
                sbi.append('other_skip_bggc', 'u32')
                sbi.append('ndirty_inode[0]', 'u32')
                sbi.append('ndirty_inode[1]', 'u32')
                sbi.append('ndirty_inode[2]', 'u32')
                sbi.append('ndirty_inode[3]', 'u32')
            if (ramdump.kernel_version <= (5, 4, 0)):
                sbi.append('rw_iostat', 'u64')
                sbi.append('prev_rw_iostat', 'u64')
            sbi.append('iostat_enable', 'u8')
            sbi.append('iostat_next_period', 'u64')
            sbi.append('iostat_period_ms', 'u32')
            sbi.append('data_io_flag', 'u32')
            sbi.append('node_io_flag', 'u32')
            sbi.append('s_ndevs', 'u32')
            sbi.append('dirty_device', 'u32')
            sbi.append('umount_mutex', 'mutex')
            sbi.append('shrinker_run_no', 'u32')
            sbi.append('sectors_written_start', 'u64')
            sbi.append('kbytes_written', 'u64')
            sbi.process()
            sbi.print_struct()
            # VFS-level struct super_block.
            sb = struct_print_class(ramdump, 'super_block', f2fs_sb, output_file)
            sb.append('s_flags', 'u64')
            sb.append('s_iflags', 'u64')
            sb.append('s_magic', 'u64')
            sb.append('s_umount', 'rw_semaphore')
            sb.append('s_count', 'u32')
            sb.append('s_active', 'u32')
            sb.process()
            sb.print_struct()
            # On-disk struct f2fs_super_block (raw_super).
            f2fs_raw_super = ramdump.read_structure_field(f2fs_sb_info, 'struct f2fs_sb_info', 'raw_super')
            rawsb = struct_print_class(ramdump, 'f2fs_super_block', f2fs_raw_super, output_file)
            rawsb.append('magic', 'u32')
            rawsb.append('major_ver', 'u16')
            rawsb.append('minor_ver', 'u16')
            rawsb.append('log_sectorsize', 'u32')
            rawsb.append('log_sectors_per_block', 'u32')
            rawsb.append('log_blocksize', 'u32')
            rawsb.append('log_blocks_per_seg', 'u32')
            rawsb.append('segs_per_sec', 'u32')
            rawsb.append('secs_per_zone', 'u32')
            rawsb.append('checksum_offset', 'u32')
            rawsb.append('block_count', 'u64')
            rawsb.append('section_count', 'u32')
            rawsb.append('segment_count', 'u32')
            rawsb.append('segment_count_ckpt', 'u32')
            rawsb.append('segment_count_sit', 'u32')
            rawsb.append('segment_count_nat', 'u32')
            rawsb.append('segment_count_ssa', 'u32')
            rawsb.append('segment_count_main', 'u32')
            rawsb.append('extension_count', 'u32')
            rawsb.append('cp_payload', 'u32')
            rawsb.append('feature', 'u32')
            rawsb.append('encryption_level', 'u8')
            rawsb.process()
            rawsb.print_struct()
            #Dump F2FS node manager sbi->nm_info
            nmi = struct_print_class(ramdump, 'f2fs_nm_info', nm_info, output_file)
            nmi.append('max_nid', 'u32')
            nmi.append('available_nids', 'u32')
            nmi.append('next_scan_nid', 'u32')
            nmi.append('ram_thresh', 'u32')
            nmi.append('ra_nid_pages', 'u32')
            nmi.append('dirty_nats_ratio', 'u32')
            nmi.append('nat_tree_lock', 'rw_semaphore')
            nmi.append('nat_cnt', 'u32')
            if (ramdump.kernel_version < (5, 4, 0)):
                nmi.append('dirty_nat_cnt', 'u32')
            nmi.append('nat_blocks', 'u32')
            nmi.append('nid_cnt[0]', 'u32')
            nmi.append('nid_cnt[1]', 'u32')
            nmi.append('build_lock', 'mutex')
            nmi.append('nat_bits_blocks', 'u32')
            nmi.append('bitmap_size', 'u32')
            nmi.process()
            nmi.print_struct()
            #F2FS segment manager sbi->sm_info
            smi = struct_print_class(ramdump, 'f2fs_sm_info', sm_info, output_file)
            smi.append('curseg_lock', 'rw_semaphore')
            smi.append('segment_count', 'u32')
            smi.append('main_segments', 'u32')
            smi.append('reserved_segments', 'u32')
            smi.append('ovp_segments', 'u32')
            smi.append('rec_prefree_segments', 'u32')
            if (ramdump.kernel_version <= (5, 4, 0)):
                smi.append('trim_sections', 'u32')
            smi.append('ipu_policy', 'u32')
            smi.append('min_ipu_util', 'u32')
            smi.append('min_fsync_blocks', 'u32')
            smi.append('min_seq_blocks', 'u32')
            smi.append('min_hot_blocks', 'u32')
            smi.append('min_ssr_sections', 'u32')
            smi.process()
            smi.print_struct()
            # Segment Info Table (SIT) state.
            sit_info = ramdump.read_structure_field(sm_info, 'struct f2fs_sm_info', 'sit_info')
            sit = struct_print_class(ramdump, 'sit_info', sit_info, output_file)
            sit.append('sit_blocks', 'u32')
            sit.append('written_valid_blocks', 'u32')
            sit.append('bitmap_size', 'u32')
            sit.append('dirty_sentries', 'u32')
            sit.append('sents_per_block', 'u32')
            sit.append('sentry_lock', 'rw_semaphore')
            sit.append('elapsed_time', 'u64')
            sit.append('mounted_time', 'u64')
            sit.append('min_mtime', 'u64')
            sit.append('max_mtime', 'u64')
            sit.append('last_victim[0]', 'u32')
            sit.append('last_victim[1]', 'u32')
            sit.append('last_victim[2]', 'u32')
            sit.append('last_victim[3]', 'u32')
            sit.process()
            sit.print_struct()
            free_info = ramdump.read_structure_field(sm_info, 'struct f2fs_sm_info', 'free_info')
            freeinfo = struct_print_class(ramdump, 'free_segmap_info', free_info, output_file)
            freeinfo.append('start_segno', 'u32')
            freeinfo.append('free_segments', 'u32')
            freeinfo.append('free_sections', 'u32')
            freeinfo.process()
            freeinfo.print_struct()
            dirty_info = ramdump.read_structure_field(sm_info, 'struct f2fs_sm_info', 'dirty_info')
            dirtyinfo = struct_print_class(ramdump, 'dirty_seglist_info', dirty_info, output_file)
            dirtyinfo.append('seglist_lock', 'mutex')
            dirtyinfo.append('nr_dirty[0]', 'u32')
            dirtyinfo.append('nr_dirty[1]', 'u32')
            dirtyinfo.append('nr_dirty[2]', 'u32')
            dirtyinfo.append('nr_dirty[3]', 'u32')
            dirtyinfo.append('nr_dirty[4]', 'u32')
            dirtyinfo.append('nr_dirty[5]', 'u32')
            dirtyinfo.append('nr_dirty[6]', 'u32')
            dirtyinfo.append('nr_dirty[7]', 'u32')
            dirtyinfo.process()
            dirtyinfo.print_struct()
            curseg_array = ramdump.read_structure_field(sm_info, 'struct f2fs_sm_info', 'curseg_array')
            curseginfo = struct_print_class(ramdump, 'curseg_info', curseg_array, output_file)
            curseginfo.append('curseg_mutex', 'mutex')
            curseginfo.append('journal_rwsem', 'rw_semaphore')
            curseginfo.append('segno', 'u32')
            curseginfo.append('next_blkoff', 'u16')
            curseginfo.append('zone', 'u32')
            curseginfo.append('next_segno', 'u32')
            curseginfo.process()
            curseginfo.print_struct()
            # Flush and discard command control blocks.
            fcc_info = ramdump.read_structure_field(sm_info, 'struct f2fs_sm_info', 'fcc_info')
            fccinfo = struct_print_class(ramdump, 'flush_cmd_control', fcc_info, output_file)
            fccinfo.append('issued_flush', 'u32')
            fccinfo.append('queued_flush', 'u32')
            fccinfo.process()
            fccinfo.print_struct()
            dcc_info = ramdump.read_structure_field(sm_info, 'struct f2fs_sm_info', 'dcc_info')
            dccinfo = struct_print_class(ramdump, 'discard_cmd_control', dcc_info, output_file)
            dccinfo.append('discard_wake', 'u32')
            dccinfo.append('cmd_lock', 'mutex')
            dccinfo.append('nr_discards', 'u32')
            dccinfo.append('max_discards', 'u32')
            dccinfo.append('discard_granularity', 'u32')
            dccinfo.append('undiscard_blks', 'u32')
            dccinfo.append('next_pos', 'u32')
            dccinfo.append('issued_discard', 'u32')
            dccinfo.append('queued_discard', 'u32')
            dccinfo.append('discard_cmd_cnt', 'u32')
            dccinfo.process()
            dccinfo.print_struct()
            # Per-type write IO control (write_io[0..2]); the in-flight
            # bio pointer gates whether the locks are worth printing.
            writeio_0 = ramdump.read_structure_field(f2fs_sb_info, 'struct f2fs_sb_info', 'write_io[0]')
            writeio_1 = ramdump.read_structure_field(f2fs_sb_info, 'struct f2fs_sb_info', 'write_io[1]')
            writeio_2 = ramdump.read_structure_field(f2fs_sb_info, 'struct f2fs_sb_info', 'write_io[2]')
            writeio_list = [writeio_0, writeio_1, writeio_2]
            for writeio in writeio_list:
                bio = ramdump.read_structure_field(writeio, 'struct f2fs_bio_info', 'bio')
                w_io = struct_print_class(ramdump, 'f2fs_bio_info', writeio, output_file)
                w_io.append('bio', 'ptr')
                if bio:
                    w_io.append('io_rwsem', 'rw_semaphore')
                    w_io.append('bio_list_lock', 'rw_semaphore')
                w_io.process()
                w_io.print_struct()
            # In-memory checkpoint image sbi->ckpt.
            ckpt = struct_print_class(ramdump, 'f2fs_checkpoint', ckpt_info, output_file)
            ckpt.append('checkpoint_ver', 'u64')
            ckpt.append('user_block_count', 'u64')
            ckpt.append('valid_block_count', 'u64')
            ckpt.append('rsvd_segment_count', 'u32')
            ckpt.append('overprov_segment_count', 'u32')
            ckpt.append('free_segment_count', 'u32')
            ckpt.append('cur_node_segno[0]', 'u32')
            ckpt.append('cur_node_segno[1]', 'u32')
            ckpt.append('cur_node_segno[2]', 'u32')
            ckpt.append('cur_node_segno[3]', 'u32')
            ckpt.append('cur_node_segno[4]', 'u32')
            ckpt.append('cur_node_segno[5]', 'u32')
            ckpt.append('cur_node_segno[6]', 'u32')
            ckpt.append('cur_node_segno[7]', 'u32')
            ckpt.append('cur_node_blkoff[0]', 'u16')
            ckpt.append('cur_node_blkoff[1]', 'u16')
            ckpt.append('cur_node_blkoff[2]', 'u16')
            ckpt.append('cur_node_blkoff[3]', 'u16')
            ckpt.append('cur_node_blkoff[4]', 'u16')
            ckpt.append('cur_node_blkoff[5]', 'u16')
            ckpt.append('cur_node_blkoff[6]', 'u16')
            ckpt.append('cur_node_blkoff[7]', 'u16')
            ckpt.append('cur_data_segno[0]', 'u32')
            ckpt.append('cur_data_segno[1]', 'u32')
            ckpt.append('cur_data_segno[2]', 'u32')
            ckpt.append('cur_data_segno[3]', 'u32')
            ckpt.append('cur_data_segno[4]', 'u32')
            ckpt.append('cur_data_segno[5]', 'u32')
            ckpt.append('cur_data_segno[6]', 'u32')
            ckpt.append('cur_data_segno[7]', 'u32')
            ckpt.append('cur_data_blkoff[0]', 'u16')
            ckpt.append('cur_data_blkoff[1]', 'u16')
            ckpt.append('cur_data_blkoff[2]', 'u16')
            ckpt.append('cur_data_blkoff[3]', 'u16')
            ckpt.append('cur_data_blkoff[4]', 'u16')
            ckpt.append('cur_data_blkoff[5]', 'u16')
            ckpt.append('cur_data_blkoff[6]', 'u16')
            ckpt.append('cur_data_blkoff[7]', 'u16')
            ckpt.append('ckpt_flags', 'u32')
            ckpt.append('cp_pack_total_block_count', 'u32')
            ckpt.append('cp_pack_start_sum', 'u32')
            ckpt.append('valid_node_count', 'u32')
            ckpt.append('valid_inode_count', 'u32')
            ckpt.append('next_free_nid', 'u32')
            ckpt.append('sit_ver_bitmap_bytesize', 'u32')
            ckpt.append('nat_ver_bitmap_bytesize', 'u32')
            ckpt.append('checksum_offset', 'u32')
            ckpt.append('elapsed_time', 'u64')
            ckpt.append('alloc_type[0]', 'u8')
            ckpt.append('alloc_type[1]', 'u8')
            ckpt.append('alloc_type[2]', 'u8')
            ckpt.append('alloc_type[3]', 'u8')
            ckpt.append('alloc_type[4]', 'u8')
            ckpt.append('alloc_type[5]', 'u8')
            ckpt.append('alloc_type[6]', 'u8')
            ckpt.append('alloc_type[7]', 'u8')
            ckpt.append('alloc_type[8]', 'u8')
            ckpt.append('alloc_type[9]', 'u8')
            ckpt.append('alloc_type[10]', 'u8')
            ckpt.append('alloc_type[11]', 'u8')
            ckpt.append('alloc_type[12]', 'u8')
            ckpt.append('alloc_type[13]', 'u8')
            ckpt.append('alloc_type[14]', 'u8')
            ckpt.append('alloc_type[15]', 'u8')
            ckpt.process()
            ckpt.print_struct()
            # Mount options (struct f2fs_mount_info, embedded in sbi).
            mount_opt = ramdump.struct_field_addr(f2fs_sb_info, 'struct f2fs_sb_info', 'mount_opt')
            mntop = struct_print_class(ramdump, 'f2fs_mount_info', mount_opt, output_file)
            mntop.append('opt', 'u32')
            mntop.append('write_io_size_bits', 'u32')
            mntop.append('root_reserved_blocks', 'u32')
            mntop.append('s_resuid', 'u32')
            mntop.append('s_resgid', 'u32')
            mntop.append('active_logs', 'u32')
            mntop.append('inline_xattr_size', 'u32')
            if (ramdump.is_config_defined('CONFIG_QUOTA')):
                mntop.append('s_jquota_fmt', 'u8')
            if (ramdump.kernel_version <= (5, 4, 0)):
                mntop.append('whint_mode', 'u32')
            mntop.append('alloc_mode', 'u32')
            mntop.append('fsync_mode', 'u32')
            mntop.append('fs_mode', 'u32')
            mntop.append('bggc_mode', 'u32')
            if (ramdump.is_config_defined('CONFIG_FS_ENCRYPTION') and ramdump.kernel_version <= (5, 4, 0)):
                mntop.append('inlinecrypt', 'u8')
            mntop.append('unusable_cap_perc', 'u32')
            mntop.append('unusable_cap', 'u32')
            mntop.process()
            mntop.print_struct()
            # Background garbage-collector thread tunables.
            gc_thread = ramdump.read_structure_field(f2fs_sb_info, 'struct f2fs_sb_info', 'gc_thread')
            gc = struct_print_class(ramdump, 'f2fs_gc_kthread', gc_thread, output_file)
            gc.append('urgent_sleep_time', 'u32')
            gc.append('min_sleep_time', 'u32')
            gc.append('max_sleep_time', 'u32')
            gc.append('no_gc_sleep_time', 'u32')
            gc.append('gc_wake', 'u32')
            gc.process()
            gc.print_struct()
            # Only one f2fs /data mount is expected; stop after it.
            break
        mnt_ns_entry = ramdump.read_structure_field(mnt_ns_entry, 'struct list_head', 'next')
    output_file.close()
@register_parser('--print-f2fs', 'Extract F2FS super_block info')
class FileSystem(RamParser):
    """Entry point for the --print-f2fs option."""

    def parse(self):
        # The f2fs structure layouts assumed here only hold for >= 4.9.
        if self.ramdump.kernel_version >= (4, 9):
            print_f2fs_data(self.ramdump)
        else:
            print_out_str("Linux version lower than 4.9 is not supported!!")
            return

# --------------------------------------------------------------------------
# File boundary (extraction artifact removed: "View File" / "@@ -0,0 +1,171 @@")
# --------------------------------------------------------------------------
# Copyright (c) 2012,2014-2015,2017-2020 The Linux Foundation. All rights reserved.
# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from print_out import print_out_str
from parser_util import register_parser, RamParser
from mm import pfn_to_page, page_buddy, page_count, for_each_pfn
from mm import page_to_pfn
import sys
# Per-file page-cache accounting, keyed by each page's
# struct address_space pointer.  The '' entry is a placeholder whose
# 'unknown' fields are never reported (parse() skips it).
# NOTE(review): total_size accumulates 4 per page, i.e. assumes 4 KB
# pages -- confirm for non-4K configurations.
files = {
    '' : {
        'filename' : 'unknown',
        'filepath' : 'unknown',
        'a_ops' : 'unknown',
        'addr_space' : 'unknown',
        'total_pages' : 0,
        'total_size' : 0,
    }
}
@register_parser('--print-filetracking', 'print file tracking information (if available)')
class FileTracking(RamParser):
    """Attribute page-cache pages to the files that own them.

    Scans pfns (optionally restricted via 'ranges=' / 'page=' command-line
    arguments), resolves each page's address_space back to a dentry path,
    accumulates per-file page counts into the module-level 'files' dict and
    writes the report to file_tracking.txt.
    """

    def get_filepath(self, path, name, parent):
        """Build a path by walking d_parent links from *parent* upward.

        Returns (path, cycle_flag): cycle_flag is 0 for a clean walk, 1 when
        the depth limit was hit (suspected cycle), 2 when a dentry appeared
        to be its own parent.
        """
        cycle_flag = 0
        # Bound the upward walk so a corrupted parent chain cannot loop
        # forever.
        cycle_detection = 12
        while True:
            if name == '/' or name == '' or name is None:
                return path, cycle_flag
            path = '/' + name + path
            parent = self.ramdump.read_structure_field(parent, 'struct dentry', 'd_parent')
            if parent is not None:
                d_name_ptr = (parent + self.ramdump.field_offset('struct dentry ', 'd_name')) + \
                    self.ramdump.field_offset('struct qstr', 'name')
                test_name = self.ramdump.read_cstring(self.ramdump.read_pointer(d_name_ptr), 100)
                # Same name with no '/' in either: dentry is its own parent.
                if test_name == name and name.find('/') == -1 and test_name.find('/') == -1:
                    cycle_flag = 2
                    break
                else:
                    if name.find('/') != -1 and test_name.find('/') != -1:
                        break
                    name = test_name
            else:
                break
            cycle_detection -= 1
            if cycle_detection < 1:
                cycle_flag = 1
                break
        # Collapse an accidental leading '//'.
        if path[1] == '/':
            path = path[2:]
        return path, cycle_flag

    def get_file_metadata(self, page):
        """Resolve page -> address_space -> inode -> dentry -> path.

        Returns (name, path, a_ops, addr_space, cycle_flag); on any failed
        link returns ('', '', None, None, 0).
        """
        addr_space = inode = dentry_list = dentry = a_ops = None
        name = path = ''
        if page is not None:
            addr_space = self.ramdump.read_structure_field(page, 'struct page', 'mapping')
        if addr_space is not None:
            inode = self.ramdump.read_structure_field(addr_space, 'struct address_space', 'host')
            a_ops = self.ramdump.read_structure_field(addr_space, 'struct address_space ', 'a_ops')
        else:
            return '', '', None, None, 0
        if inode is not None:
            dentry_list = self.ramdump.read_structure_field(inode, 'struct inode', 'i_dentry')
        if dentry_list is not None:
            dentry = self.ramdump.container_of(dentry_list, 'struct dentry', 'd_u')
        if dentry is not None:
            d_name_ptr = (dentry + self.ramdump.field_offset('struct dentry ', 'd_name')) + \
                self.ramdump.field_offset('struct qstr', 'name')
            name = self.ramdump.read_cstring(self.ramdump.read_pointer(d_name_ptr), 100)
        else:
            return '', '', None, None, 0
        if name is None:
            return '', '', None, None, 0
        path, cycle_flag = self.get_filepath(path, name, dentry)
        return name, path, a_ops, addr_space, cycle_flag

    def update_file_list(self, name, path, a_ops, addr_space, cycle_flag):
        """Record one more page against the file owning *addr_space*."""
        if addr_space not in files:
            files[addr_space] = {}
            files[addr_space]['total_pages'] = 0
            files[addr_space]['total_size'] = 0
        # Metadata is filled in only the first time this file is seen.
        if 'filename' not in files[addr_space]:
            files[addr_space]['filename'] = name
            if cycle_flag == 1:
                files[addr_space]['filepath'] = 'PATH CYCLE DETECTED: ' + path
            elif cycle_flag == 2:
                files[addr_space]['filepath'] = 'PARENT == CURRENT FILE: ' + path
            else:
                files[addr_space]['filepath'] = path
            a_ops_symbol = self.ramdump.unwind_lookup(a_ops)
            files[addr_space]['a_ops'] = a_ops_symbol[0] if a_ops_symbol is not None else a_ops_symbol
            files[addr_space]['addr_space'] = addr_space
        files[addr_space]['total_pages'] += 1
        # assumes 4 KB pages, so size is tracked in KB -- TODO confirm
        files[addr_space]['total_size'] += 4

    def parse(self):
        """Scan the chosen pfn range and write the per-file report."""
        ranges = None
        g_optimization = False
        # Optional scan restriction via 'ranges=<start>-<end>' or
        # 'page=<addr>' arguments (hex values).
        for arg in sys.argv:
            if "ranges=" in arg:
                g_optimization = True
                k, ranges = arg.split("=")
                start, end = ranges.split('-')
                start_pfn = int(start, 16) >> self.ramdump.page_shift
                end_pfn = int(end, 16) >> self.ramdump.page_shift
                break
            elif "page=" in arg:
                g_optimization = True
                k, page = arg.split('=')
                page = int(page, 16)
                start_pfn = page_to_pfn(self.ramdump, page)
                end_pfn = start_pfn + 1
                break
        out_tracking = self.ramdump.open_file('file_tracking.txt')
        if g_optimization:
            ranges = range(start_pfn, end_pfn)
        else:
            ranges = for_each_pfn(self.ramdump)
        for pfn in ranges:
            page = pfn_to_page(self.ramdump, pfn)
            # Skip free (buddy) pages and pages with no users.
            if (page_buddy(self.ramdump, page) or \
                    page_count(self.ramdump, page) == 0):
                continue
            name, path, a_ops, addr_space, cycle_flag = self.get_file_metadata(page)
            if a_ops is None:
                continue
            self.update_file_list(name, path, a_ops, addr_space, cycle_flag)
        total_sizes = {}
        # Report files largest-first.
        file_list = sorted(files, key=lambda x: (files[x]['total_size']), reverse=True)
        for file in range(len(file_list)):
            if files[file_list[file]]['filename'] == 'unknown':
                continue
            name = files[file_list[file]]['filename']
            path = files[file_list[file]]['filepath']
            a_ops = files[file_list[file]]['a_ops']
            addr_space = files[file_list[file]]['addr_space']
            pages = files[file_list[file]]['total_pages']
            size = files[file_list[file]]['total_size']
            out_str = 'File : {0}\nPath : {1}\na_ops : {2}\nAddr Space : 0x{3:x}\nNo. Pages : {4}\nSize (KB) : {5}\n\n\n'
            out_tracking.write(out_str.format(name, path, a_ops, addr_space, pages, size))
            # Also aggregate totals per address_space_operations table.
            if a_ops not in total_sizes:
                total_sizes[a_ops] = 0
            total_sizes[a_ops] += size
        out_tracking.write('-----------= Total Sizes (KB) =-----------\n\n')
        total_sizes_list = sorted(total_sizes, key=lambda x: (total_sizes[x]), reverse=True)
        for aops in range(len(total_sizes_list)):
            out_str = '{0} : {1}\n'
            out_tracking.write(out_str.format(total_sizes_list[aops], total_sizes[total_sizes_list[aops]]))
        out_tracking.close()
        print_out_str('---wrote file tracking information to file_tracking.txt')

# --------------------------------------------------------------------------
# File boundary (extraction artifact removed: "View File" / "@@ -0,0 +1,116 @@")
# --------------------------------------------------------------------------
# Copyright (c) 2021, The Linux Foundation. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import ctypes
import pdb
from parser_util import register_parser, RamParser
from print_out import print_out_str
from linux_list import ListWalker
from ramdump import Struct
from struct_print import struct_print_class
@register_parser('--fs-parser', 'FS report', optional=True)
class fs_parser_class(RamParser):
    """Dump registered filesystems, their superblocks and inode lists.

    Starts at the kernel's global 'file_systems' singly linked list and
    writes the report to fs-report.txt.
    """

    def __init__(self, *args):
        super(fs_parser_class, self).__init__(*args)
        self.sb_vaddr_list = []     # super_block vaddrs gathered from fs_supers
        self.inode_vaddr_list = []  # inode vaddrs gathered from each superblock

    def output(self, str_val):
        """Append *str_val* to the report file."""
        self.output_file.write(str_val)

    def parse(self):
        """Entry point: walk file_systems, then every collected superblock."""
        self.output_file = self.ramdump.open_file('fs-report.txt')
        print_out_str("fs-parser:start")
        s1 = self.ramdump.get_kernel_version()
        # Bug fix: the format string previously used {0:d} for all three
        # fields, printing the major version number three times.
        s2 = 'Kernel version : [{0:d}.{1:d}.{2:d}]\n'.format(s1[0], s1[1], s1[2])
        self.output(s2)
        vaddr = self.ramdump.read_word('file_systems')
        self.file_systems_struct(vaddr)
        self.iterate_sb()
        #self.iterate_inode()
        print_out_str("fs-parser:end")
        self.output_file.close()
        return

    def file_systems_struct(self, fs_vaddr):
        """Print one file_system_type and recurse down its 'next' pointer.

        Superblocks of non-pseudo filesystems are collected into
        self.sb_vaddr_list via fs_sb_list_func.
        """
        if fs_vaddr == 0:
            return
        shc = struct_print_class(self.ramdump, 'file_system_type', fs_vaddr, self.output_file)
        shc.append('name', 'char *')
        shc.append('fs_flags', 'u32')
        node = shc.append('fs_supers', 'hlist_head', list_struct_name='super_block', list_field_name='s_instances')
        shc.append('next', 'ptr')
        shc.process()
        shc.print_struct()
        name_ptr = shc.get_val('name')
        name = self.ramdump.read_cstring(name_ptr)
        # Skip in-memory/pseudo filesystems; their superblocks carry no
        # useful on-disk state for this report.
        if name not in ['proc', 'sysfs', 'tmpfs', 'debugfs', 'tracefs']:
            fs_supers = shc.get_val('fs_supers')
            s_instance_offset = self.ramdump.field_offset('struct super_block', 's_instances')
            list_walker = ListWalker(self.ramdump, fs_supers, s_instance_offset)
            list_walker.walk(fs_supers, self.fs_sb_list_func)
        next_ptr = shc.get_val('next')
        self.file_systems_struct(next_ptr)

    def fs_sb_list_func(self, sb_vaddr):
        """ListWalker callback: remember one super_block address."""
        self.sb_vaddr_list.append(sb_vaddr)

    def iterate_sb(self):
        """Print every collected super_block."""
        for sb_vaddr in self.sb_vaddr_list:
            self.print_sb(sb_vaddr)

    def print_sb(self, sb_vaddr):
        """Dump one super_block and collect its inodes (regular and
        writeback lists) into self.inode_vaddr_list."""
        shc = struct_print_class(self.ramdump, 'super_block', sb_vaddr, self.output_file)
        shc.append('s_flags', 'u32')
        shc.append('s_iflags', 'u32')
        shc.append('s_magic', 'u32')
        shc.append('s_id', 'char[32]')
        shc.append('s_inodes', 'list_head', list_struct_name='inode', list_field_name='i_sb_list')
        shc.append('s_inodes_wb', 'list_head', list_struct_name='inode', list_field_name='i_wb_list')
        shc.process()
        shc.print_struct()
        s_inodes = shc.get_val('s_inodes')
        s_inodes_offset = self.ramdump.field_offset('struct inode', 'i_sb_list')
        list_walker = ListWalker(self.ramdump, s_inodes, s_inodes_offset)
        list_walker.walk(s_inodes, self.fs_inode_list_func)
        s_inodes_wb = shc.get_val('s_inodes_wb')
        s_inodes_wb_offset = self.ramdump.field_offset('struct inode', 'i_wb_list')
        list_walker = ListWalker(self.ramdump, s_inodes_wb, s_inodes_wb_offset)
        list_walker.walk(s_inodes_wb, self.fs_inode_list_func)

    def fs_inode_list_func(self, inode_vaddr):
        """ListWalker callback: remember one inode address."""
        self.inode_vaddr_list.append(inode_vaddr)

    def iterate_inode(self):
        """Print every collected inode (currently disabled in parse())."""
        for inode_vaddr in self.inode_vaddr_list:
            self.print_inode(inode_vaddr)

    def print_inode(self, inode_vaddr):
        """Dump selected fields of one inode."""
        shc = struct_print_class(self.ramdump, 'inode', inode_vaddr, self.output_file)
        shc.append('i_opflags', 'u16')
        shc.append('i_flags', 'u32')
        shc.append('i_ino', 'u64')
        shc.append('i_size', 'u64')
        shc.append('i_bytes', 'u16')
        shc.append('i_state', 'u64')
        node = shc.append('i_lru', 'list_head', list_struct_name='inode', list_field_name='i_lru')
        shc.process()
        shc.print_struct()
# This module is normally driven by the ramparse framework via the
# --fs-parser option; direct execution has nothing to do.
if __name__ == '__main__':
    print('nothing to do yet\n')

# --------------------------------------------------------------------------
# File boundary (extraction artifact removed: "View File" / "@@ -0,0 +1,370 @@")
# --------------------------------------------------------------------------
# Copyright (c) 2017-2022, The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import re
from collections import OrderedDict
from parser_util import register_parser, RamParser
from print_out import print_out_str
from tempfile import NamedTemporaryFile
from .ftrace_event_list import FtraceParser_Event_List
from .ftrace_event import FtraceParser_Event, BufferedWrite
import linux_list as llist
#import time
@register_parser('--dump-ftrace', 'extract ftrace by iterate the ring buffer page',optional=True)
class FtraceParser(RamParser):
def __init__(self, *args):
super(FtraceParser, self).__init__(*args)
self.format_event_map = OrderedDict()
self.format_event_field_map = OrderedDict()
self.event_call = 'struct trace_event_call'
self.event_class = 'struct trace_event_class'
self.trace_names = ["binder", "bootreceiver", "clock_reg", "kgsl-fence",
"memory", "mmc", "rproc_qcom", "suspend", "ufs",
"usb", "wifi", "rwmmio"]
self.whitelisted_trace_names =[]
self.ftrace_buffer_size_kb = None
self.per_cpu_buffer_pages = None
self.savedcmd = self.ramdump.read_pdatatype('savedcmd')
if len(self.ramdump.ftrace_args):
self.whitelisted_trace_names = self.ramdump.ftrace_args
if self.ramdump.ftrace_max_size:
self.per_cpu_buffer_pages = self.ramdump.ftrace_max_size / 4
def ftrace_field_func(self, common_list, ram_dump):
    """
    List-walker callback invoked once per 'struct ftrace_event_field' node.

    Writes one "field:..." descriptor line to formats.txt and, for fields
    that are not the common trace_entry header fields, records
    [type, offset, size] in self.format_event_field_map keyed by field name.

    :param common_list: address of the ftrace_event_field list node
    :param ram_dump: RamDump instance used for all memory reads
    """
    name_offset = ram_dump.field_offset('struct ftrace_event_field', 'name')
    type_offset = ram_dump.field_offset('struct ftrace_event_field', 'type')
    filter_type_offset = ram_dump.field_offset('struct ftrace_event_field', 'filter_type')
    field_offset = ram_dump.field_offset('struct ftrace_event_field', 'offset')
    size_offset = ram_dump.field_offset('struct ftrace_event_field', 'size')
    signed_offset = ram_dump.field_offset('struct ftrace_event_field', 'is_signed')
    name = ram_dump.read_word(common_list + name_offset)
    field_name = ram_dump.read_cstring(name, 256)
    type_name = ram_dump.read_word(common_list + type_offset)
    type_str = ram_dump.read_cstring(type_name, 256)
    offset = ram_dump.read_u32(common_list + field_offset)
    size = ram_dump.read_u32(common_list + size_offset)
    signed = ram_dump.read_u32(common_list + signed_offset)
    # Array types (e.g. "char comm[16]") are printed with the brackets after
    # the field name, matching the kernel's format-file layout.  Raw strings
    # fix the invalid '\[' escape-sequence warnings of the old code.
    if re.match(r'(.*)\[(.*)', type_str) and not (re.match('__data_loc', type_str)):
        s = re.split(r'\[', type_str)
        s[1] = '[' + s[1]
        self.formats_out.write(
            "\tfield:{0} {1}{2};\toffset:{3};\tsize:{4};\tsigned:{5};\n".format(s[0], field_name, s[1], offset,
                                                                                size, signed))
    else:
        self.formats_out.write(
            "\tfield:{0} {1};\toffset:{2};\tsize:{3};\tsigned:{4};\n".format(type_str, field_name, offset, size,
                                                                             signed))
    # Common trace_entry header fields exist in every event and are not
    # recorded in the per-event field map.  (This check was previously
    # duplicated verbatim in both branches above.)
    if field_name not in ("common_type", "common_flags", "common_preempt_count", "common_pid"):
        self.format_event_field_map[field_name] = [type_str, offset, size]
def ftrace_events_func(self, ftrace_list, ram_dump):
    """
    List-walker callback invoked once per registered trace event
    ('struct trace_event_call' node on the kernel's ftrace_events list).

    Emits the event's name, ID, field list and print format to
    self.formats_out (formats.txt), then stores
    [field_map, print_fmt_string] into self.format_event_map keyed by the
    event name and resets self.format_event_field_map for the next event.

    :param ftrace_list: address of the trace_event_call list node
    :param ram_dump: RamDump instance used for all memory reads
    """
    event_offset = ram_dump.field_offset(self.event_call, 'event')
    fmt_offset = ram_dump.field_offset(self.event_call, 'print_fmt')
    class_offset = ram_dump.field_offset(self.event_call, 'class')
    flags_offset = ram_dump.field_offset(self.event_call, 'flags')
    flags = ram_dump.read_word(ftrace_list + flags_offset)
    # TRACE_EVENT_FL_TRACEPOINT moved within the flags enum across kernel
    # versions; pick the bit that matches this dump's kernel.
    if ram_dump.kernel_version >= (4, 14):
        TRACE_EVENT_FL_TRACEPOINT = 0x10
    elif ram_dump.kernel_version >= (4, 9):
        TRACE_EVENT_FL_TRACEPOINT = 0x20
    else:
        TRACE_EVENT_FL_TRACEPOINT = 0x40
    # Since 3.18, tracepoint-backed events keep their name behind the
    # 'tp' pointer (anonymous union member) instead of a 'name' field.
    if (ram_dump.kernel_version >= (3, 18) and (flags & TRACE_EVENT_FL_TRACEPOINT)):
        tp_offset = ram_dump.field_offset(self.event_call, 'tp')
        tp_name_offset = ram_dump.field_offset('struct tracepoint', 'name')
        tp = ram_dump.read_word(ftrace_list + tp_offset)
        name = ram_dump.read_word(tp + tp_name_offset)
    else:
        name_offset = ram_dump.field_offset(self.event_call, 'name')
        name = ram_dump.read_word(ftrace_list + name_offset)
    type_offset = ram_dump.field_offset('struct trace_event', 'type')
    fields_offset = ram_dump.field_offset(self.event_class, 'fields')
    common_field_list = ram_dump.address_of('ftrace_common_fields')
    field_next_offset = ram_dump.field_offset('struct ftrace_event_field', 'link')
    name_str = ram_dump.read_cstring(name, 512)
    event_id = ram_dump.read_word(ftrace_list + event_offset + type_offset)
    fmt = ram_dump.read_word(ftrace_list + fmt_offset)
    fmt_str = ram_dump.read_cstring(fmt, 2048)
    self.formats_out.write("name: {0}\n".format(name_str))
    self.formats_out.write("ID: {0}\n".format(event_id))
    self.formats_out.write("format:\n")
    #self.format_event_map[name_str] = format_event_field_map
    # First dump the common trace_entry fields shared by every event...
    list_walker = llist.ListWalker(ram_dump, common_field_list, field_next_offset)
    list_walker.walk_prev(common_field_list, self.ftrace_field_func, ram_dump)
    self.formats_out.write("\n")
    # ...then this event's own fields from its trace_event_class.
    event_class = ram_dump.read_word(ftrace_list + class_offset)
    field_list = event_class + fields_offset
    list_walker = llist.ListWalker(ram_dump, field_list, field_next_offset)
    list_walker.walk_prev(field_list, self.ftrace_field_func, ram_dump)
    self.formats_out.write("\n")
    self.formats_out.write("print fmt: {0}\n".format(fmt_str))
    fmt_list = []
    fmt_list.append(self.format_event_field_map)
    fmt_list.append(fmt_str)
    self.format_event_map[name_str] = fmt_list
    # Start a fresh field map for the next event in the list walk.
    self.format_event_field_map = OrderedDict()
def ftrace_get_format(self):
    """Dump every registered ftrace event's format description into
    formats.txt and return the FtraceParser_Event_List describing the
    known event types for this dump."""
    rd = self.ramdump
    self.formats_out = rd.open_file('formats.txt')
    event_list = FtraceParser_Event_List(rd)
    #print(fevent_list.ftrace_raw_struct_type)
    events_head = rd.address_of('ftrace_events')
    link_offset = rd.field_offset(self.event_call, 'list')
    walker = llist.ListWalker(rd, events_head, link_offset)
    walker.walk_prev(events_head, self.ftrace_events_func, rd)
    self.formats_out.close()
    return event_list
def ftrace_extract(self):
    """
    Walk the kernel's ftrace_trace_arrays list and, for the global trace
    buffer (plus any whitelisted named instances), parse every per-cpu
    ring buffer into ftrace.txt / ftrace_<name>.txt and per-core
    ftrace_core<N>.txt files.

    A post-pass sorts all parsed events by timestamp and back-fills
    unresolved "<TBD>" task names using per-cpu sched_switch context.
    """
    #ftrace_event_time = 0
    #post_ftrace_event_time = 0
    #taskdump_time = 0
    #parse_trace_entry_time = 0
    # Head of the kernel's list of trace_array instances.
    global_trace_data_org = self.ramdump.address_of('ftrace_trace_arrays')
    global_trace_data_offset = self.ramdump.field_offset(
        'struct list_head ', 'next')
    global_trace_data_next = self.ramdump.read_pointer(global_trace_data_org + global_trace_data_offset)
    # v5.10 renamed trace_array.trace_buffer to array_buffer.
    if self.ramdump.kernel_version >= (5, 10):
        trace_buffer_offset = self.ramdump.field_offset(
            'struct trace_array', 'array_buffer')
    else:
        trace_buffer_offset = self.ramdump.field_offset(
            'struct trace_array', 'trace_buffer')
    trace_buffer_name_offset = self.ramdump.field_offset(
        'struct trace_array', 'name')
    if self.ramdump.kernel_version >= (5, 10):
        ring_trace_buffer_ptr = self.ramdump.field_offset(
            'struct array_buffer', 'buffer')
    else:
        ring_trace_buffer_ptr = self.ramdump.field_offset(
            'struct trace_buffer', 'buffer')
    if self.ramdump.kernel_version >= (5, 10):
        ring_trace_buffer_cpus_ptr = self.ramdump.field_offset(
            'struct trace_buffer', 'cpus')
        ring_trace_buffer_base_addr = self.ramdump.field_offset(
            'struct trace_buffer', 'buffers')
    else:
        # Older kernels: recover 'struct ring_buffer' offsets via the
        # rb_wake_up_waiters frame, falling back to fixed offsets when
        # the debug info does not provide them.
        ring_trace_buffer_cpus_ptr = self.ramdump.frame_field_offset(
            'rb_wake_up_waiters','struct ring_buffer', 'cpus')
        if ring_trace_buffer_cpus_ptr is None:
            ring_trace_buffer_cpus_ptr = 0x4
        ring_trace_buffer_base_addr = self.ramdump.frame_field_offset(
            'rb_wake_up_waiters','struct ring_buffer', 'buffers')
        if ring_trace_buffer_base_addr is None:
            ring_trace_buffer_base_addr = self.ramdump.field_offset(
                'struct ring_buffer', 'buffers')
            if ring_trace_buffer_base_addr is None:
                # Last-resort hard-coded offsets (arm64 vs arm32 layout).
                if self.ramdump.arm64:
                    ring_trace_buffer_base_addr = 0x58
                else:
                    ring_trace_buffer_base_addr = 0x38
    ring_trace_buffer_nr_pages = self.ramdump.field_offset(
        'struct ring_buffer_per_cpu', 'nr_pages')
    # Matches the "taskname-pid [cpu]" prefix of each formatted line.
    log_pattern = re.compile(r'\s*(.*)-(\d+)\s*\[(\d+)\]\s*.*')
    fevent_list = self.ftrace_get_format();
    while(global_trace_data_org != global_trace_data_next):
        trace_array = global_trace_data_next
        #print("v.v (struct trace_array)0x%x" %(trace_array))
        trace_buffer_name = self.ramdump.read_word(trace_array + trace_buffer_name_offset)
        if not (trace_buffer_name):
            trace_name = None
        else:
            trace_name = self.ramdump.read_cstring(trace_buffer_name, 256)
        trace_buffer_ptr_data = self.ramdump.read_pointer(trace_array + trace_buffer_offset)
        ring_trace_buffer_data = trace_buffer_ptr_data + trace_buffer_offset
        ring_trace_buffer_base_data = self.ramdump.read_pointer(ring_trace_buffer_data + ring_trace_buffer_ptr)
        ring_trace_buffer_base_data1 = self.ramdump.read_pointer(ring_trace_buffer_base_data + ring_trace_buffer_base_addr)
        # An unnamed trace_array is the global buffer -> ftrace.txt; named
        # instances are only parsed when whitelisted by the user.
        if trace_name is None or trace_name == 0x0 or trace_name == "0x0" or trace_name == "None" or trace_name == "null" or len(trace_name) < 1:
            #ftrace_out = self.ramdump.open_file('ftrace.txt','w')
            fout = self.ramdump.open_file('ftrace.txt','w')
            ftrace_out = BufferedWrite(fout)
            header_data = "# tracer: nop \n" \
                          "#\n" \
                          "# entries-in-buffer/entries-written: 315882/1727030 #P:8\n" \
                          "#\n" \
                          "# _-----=> irqs-off\n" \
                          "# / _----=> need-resched\n" \
                          "# | / _---=> hardirq/softirq\n" \
                          "# || / _--=> preempt-depth\n" \
                          "# ||| / delay\n" \
                          "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n" \
                          "# | | | |||| | |\n"
            ftrace_out.write(header_data)
        else:
            if trace_name in self.whitelisted_trace_names or self.whitelisted_trace_names == ["all"]:
                #ftrace_out = self.ramdump.open_file('ftrace_parser/' + 'ftrace_' + trace_name + '.txt','w')
                fout = self.ramdump.open_file('ftrace_parser/' + 'ftrace_' + trace_name + '.txt','w')
                ftrace_out = BufferedWrite(fout)
            else:
                # Not whitelisted: advance to the next trace_array.
                global_trace_data_next = self.ramdump.read_pointer(global_trace_data_next)
                continue
        # ftrace_out = self.ramdump.open_file('ftrace_parser/' + 'ftrace_' + trace_name + '.txt','w')
        ftrace_time_data = {}
        nr_total_buffer_pages = 0
        rb_per_cpu = []
        nr_pages_per_buffer = []
        #taskdump.do_dump_stacks(self.ramdump, 0)
        # Collect the per-cpu ring buffers.
        # NOTE(review): cpu count is hard-coded to 8 here and in the
        # per-core file handling below - confirm against target SoCs.
        for cpu_idx in range(0,8):
            #array_ptr = self.ramdump.read_u64(ring_trace_buffer_base_data1 + self.ramdump.sizeof('void *') * cpu_idx)
            array_ptr = (ring_trace_buffer_base_data1 + self.ramdump.sizeof('void *') * cpu_idx)
            b = self.ramdump.read_pointer(array_ptr)
            if b is None or b == 0x0:
                continue
            if self.ramdump.arm64:
                nr_pages = self.ramdump.read_u64(
                    b + ring_trace_buffer_nr_pages)
            else:
                nr_pages = self.ramdump.read_u32(
                    b + ring_trace_buffer_nr_pages)
            if nr_pages is None:
                continue
            # Honor the user-requested cap on pages parsed per cpu.
            if self.per_cpu_buffer_pages and self.per_cpu_buffer_pages < nr_pages:
                nr_pages = self.per_cpu_buffer_pages
            nr_total_buffer_pages = nr_total_buffer_pages + nr_pages
            nr_pages_per_buffer.append(nr_pages)
            rb_per_cpu.append(b)
            #print "ring_trace_buffer_cpus nr_pages = %d" % nr_pages
            #print "cpu_buffer = {0}".format(hex(b))
        print("\nTotal pages across cpu trace buffers = {}".format(round(nr_total_buffer_pages)))
        #start = time.time()
        # Parse every collected per-cpu buffer; parsed lines accumulate in
        # ftrace_time_data keyed by timestamp.
        for cpu_idx in range(0,len(rb_per_cpu)):
            nr_pages_per_buffer_item = nr_pages_per_buffer[cpu_idx]
            per_cpu_buffer = rb_per_cpu[cpu_idx]
            if per_cpu_buffer is not None:
                evt = FtraceParser_Event(self.ramdump,ftrace_out,cpu_idx,fevent_list.ftrace_event_type,fevent_list.ftrace_raw_struct_type,ftrace_time_data,self.format_event_map,self.savedcmd)
                evt.ring_buffer_per_cpu_parsing(per_cpu_buffer)
                #parse_trace_entry_time += evt.parse_trace_entry_time
        #ftrace_event_time += (time.time()-start)
        global_trace_data_next = self.ramdump.read_pointer(global_trace_data_next)
        switch_map = {}
        ftrace_file_map = {}
        # Open the per-core output files for this trace instance.
        if trace_name is None or trace_name == 0x0 or trace_name == "0x0" or trace_name == "None" or trace_name == "null" or len(trace_name) < 1:
            ftrace_core0_fd = self.ramdump.open_file('ftrace_core0.txt', 'w')
            ftrace_core1_fd = self.ramdump.open_file('ftrace_core1.txt', 'w')
            ftrace_core2_fd = self.ramdump.open_file('ftrace_core2.txt', 'w')
            ftrace_core3_fd = self.ramdump.open_file('ftrace_core3.txt', 'w')
            ftrace_core4_fd = self.ramdump.open_file('ftrace_core4.txt', 'w')
            ftrace_core5_fd = self.ramdump.open_file('ftrace_core5.txt', 'w')
            ftrace_core6_fd = self.ramdump.open_file('ftrace_core6.txt', 'w')
            ftrace_core7_fd = self.ramdump.open_file('ftrace_core7.txt', 'w')
        else:
            if trace_name in self.whitelisted_trace_names or self.whitelisted_trace_names == ["all"]:
                ftrace_core0_fd = self.ramdump.open_file('ftrace_parser/' + 'ftrace_' + trace_name + '_core0.txt','w')
                ftrace_core1_fd = self.ramdump.open_file('ftrace_parser/' + 'ftrace_' + trace_name + '_core1.txt','w')
                ftrace_core2_fd = self.ramdump.open_file('ftrace_parser/' + 'ftrace_' + trace_name + '_core2.txt','w')
                ftrace_core3_fd = self.ramdump.open_file('ftrace_parser/' + 'ftrace_' + trace_name + '_core3.txt','w')
                ftrace_core4_fd = self.ramdump.open_file('ftrace_parser/' + 'ftrace_' + trace_name + '_core4.txt','w')
                ftrace_core5_fd = self.ramdump.open_file('ftrace_parser/' + 'ftrace_' + trace_name + '_core5.txt','w')
                ftrace_core6_fd = self.ramdump.open_file('ftrace_parser/' + 'ftrace_' + trace_name + '_core6.txt','w')
                ftrace_core7_fd = self.ramdump.open_file('ftrace_parser/' + 'ftrace_' + trace_name + '_core7.txt','w')
            else:
                continue
        ftrace_file_map["000"] = BufferedWrite(ftrace_core0_fd)
        ftrace_file_map["001"] = BufferedWrite(ftrace_core1_fd)
        ftrace_file_map["002"] = BufferedWrite(ftrace_core2_fd)
        ftrace_file_map["003"] = BufferedWrite(ftrace_core3_fd)
        ftrace_file_map["004"] = BufferedWrite(ftrace_core4_fd)
        ftrace_file_map["005"] = BufferedWrite(ftrace_core5_fd)
        ftrace_file_map["006"] = BufferedWrite(ftrace_core6_fd)
        ftrace_file_map["007"] = BufferedWrite(ftrace_core7_fd)
        #start = time.time()
        # Emit all events in timestamp order, fixing up unresolved "<TBD>"
        # task names from per-cpu sched_switch context as we go.
        sorted_dict = {k: ftrace_time_data[k] for k in sorted(ftrace_time_data)}
        for key in sorted(sorted_dict.keys()):
            for i in range(0,len(ftrace_time_data[key])):
                line = str(ftrace_time_data[key][i])
                replaced_line = line
                trace_log = log_pattern.match(line)
                bestguess_pid = None
                bestguess_comm = None
                if bool(trace_log):
                    cpu_number = trace_log.group(3)
                    entry_pid = trace_log.group(2)
                else:
                    cpu_number = None
                    entry_pid = None
                if "sched_switch:" in line:
                    # Remember which task runs on this cpu after the switch
                    # so later "<TBD>" entries can be attributed.
                    prev_comm = line.split("prev_comm=")[1].split(" ")[0]
                    prev_pid = line.split("prev_pid=")[1].split(" ")[0]
                    curr_comm = line.split("next_comm=")[1].split(" ")[0]
                    curr_pid = line.split("next_pid=")[1].split(" ")[0]
                    if cpu_number not in switch_map:
                        switch_map[cpu_number] = {}
                    switch_map[cpu_number]["comm"] = curr_comm
                    switch_map[cpu_number]["pid"] = curr_pid
                    bestguess_pid = prev_pid
                    bestguess_comm = prev_comm
                elif "<TBD>" in line and cpu_number in switch_map:
                    bestguess_comm = switch_map[cpu_number]["comm"]
                    bestguess_pid = switch_map[cpu_number]["pid"]
                if "<TBD>" in line:
                    # Only trust the guess when the pid on the line matches.
                    if entry_pid is not None and bestguess_pid is not None and int(entry_pid) == int(bestguess_pid):
                        replaced_line = line.replace("<TBD>", bestguess_comm)
                    else:
                        replaced_line = line.replace("<TBD>", "<...>")
                ftrace_out.write(replaced_line)
                ftrace_file_map[str(cpu_number)].write(replaced_line)
    #post_ftrace_event_time += (time.time()-start)
    #print("Ftrace Event Parsing took {} secs".format(ftrace_event_time))
    #print("Post Ftrace Event Sorting and Write took {} secs".format(post_ftrace_event_time))
    #print("Parse Ftrace Entry function took {} secs".format(parse_trace_entry_time))
def parse(self):
    """Entry point: run ftrace extraction, optionally capped by the
    user-supplied time limit (ramdump.ftrace_limit_time, seconds)."""
    limit = self.ramdump.ftrace_limit_time
    if limit == 0:
        self.ftrace_extract()
        return
    # Only pull in func_timeout when a limit is actually requested.
    from func_timeout import func_timeout
    print_out_str("Limit ftrace parser running time to {}s".format(limit))
    func_timeout(limit, self.ftrace_extract)

# ---------------------------------------------------------------------------
# NOTE: diff-viewer residue ("View File" / "@@ -0,0 +1,969 @@") replaced; a
# second source file (the ftrace event parser module) begins below.
# ---------------------------------------------------------------------------
# Copyright (c) 2020-2022 The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from collections import OrderedDict
import re
from parser_util import register_parser, RamParser
from print_out import print_out_str
from tempfile import NamedTemporaryFile
from struct_print import struct_print_class
# Shared pid -> "comm-pid" cache, reused by every FtraceParser_Event instance
# so command-line lookups are resolved once per pid for the whole parse run.
comm_pid_dict = {}

# Softirq vector number -> action name, mirroring the kernel's softirq enum
# (include/linux/interrupt.h); used to pretty-print softirq trace events.
softirq_action_list = {}
softirq_action_list[0] = "HI_SOFTIRQ"
softirq_action_list[1] = "TIMER_SOFTIRQ"
softirq_action_list[2] = "NET_TX_SOFTIRQ"
softirq_action_list[3] = "NET_RX_SOFTIRQ"
softirq_action_list[4] = "BLOCK_IOPOLL_SOFTIRQ"  # NOTE(review): vector 4 is BLOCK_SOFTIRQ on modern kernels - confirm
softirq_action_list[5] = "IRQ_POLL_SOFTIRQ"  # bug fix: was a duplicate "TASKLET_SOFTIRQ"; vector 5 is IRQ_POLL
softirq_action_list[6] = "TASKLET_SOFTIRQ"
softirq_action_list[7] = "SCHED_SOFTIRQ"
softirq_action_list[8] = "HRTIMER_SOFTIRQ"
softirq_action_list[9] = "RCU_SOFTIRQ"
softirq_action_list[10] = "NR_SOFTIRQS"

# trace_entry.flags bits (kernel enum trace_flag_type) used to build the
# irqs-off / need-resched / irq-context latency-format column.
TRACE_FLAG_IRQS_OFF = 0x01
TRACE_FLAG_IRQS_NOSUPPORT = 0x02
TRACE_FLAG_NEED_RESCHED = 0x04
TRACE_FLAG_HARDIRQ = 0x08
TRACE_FLAG_SOFTIRQ = 0x10
TRACE_FLAG_PREEMPT_RESCHED = 0x20
TRACE_FLAG_NMI = 0x40
TRACE_FLAG_BH_OFF = 0x80
class BufferedWrite(object):
    """
    Helper that batches writes into a file object.

    Strings are accumulated in a list and flushed as a single join once
    ``batchsize`` writes have been buffered (default 8000), on an explicit
    ``flush()``, or when the wrapper is garbage-collected.
    """
    def __init__(self, fdesc):
        # Pending chunks not yet written to the underlying file.
        self.buffer = []
        self.fdesc = fdesc
        # Number of buffered writes that triggers an automatic flush.
        self.batchsize = 8000
        self.count = 0

    def write(self, data):
        """Buffer *data*; auto-flush after every ``batchsize`` writes."""
        self.buffer.append(data)
        self.count += 1
        # Bug fix: honor the configurable batchsize instead of the
        # previously hard-coded 8000 threshold.
        if self.count >= self.batchsize:
            self.flush()
            self.count = 0

    def flush(self):
        """Write all buffered chunks to the file in one call."""
        if self.buffer:
            self.fdesc.write("".join(self.buffer))
            self.buffer = []

    def __del__(self):
        # Best-effort flush of any remaining data on destruction.
        if self.fdesc:
            self.flush()
class FtraceParser_Event(object):
def __init__(self,ramdump,ftrace_out,cpu, trace_event_type,ftrace_raw_struct_type,ftrace_time_data,fromat_event_map,savedcmd):
    """
    Per-cpu ring-buffer event parser.

    :param ramdump: RamDump instance used for all memory reads
    :param ftrace_out: BufferedWrite sink for formatted output
    :param cpu: cpu index this parser handles (rendered as "[NNN]")
    :param trace_event_type: map of event type id -> event name
    :param ftrace_raw_struct_type: map of event type id -> raw struct name
    :param ftrace_time_data: shared dict, timestamp -> list of lines
    :param fromat_event_map: event name -> [field map, print fmt] (param
        name typo kept for caller compatibility)
    :param savedcmd: parsed saved_cmdlines_buffer, or None
    """
    self.cpu = "[{:03d}]".format(cpu)
    self.ramdump = ramdump
    self.ftrace_out = ftrace_out
    #self.ftrace_out = BufferedWrite(ftrace_out)
    self.nr_ftrace_events = 0
    self.ftrace_event_type = trace_event_type
    self.ftrace_raw_struct_type = ftrace_raw_struct_type
    self.ftrace_time_data = ftrace_time_data
    self.fromat_event_map = fromat_event_map
    #self.parse_trace_entry_time = 0
    # Pre-computed struct offsets, resolved once per parser to avoid
    # repeated debug-info lookups in the per-event hot loop.
    self.buffer_page_real_end_offset = self.ramdump.field_offset(
        'struct buffer_page ', 'real_end')
    self.buffer_page_data_page_offset = self.ramdump.field_offset(
        'struct buffer_page ', 'page')
    self.buffer_data_page_commit_offset = self.ramdump.field_offset(
        'struct buffer_data_page ', 'commit')
    self.buffer_data_page_time_stamp_offset = self.ramdump.field_offset(
        'struct buffer_data_page ', 'time_stamp')
    self.buffer_data_page_data_offset = self.ramdump.field_offset(
        'struct buffer_data_page ', 'data')
    self.rb_event_array_offset = self.ramdump.field_offset(
        'struct ring_buffer_event', 'array')
    self.rb_event_timedelta_offset = self.ramdump.field_offset(
        'struct ring_buffer_event', 'time_delta')
    self.rb_event_typelen_offset = self.ramdump.field_offset(
        'struct ring_buffer_event', 'type_len')
    self.trace_entry_type_offset = self.ramdump.field_offset('struct trace_entry ', 'type')
    # pid_max is used both to reject corrupt pids and to mask pids into
    # the saved_cmdlines lookup table.
    self.pid_max = self.ramdump.read_int("pid_max")
    self.map_cmdline_to_pid_offset = self.ramdump.field_offset(
        'struct saved_cmdlines_buffer', 'map_cmdline_to_pid')
    self.saved_cmdlines_offset = self.ramdump.field_offset(
        'struct saved_cmdlines_buffer', 'saved_cmdlines')
    self.pid_offset = self.ramdump.field_offset("struct trace_entry" , "pid")
    self.preempt_count_offset = self.ramdump.field_offset("struct trace_entry", "preempt_count")
    self.flags_offset = self.ramdump.field_offset("struct trace_entry", "flags")
    # Module-level cache shared by all parser instances.
    self.comm_pid_dict = comm_pid_dict
    self.savedcmd = savedcmd
def get_event_length(self, rb_event, rb_event_type, time_delta, buffer_data_page_end):
type_len = rb_event_type
if(type_len == 0):
length = self.ramdump.read_u32(rb_event + self.rb_event_array_offset)
return length
elif(type_len <= 28):
return (type_len << 2)
elif(type_len == 29):
if(time_delta == 1):
length = self.ramdump.read_u32(rb_event + self.rb_event_array_offset)
return length
else:
if rb_event > buffer_data_page_end:
print_out_str("rb_event({}) is bigger than buffer_data_page_end({})".format(hex(rb_event), hex(buffer_data_page_end)))
return -1
return buffer_data_page_end - rb_event #Padding till end of page
elif(type_len == 30):
# Accounts for header size + one u32 array entry
return 8
elif(type_len == 31):
return 8
else:
print_out_str("Unknown type_len {}".format(type_len))
return -1
def parse_buffer_page_entry(self, buffer_page_entry):
    """
    Decode every ring_buffer_event stored in one buffer_page and hand each
    contained trace entry to parse_trace_entry().

    Walks the page's committed bytes while maintaining the running event
    timestamp from per-event deltas plus time-extend (type 30) and
    absolute-timestamp (type 31) events; padding/discarded (type 29)
    events are skipped.

    :param buffer_page_entry: address of a 'struct buffer_page'
    """
    buffer_data_page = None
    buffer_data_page_end = None
    #buffer_data_page_data_offset = None
    rb_event = None
    rb_event_timestamp = 0
    time_delta = 0
    record_length = 0
    #rb_event_array_offset = 0
    tr_entry = None
    tr_event_type = None
    commit = 0
    '''
    struct buffer_page {
    [0x0] struct list_head list;
    [0x10] local_t write;
    [0x18] unsigned int read;
    [0x20] local_t entries;
    [0x28] unsigned long real_end;
    [0x30] struct buffer_data_page *page;
    }
    '''
    buffer_data_page = self.ramdump.read_pointer(buffer_page_entry + self.buffer_page_data_page_offset)
    '''
    struct buffer_data_page {
    [0x0] u64 time_stamp;
    [0x8] local_t commit;
    [0x10] unsigned char data[];
    }
    '''
    commit = 0
    # 'commit' (a native long) is the count of payload bytes written.
    if self.ramdump.arm64:
        commit = self.ramdump.read_u64(
            buffer_data_page + self.buffer_data_page_commit_offset)
    else:
        commit = self.ramdump.read_u32(
            buffer_data_page + self.buffer_data_page_commit_offset)
    if commit and commit > 0:
        buffer_data_page_end = buffer_data_page + commit
        # Page base timestamp; per-event deltas accumulate on top of it.
        time_stamp = self.ramdump.read_u64(
            buffer_data_page + self.buffer_data_page_time_stamp_offset)
        rb_event = buffer_data_page + self.buffer_data_page_data_offset
        total_read = 0
        while (total_read < commit):
            # Upper 27 bits of the event's first word hold the time delta.
            time_delta = self.ramdump.read_u32(rb_event + self.rb_event_timedelta_offset)
            time_delta = time_delta >> 5
            # print_out_str("time_delta after = {0} ".format(time_delta))
            rb_event_timestamp = rb_event_timestamp + time_delta
            # Lower 5 bits hold the type_len encoding.
            rb_event_length_old = self.ramdump.read_u32(rb_event + self.rb_event_typelen_offset)
            rb_event_type = (((1 << 5) - 1) & rb_event_length_old);
            record_length = self.get_event_length(rb_event, rb_event_type, time_delta, buffer_data_page_end)
            if record_length == -1:
                # Inconsistent page contents; stop walking this page.
                break
            #print("rb_event_type is ", rb_event_type)
            if rb_event_type == 0:
                # This could be that type_len * 4 > 112
                # so type_len is set to 0 and 32 bit array filed holds length
                # while payload starts afterwards at array[1]
                tr_entry = rb_event + self.rb_event_array_offset + 0x4
                tr_event_type = self.ramdump.read_u16( tr_entry + self.trace_entry_type_offset)
                if tr_event_type < self.nr_ftrace_events:
                    #self.ftrace_out.write("unknown event \n")
                    pass
                else:
                    self.parse_trace_entry(tr_entry, tr_event_type, time_stamp + rb_event_timestamp)
                record_length = record_length + 0x4 #Header Size
            elif rb_event_type <= 28: #Data Events
                tr_entry = rb_event + self.rb_event_array_offset
                tr_event_type = self.ramdump.read_u16(tr_entry + self.trace_entry_type_offset)
                if tr_event_type < self.nr_ftrace_events:
                    #self.ftrace_out.write("unknown event \n")
                    pass
                else:
                    self.parse_trace_entry(tr_entry, tr_event_type, time_stamp + rb_event_timestamp)
                record_length = record_length + 0x4
            elif rb_event_type == 29:
                """
                Padding event or discarded event
                time_delta here can be 0 or 1
                time delta is set 0 when event is bigger than minimum size (8 bytes)
                in this case we consider rest of the page as padding
                time delta is set to 1 for discarded event
                Here the size is stored in array[0]
                """
                record_length = record_length + 0x4
                pass
            elif rb_event_type == 30:
                # This is a time extend event so we need to use the 32 bit field from array[0](28..59)
                # if time delta actually exceeds 2^27 nanoseconds which is > what 27 bit field can hold
                # We are accounting for a complete time stamp stored in this field (59 bits)
                rb_event_timestamp = rb_event_timestamp + (self.ramdump.read_u32(rb_event + self.rb_event_array_offset) << 27)
            elif rb_event_type == 31:
                # Accounts for an absolute timestamp
                rb_event_timestamp = 0
            rb_event = rb_event + record_length
            total_read += record_length
            #alignment = 4 - (rb_event % 4)
            #rb_event += alignment
def remaing_space(self,count,text_count):
r = count - text_count
temp = " "
for idx in range(r):
temp = temp + " "
return temp
def find_cmdline(self, pid):
comm = "<TBD>"
if self.savedcmd is not None:
if pid == 0:
comm = "<idle>"
else:
tpid = pid & (self.pid_max - 1)
cmdline_map = self.savedcmd.map_pid_to_cmdline[tpid]
if cmdline_map != -1 and cmdline_map != None:
map_cmdline_to_pid = self.savedcmd.map_cmdline_to_pid
cmdline_tpid = self.ramdump.read_int(map_cmdline_to_pid + cmdline_map * 4)
if cmdline_tpid == pid:
saved_cmdlines = self.savedcmd.saved_cmdlines
comm = self.ramdump.read_cstring(saved_cmdlines + cmdline_map * 16, 16) #TASK_COMM_LEN
comm = "{}-{}".format(comm, pid)
self.comm_pid_dict[pid] = comm
return comm
def get_lat_fmt(self, flags, preempt_count):
    """
    Build the 5-character latency-format column (e.g. "d.h1.") from a
    trace_entry's flags and preempt_count: irqs-off state, need-resched
    state, hard/soft-irq context, then the two preempt-count nibbles.
    """
    nmi = flags & TRACE_FLAG_NMI
    hardirq = flags & TRACE_FLAG_HARDIRQ
    softirq = flags & TRACE_FLAG_SOFTIRQ
    bh_off = flags & TRACE_FLAG_BH_OFF
    irqs_disabled = flags & TRACE_FLAG_IRQS_OFF
    # Column 1: interrupt/bottom-half disable state.
    if irqs_disabled and bh_off:
        irq_ch = 'D'
    elif irqs_disabled:
        irq_ch = 'd'
    elif bh_off:
        irq_ch = 'b'
    elif flags & TRACE_FLAG_IRQS_NOSUPPORT:
        irq_ch = 'X'
    else:
        irq_ch = '.'
    # Column 2: need-resched / preempt-resched combination.
    resched = flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED)
    resched_ch = {
        TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED: 'N',
        TRACE_FLAG_NEED_RESCHED: 'n',
        TRACE_FLAG_PREEMPT_RESCHED: 'p',
    }.get(resched, '.')
    # Column 3: NMI / hardirq / softirq context.
    if nmi and hardirq:
        ctx_ch = 'Z'
    elif nmi:
        ctx_ch = 'z'
    elif hardirq and softirq:
        ctx_ch = 'H'
    elif hardirq:
        ctx_ch = 'h'
    elif softirq:
        ctx_ch = 's'
    else:
        ctx_ch = '.'
    # Columns 4-5: low and high preempt-count nibbles ('.' when zero).
    low_ch = '{0:x}'.format(preempt_count & 0xf) if (preempt_count & 0xf) else '.'
    high_ch = '{0:x}'.format((preempt_count >> 4)) if (preempt_count & 0xf0) else '.'
    return irq_ch + resched_ch + ctx_ch + low_ch + high_ch
def parse_trace_entry(self, entry, type, time):
ftrace_raw_entry = None
event_name = ""
local_timestamp = None
pid = 0
preempt_count = 0
struct_type = None
next_comm = None
next_pid = 0
next_prio = 0
work = None
print_ip = None
print_buffer = None
vector = None
space_count = 25
local_timestamp = time / 1000000000.0
if not (local_timestamp in self.ftrace_time_data):
self.ftrace_time_data[local_timestamp] = []
#print("type = {0}".format(type))
if str(type) not in self.ftrace_event_type:
#print_out_str("unknown event type = {0}".format(str(type)))
return
event_name = str(self.ftrace_event_type[str(type)])
#print("event_name {0}".format(event_name))
if event_name is None or event_name == 'None' or 'None' in event_name or len(event_name) <= 1:
return
ftrace_raw_entry = entry
struct_type = self.ftrace_raw_struct_type[str(type)]
pid = self.ramdump.read_u32(ftrace_raw_entry + self.pid_offset)
if pid > self.pid_max:
return
preempt_count = self.ramdump.read_u16(ftrace_raw_entry + self.preempt_count_offset) & 0xFF
flags = self.ramdump.read_u16(ftrace_raw_entry + self.flags_offset) & 0xFF
DEBUG_ENABLE = 0
if pid in self.comm_pid_dict.keys():
comm = self.comm_pid_dict[pid]
else:
comm = self.find_cmdline(pid)
curr_comm = "{0: >25}".format(comm)
lat_fmt = self.get_lat_fmt(flags, preempt_count)
if event_name == "scm_call_start":
#print("ftrace_raw_entry of scm_call_start = {0}".format(hex(ftrace_raw_entry)))
trace_event_raw_offset = self.ramdump.field_offset('struct ' + struct_type, "x0")
trace_event_raw_next_comm = self.ramdump.field_offset('struct ' + struct_type, "arginfo")
trace_event_raw_next_pid = self.ramdump.field_offset('struct ' + struct_type, "args")
trace_event_raw_next_prio = self.ramdump.field_offset('struct ' + struct_type, "x5")
"""prev_state = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_offset)
if ( prev_state == 0) or ( prev_state == 0x400):
prev_state = "RUNNING";
elif ( prev_state == 1):
prev_state = "S";
elif ( prev_state == 2):
prev_state = "T";
else:
prev_state = "OTHERS";"""
if self.ramdump.arm64:
x0 = self.ramdump.read_u64(ftrace_raw_entry + trace_event_raw_offset)
else:
x0 = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_offset)
arginfo = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_next_comm)
args = (ftrace_raw_entry + trace_event_raw_next_pid)
if self.ramdump.arm64:
x5 = self.ramdump.read_u64(ftrace_raw_entry + trace_event_raw_next_prio)
else:
x5 = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_next_prio)
#print("x0 = {0}".format(hex(x0)))
#print("x5 = {0}".format(hex(x5)))
#print("arginfo = {0}".format(hex(arginfo)))
arr = []
ptr_size = self.ramdump.sizeof('void *')
#print("ptr_size = {0}".format(ptr_size))
for i in range(1, 9):
if self.ramdump.arm64:
ptr = self.ramdump.read_u64(args + (i*ptr_size))
else:
ptr = self.ramdump.read_u32(args + (i*ptr_size))
arr.append(hex(ptr))
space_data = self.remaing_space(space_count,len("scm_call_start:"))
if DEBUG_ENABLE == 1:
self.ftrace_out.write(
" <TBD>-{9} {0} {10} {1:.6f}: scm_call_start:{2}func id={3}:(args:{4}, {5}, {6} ,{7} ,{8})\n".format(self.cpu, round(local_timestamp,6),
space_data,x0,arginfo,arr[0],arr[1],arr[2],x5,pid,lat_fmt))
temp_data = " {9} {0} {10} {1:.6f}: scm_call_start:{2}func id={3}:(args:{4}, {5}, {6} ,{7} ,{8})\n".format(self.cpu, round(local_timestamp,6),
space_data,hex(x0),hex(arginfo),arr[0],arr[1],arr[2],hex(x5),curr_comm,lat_fmt)
#print("temp_data = {0}".format(temp_data))
self.ftrace_time_data[local_timestamp].append(temp_data)
elif event_name == "scm_call_end":
#print("ftrace_raw_entry of scm_call_start = {0}".format(hex(ftrace_raw_entry)))
trace_event_raw_offset = self.ramdump.field_offset('struct ' + struct_type, "ret")
"""prev_state = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_offset)
if ( prev_state == 0) or ( prev_state == 0x400):
prev_state = "RUNNING";
elif ( prev_state == 1):
prev_state = "S";
elif ( prev_state == 2):
prev_state = "T";
else:
prev_state = "OTHERS";"""
if self.ramdump.arm64:
rets = self.ramdump.read_u64(ftrace_raw_entry + trace_event_raw_offset)
else:
rets = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_offset)
#print("x0 = {0}".format(hex(x0)))
#print("x5 = {0}".format(hex(x5)))
#print("arginfo = {0}".format(hex(arginfo)))
arr = []
ptr_size = self.ramdump.sizeof('void *')
#print("ptr_size = {0}".format(ptr_size))
for i in range(1, 4):
if self.ramdump.arm64:
ptr = self.ramdump.read_u64(rets + (i*ptr_size))
else:
ptr = self.ramdump.read_u32(rets + (i*ptr_size))
arr.append(ptr)
space_data = self.remaing_space(space_count,len("scm_call_end:"))
if DEBUG_ENABLE == 1:
self.ftrace_out.write(
" <TBD>-{6} {0} {7} {1:.6f}: scm_call_end:{2}ret:{3}, {4}, {5}\n".format(self.cpu, round(local_timestamp,6),
space_data,arr[0],arr[1],arr[2],pid,lat_fmt))
temp_data = " {6} {0} {7} {1:.6f}: scm_call_end:{2}ret:{3}, {4}, {5}\n)\n".format(self.cpu, round(local_timestamp,6),
space_data,arr[0],arr[1],arr[2],curr_comm,lat_fmt)
#print("temp_data = {0}".format(temp_data))
self.ftrace_time_data[local_timestamp].append(temp_data)
elif event_name == "sched_switch":
trace_event_raw_offset = self.ramdump.field_offset('struct ' + struct_type, "prev_state")
trace_event_raw_next_comm = self.ramdump.field_offset('struct ' + struct_type, "next_comm")
trace_event_raw_next_pid = self.ramdump.field_offset('struct ' + struct_type, "next_pid")
trace_event_raw_next_prio = self.ramdump.field_offset('struct ' + struct_type, "next_prio")
trace_event_raw_prev_comm = self.ramdump.field_offset('struct ' + struct_type, "prev_comm")
trace_event_raw_prev_pid = self.ramdump.field_offset('struct ' + struct_type, "prev_pid")
trace_event_raw_prev_prio = self.ramdump.field_offset('struct ' + struct_type, "prev_prio")
trace_event_raw_prev_state = self.ramdump.field_offset('struct ' + struct_type, "prev_state")
next_comm = self.ramdump.read_cstring(ftrace_raw_entry + trace_event_raw_next_comm)
next_pid = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_next_pid)
next_prio = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_next_prio)
prev_comm = self.ramdump.read_cstring(ftrace_raw_entry + trace_event_raw_prev_comm)
prev_pid = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_prev_pid)
prev_prio = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_prev_prio)
prev_state1 = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_prev_state)
'''if ( prev_state1 == 0) or ( prev_state1 == 0x400):
prev_state1 = "R";
elif ( prev_state1 == 1):
prev_state1 = "S";
elif ( prev_state1 == 2):
prev_state1 = "D";
else:
prev_state1 = "T";'''
prev_state_info = (prev_state1 & ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1) - 1))
if ( prev_state_info == 0):
prev_state_info = "R"
elif ( prev_state_info == 1):
prev_state_info = "S"
elif ( prev_state_info == 2):
prev_state_info = "D"
elif ( prev_state_info == 4):
prev_state_info = "T"
elif ( prev_state_info == 8):
prev_state_info = "t"
elif ( prev_state_info == 16):
prev_state_info = "X"
elif ( prev_state_info == 32):
prev_state_info = "Z"
elif ( prev_state_info == 64):
prev_state_info = "P"
elif ( prev_state_info == 128):
prev_state_info = "I"
prev_state_info2 = ""
if prev_state_info:
prev_state_info2 = "+"
space_data = self.remaing_space(space_count,len("sched_switch:"))
if DEBUG_ENABLE == 1:
self.ftrace_out.write(
" <TBD>-{10} {0} {11} {1:.6f}: sched_switch:{2}{3}:{4} [{5}] {6} ==> {7}:{8} [{9}]\n".format(self.cpu, round(local_timestamp,6),
space_data,prev_comm,prev_pid,prev_prio,prev_state_info,next_comm,next_pid,next_prio,pid,lat_fmt))
temp_data = " {10} {0} {11} {1:.6f}: sched_switch:{2}{3}:{4} [{5}] {6} ==> {7}:{8} [{9}]\n".format(self.cpu, round(local_timestamp,6),
space_data,prev_comm,prev_pid,prev_prio,prev_state_info,next_comm,next_pid,next_prio,curr_comm,lat_fmt)
temp_data1 = " {9} {0} {10} {1:.6f}: sched_switch: prev_comm={2} prev_pid={3} prev_prio={4} prev_state={5} ==> next_comm={6} next_pid={7} next_prio={8}\n".format(self.cpu, round(local_timestamp,6),
prev_comm,prev_pid,prev_prio,prev_state_info,next_comm,next_pid,next_prio,curr_comm,lat_fmt)
self.ftrace_time_data[local_timestamp].append(temp_data1)
elif event_name == "softirq_raise":
trace_event_softirq_vec_offset = self.ramdump.field_offset('struct ' + 'trace_event_raw_softirq', "vec")
if trace_event_softirq_vec_offset:
vector = self.ramdump.read_u32(ftrace_raw_entry + trace_event_softirq_vec_offset)
if DEBUG_ENABLE == 1:
self.ftrace_out.write(
" <TBD>-{4} {0} {5} {1:.6f}: softirq_entry: vec={2} [action={3}]\n".format(self.cpu, local_timestamp, vector,softirq_action_list[vector],pid,lat_fmt))
try:
temp_data = " {4} {0} {5} {1:.6f}: softirq_raise: vec={2} [action={3}]\n".format(self.cpu, local_timestamp, vector,softirq_action_list[vector],curr_comm,lat_fmt)
except Exception as err:
print_out_str("failed to find a softirq action = {0}".format(vector))
temp_data = " {4} {0} {5} {1:.6f}: softirq_raise: vec={2} [action={3}]\n".format(self.cpu, local_timestamp, vector,"softirq unknown vector",curr_comm,lat_fmt)
self.ftrace_time_data[local_timestamp].append(temp_data)
elif event_name == "workqueue_activate_work":
trace_event_raw_work_offset = self.ramdump.field_offset('struct ' + 'trace_event_raw_workqueue_execute_start', "work")
function_offset = self.ramdump.field_offset(
'struct ' + 'work_struct', "func")
if trace_event_raw_work_offset:
space_data = self.remaing_space(space_count,len("workqueue_activate_work:"))
if self.ramdump.arm64:
work = self.ramdump.read_u64(ftrace_raw_entry + trace_event_raw_work_offset)
function = self.ramdump.read_u64(work + function_offset)
else:
work = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_work_offset)
function = self.ramdump.read_u32(work + function_offset)
if function != None:
function_name = self.ramdump.unwind_lookup(function)
if function_name == None:
function_name = 'na'
else:
function = 0
function_name = 'na'
temp_data = " {4} {0} {7} {1:.6f}: workqueue_activate_work:{2}work struct {3} function 0x{5:x} {6}\n".format(self.cpu,
local_timestamp,space_data,
str(hex(work)).replace("L",""), curr_comm, function, function_name, lat_fmt)
self.ftrace_time_data[local_timestamp].append(temp_data)
elif event_name == "workqueue_execute_start" or event_name == "workqueue_execute_end" or event_name == "workqueue_queue_work":
trace_event_raw_work_offset = 0
function_offset = 0
if event_name == "workqueue_execute_start":
function_offset = self.ramdump.field_offset(
'struct ' + 'trace_event_raw_workqueue_execute_start', "function")
trace_event_raw_work_offset = self.ramdump.field_offset(
'struct ' + 'trace_event_raw_workqueue_execute_start', "work")
elif event_name == "workqueue_execute_end":
function_offset = self.ramdump.field_offset(
'struct ' + 'trace_event_raw_workqueue_queue_work', "function")
trace_event_raw_work_offset = self.ramdump.field_offset(
'struct ' + 'trace_event_raw_workqueue_queue_work', "work")
elif event_name == "workqueue_queue_work":
function_offset = self.ramdump.field_offset(
'struct ' + 'trace_event_raw_workqueue_execute_end', "function")
trace_event_raw_work_offset = self.ramdump.field_offset(
'struct ' + 'trace_event_raw_workqueue_execute_end', "work")
function = 0
if function_offset:
if self.ramdump.arm64:
function = self.ramdump.read_u64(ftrace_raw_entry + function_offset)
else:
function = self.ramdump.read_u32(ftrace_raw_entry + function_offset)
function_name = 'na'
if function != 0:
function_name = self.ramdump.unwind_lookup(function)
if function_name == None:
function_name = 'na'
if trace_event_raw_work_offset:
if self.ramdump.arm64:
work = self.ramdump.read_u64(ftrace_raw_entry + trace_event_raw_work_offset)
else:
work = self.ramdump.read_u32(ftrace_raw_entry + trace_event_raw_work_offset)
temp_data = " {4} {0} {7} {1:.6f}: {2} work_struct {3} function 0x{5:x} {6}\n".format(self.cpu,
local_timestamp, event_name,
str(hex(work)).replace("L",""), curr_comm, function, function_name, lat_fmt)
self.ftrace_time_data[local_timestamp].append(temp_data)
elif event_name == "bprint":
MAX_LEN = 1000
print_entry_ip_offset = self.ramdump.field_offset('struct bprint_entry' , "ip")
print_entry_buf_offset = self.ramdump.field_offset('struct bprint_entry', "buf")
print_entry_fmt_offset = self.ramdump.field_offset('struct bprint_entry', "fmt")
print_ip = self.ramdump.read_word(ftrace_raw_entry + print_entry_ip_offset)
if self.ramdump.arm64:
print_entry_fmt = self.ramdump.read_u64(ftrace_raw_entry + print_entry_fmt_offset)
else:
print_entry_fmt = self.ramdump.read_u32(ftrace_raw_entry + print_entry_fmt_offset)
print_entry_fmt_data = self.ramdump.read_cstring(print_entry_fmt, MAX_LEN)
"""
['%px', '%llx', '%ps', '%p']
Supported :
d for integers
f for floating-point numbers
b for binary numbers
o for octal numbers
x for octal hexadecimal numbers
s for string
e for floating-point in an exponent format
"""
regex = re.compile('%[\*]*[a-zA-Z]+')
length = 0
print_buffer = []
print_buffer_offset = ftrace_raw_entry + print_entry_buf_offset
if print_entry_fmt_data:
function = self.ramdump.get_symbol_info1(print_ip)
prev_match = None
unaligned_print_buffer_offset = None
for match in regex.finditer(print_entry_fmt_data):
replacement = match.group()
if 'c' in match.group():
replacement = '%s'
print_buffer.append(self.ramdump.read_byte(print_buffer_offset))
print_buffer_offset += self.ramdump.sizeof('char')
elif "%*pbl" in match.group():
replacement = "%s"
print_entry_fmt_data = print_entry_fmt_data.replace(match.group(), replacement)
align = self.ramdump.sizeof("int") - 1
#Read precision/width
#print_buffer.append(self.ramdump.read_int(print_buffer_offset))
print_buffer_offset += self.ramdump.sizeof('unsigned int')
print_buffer_offset = (print_buffer_offset + (align)) & (~align)
#Read bitmask
nr_cpu_ids = self.ramdump.address_of("nr_cpu_ids")
nr_cpu_ids = self.ramdump.read_u32(nr_cpu_ids)
#single element of long is enough to accomodate all cpus
cpu_bits = self.ramdump.read_u64(print_buffer_offset)
# Trim bits to valid mask only
def getValidBits(num,k,p):
binary = bin(num)
binary = binary[2:]
end = len(binary) - p
start = end - k
return binary[start : end+1]
cpu_bits = getValidBits(cpu_bits, nr_cpu_ids, 0)
#print_buffer.append("{:b}".format(cpu_bits))
print_buffer.append(cpu_bits)
print_buffer_offset += self.ramdump.sizeof('unsigned long')
print_buffer_offset = (print_buffer_offset + (align)) & (~align)
continue
elif '%ps' in match.group():
replacement = "%s%x"
if self.ramdump.arm64:
addr = self.ramdump.read_u64(print_buffer_offset)
wname = self.ramdump.unwind_lookup(addr)
if wname is None:
wname = 'na'
print_buffer.append(wname)
print_buffer.append(addr)
print_buffer_offset += 8
else:
addr = self.ramdump.read_u32(print_buffer_offset)
wname = self.ramdump.unwind_lookup(addr)
if wname is None:
wname = 'na'
print_buffer.append(wname)
print_buffer.append(addr)
print_buffer_offset += 4
elif '%pS' in match.group():
replacement = "%s(%x)"
if self.ramdump.arm64:
addr = self.ramdump.read_u64(print_buffer_offset)
wname = self.ramdump.unwind_lookup(addr)
if wname is None:
wname = 'na'
else:
wname = '{}+{}'.format(wname[0], hex(wname[1]))
print_buffer.append(wname)
print_buffer.append(addr)
print_buffer_offset += 8
else:
addr = self.ramdump.read_u32(print_buffer_offset)
wname = self.ramdump.unwind_lookup(addr)
if wname is None:
wname = 'na'
else:
wname = '{}+{}'.format(wname[0], hex(wname[1]))
print_buffer.append(wname)
print_buffer.append(addr)
print_buffer_offset += 4
elif '%p' in match.group() and '%ps' not in match.group() and '%pS' not in match.group():
replacement = "%x"
if self.ramdump.arm64:
print_buffer.append(self.ramdump.read_u64(print_buffer_offset))
print_buffer_offset += 8
else:
print_buffer.append(self.ramdump.read_u32(print_buffer_offset))
print_buffer_offset += 4
elif 'x' in match.group():
replacement = "%x"
if self.ramdump.arm64:
print_buffer.append(self.ramdump.read_u64(print_buffer_offset))
print_buffer_offset += 8
else:
print_buffer.append(self.ramdump.read_u32(print_buffer_offset))
print_buffer_offset += 4
elif 's' in match.group():
replacement = "%s"
if prev_match is not None and '%s' in prev_match:
print_buffer_offset = unaligned_print_buffer_offset
sdata = self.ramdump.read_cstring(print_buffer_offset)
print_buffer.append(sdata)
print_buffer_offset = print_buffer_offset + len(sdata) + 1
elif 'll' in match.group() or 'l' in match.group():
replacement = "%d"
if self.ramdump.arm64:
print_buffer.append(self.ramdump.read_u64(print_buffer_offset))
print_buffer_offset += 8
else:
print_buffer.append(self.ramdump.read_u32(print_buffer_offset))
print_buffer_offset += 4
elif 'h' in match.group():
print_buffer.append(self.ramdump.read_u16(print_buffer_offset))
print_buffer_offset += self.ramdump.sizeof('short')
elif 'd' in match.group():
replacement = "%d"
if self.ramdump.arm64:
print_buffer.append(self.ramdump.read_int(print_buffer_offset))
print_buffer_offset += self.ramdump.sizeof('int')
elif 'u' in match.group():
replacement = "%d"
if 'll' in match.group() or 'l' in match.group():
if self.ramdump.arm64:
print_buffer.append(self.ramdump.read_u64(print_buffer_offset))
print_buffer_offset += 8
else:
print_buffer.append(self.ramdump.read_u32(print_buffer_offset))
print_buffer_offset += 4
else:
print_buffer.append(self.ramdump.read_u32(print_buffer_offset))
print_buffer_offset += self.ramdump.sizeof('unsigned int')
elif 'f' in match.group():
replacement = "%f"
print_buffer.append(self.ramdump.read_u32(print_buffer_offset))
print_buffer_offset += self.ramdump.sizeof('float')
if replacement != match.group():
print_entry_fmt_data = print_entry_fmt_data.replace(match.group(), replacement)
length += 1
prev_match = match.group()
unaligned_print_buffer_offset = print_buffer_offset
align = self.ramdump.sizeof("int") - 1
print_buffer_offset = (print_buffer_offset + (align)) & (~align)
try:
temp_data = " {4} {0} {5} {1:.6f}: bprint: {2} {3}\n".format(self.cpu,
local_timestamp,
function,print_entry_fmt_data% (
tuple(print_buffer)),
curr_comm,lat_fmt)
self.ftrace_time_data[local_timestamp].append(temp_data)
except Exception as err:
temp_data = "Error parsing bprint event entry"
return
elif event_name == "print":
#print("ftrace_raw_entry = {0}".format(hex(ftrace_raw_entry)))
print_entry_ip_offset = self.ramdump.field_offset('struct print_entry' , "ip")
print_entry_buf_offset = self.ramdump.field_offset('struct print_entry', "buf")
#print_entry_fmt_offset = self.ramdump.field_offset('struct print_entry', "fmt")
print_ip = self.ramdump.read_word(ftrace_raw_entry + print_entry_ip_offset)
print_buffer = self.ramdump.read_cstring(ftrace_raw_entry + print_entry_buf_offset)
#print_entry_fmt = self.ramdump.read_u64(ftrace_raw_entry + print_entry_fmt_offset)
#print_entry_fmt_data = self.ramdump.read_cstring(print_entry_fmt)
#print_ip_func = self.ramdump.read_cstring(print_ip)
function = self.ramdump.get_symbol_info1(print_ip)
temp_data = " {4} {0} {5} {1:.6f}: print: {2} {3}\n".format(self.cpu, local_timestamp , function, print_buffer, curr_comm, lat_fmt)
self.ftrace_time_data[local_timestamp].append(temp_data)
else:
event_data = self.fromat_event_map[event_name]
fmt_str = event_data[1]
if "rpmh" in event_name:
fmt_str = fmt_str.replace('send-msg:','send-msg')
fmt_str = fmt_str.replace(': ','')
elif "workqueue" in event_name:
fmt_str = fmt_str.replace('work struct','work_struct')
offset_data = event_data[0]
fmt_name_value_map = OrderedDict()
try:
d = str(fmt_str.split('",')[1].replace("'", ''))
pr = str(fmt_str.split('",')[0].replace("'", ''))
pr = str(pr.split('",')[0].replace('"', ''))
pr = str(pr.split('",')[0].replace('[', ''))
pr = str(pr.split('",')[0].replace(']', ''))
if "cpuhp_latency" == event_name:
pr = pr.replace("USEC ret: %d","USEC_ret:%d")
if "thermal_device_update" == event_name:
pr = pr.replace("received event","received_event")
temp_a = []
for ii in d.split(","):
ii = str(ii).replace("'","").replace(" ","")
temp_a.append(ii)
j = 0
temp_a = []
pr_f = []
if "workqueue_execute" in event_name:
for ki in pr.split(": "):
pr_f.append(str(ki))
else:
if ", " in pr and event_name != 'user_fault':
for ki in pr.split(", "):
if len(ki) >= 1:
pr_f.append(str(ki).replace(" ",""))
else:
for ki in pr.split(" "):
if len(ki) >= 1:
pr_f.append(str(ki).replace(" ",""))
for item,item_list in offset_data.items():
type_str,offset,size = item_list
if 'unsigned long' in type_str or 'u64' in type_str or '*' in type_str:
if self.ramdump.arm64:
v = self.ramdump.read_u64(ftrace_raw_entry + offset)
else:
v = self.ramdump.read_u32(ftrace_raw_entry + offset)
if "rwmmio" in event_name and "addr" in item:
phys = self.ramdump.virt_to_phys(v)
fmt_name_value_map[item] = "{}({})".format(hex(int(v)), hex(phys))
elif "func" not in item:
fmt_name_value_map[item] = hex(int(v))
else:
fmt_name_value_map[item] = v
elif 'long' in type_str or 'int' in type_str or 'u32' in type_str or 'bool' in type_str or 'pid_t' in type_str:
v = self.ramdump.read_u32(ftrace_raw_entry + offset)
fmt_name_value_map[item] = v
elif 'u8' in type_str:
v = self.ramdump.read_byte(ftrace_raw_entry + offset)
fmt_name_value_map[item] = v
elif 'const' in type_str and 'char *' in type_str:
v = self.ramdump.read_pointer(ftrace_raw_entry + offset)
v = self.ramdump.read_cstring(v)
fmt_name_value_map[item] = v
elif type_str.startswith('__data_loc') and type_str.endswith('char[]'):
v = self.ramdump.read_u32(ftrace_raw_entry + offset)
v = self.ramdump.read_cstring(ftrace_raw_entry + (v & 0xffff), (v >> 16))
if isinstance(v, bytes):
v = self.ramdump.read_cstring(ftrace_raw_entry + (offset*4))
fmt_name_value_map[item] = v
elif 'char[' in type_str:
length = re.match(r'(?:unsigned )?char\[(\d+)\]', type_str)
if length:
length = int(length.group(1))
else:
if "[TASK_COMM_LEN]" in type_str:
length = 16
else:
print_out_str("ftrace: unknown length for {} ({})".format(item, type_str))
length = 12 # Chosen arbitrarily
v = self.ramdump.read_cstring(ftrace_raw_entry + offset, max_length=length)
fmt_name_value_map[item] = v
elif 'char' in type_str:
v = self.ramdump.read_byte(ftrace_raw_entry + offset)
fmt_name_value_map[item] = v
elif 'unsigned short' in type_str or 'u16' in type_str:
v = self.ramdump.read_u16(ftrace_raw_entry + offset)
fmt_name_value_map[item] = v
elif 'short' in type_str or 'signed short' in type_str or 's16' in type_str:
v = self.ramdump.read_s32(ftrace_raw_entry + offset)
fmt_name_value_map[item] = v
elif 's64' in type_str:
v = self.ramdump.read_s64(ftrace_raw_entry + offset)
fmt_name_value_map[item] = v
else:
v = self.ramdump.read_u32(ftrace_raw_entry + offset)
fmt_name_value_map[item] = v
if "softirq" in event_name:
if v > len(softirq_action_list) -1:
action = v
else:
action = softirq_action_list[v]
fmt_name_value_map['action'] = action
if "rwmmio" in event_name and "caller" in item:
symbol = self.ramdump.read_word(ftrace_raw_entry + offset)
if symbol is not None:
fmt_name_value_map[item] = self.ramdump.get_symbol_info1(symbol)
temp_a.append(v)
j = j + 1
temp = ""
try:
for keyinfo in fmt_name_value_map:
if "function" == keyinfo and isinstance(fmt_name_value_map[keyinfo], int):
wq_function1 = self.ramdump.get_symbol_info1(fmt_name_value_map[keyinfo])
tt = keyinfo + "=" + wq_function1
if "func" in keyinfo and isinstance(fmt_name_value_map[keyinfo], int):
wq_function1 = self.ramdump.get_symbol_info1(fmt_name_value_map[keyinfo])
if wq_function1 and len(wq_function1) > 1 and wq_function1 != 'No':
tt = keyinfo + "=" + wq_function1
else:
tt = keyinfo + "=" + str(hex(fmt_name_value_map[keyinfo]))
else:
tt = keyinfo + "=" + str(fmt_name_value_map[keyinfo])
temp = temp + tt + " "
except Exception as err:
#print_out_str("missing event = {0} err = {1}".format(event_name,str(err)))
pass
try:
temp = temp + "\n"
temp_data = " {4} {0} {5} {1:.6f}: {2} {3}".format(self.cpu, round(local_timestamp, 6),event_name,temp,curr_comm,lat_fmt)
self.ftrace_time_data[local_timestamp].append(temp_data)
temp = ""
except Exception as err:
#print_out_str("missing event = {0} err = {1}".format(event_name,str(err)))
pass
except Exception as err:
#print_out_str("missing event = {0} err = {1}".format(event_name,str(err)))
pass
def ring_buffer_per_cpu_parsing(self, ring_trace_buffer_cpu):
page_index = 0
buffer_page_list_offset = self.ramdump.field_offset(
'struct buffer_page ', 'list')
buffer_page_list_prev_offset = self.ramdump.field_offset(
'struct list_head ', 'prev')
trace_ring_buffer_per_cpu_data = struct_print_class(self.ramdump, 'ring_buffer_per_cpu', ring_trace_buffer_cpu, None)
'''
crash> struct ring_buffer_per_cpu -x -o
struct ring_buffer_per_cpu {
[0x0] int cpu;
[0x4] atomic_t record_disabled;
[0x8] atomic_t resize_disabled;
[0x10] struct trace_buffer *buffer;
[0x80] unsigned long nr_pages;
[0x88] unsigned int current_context;
[0x90] struct list_head *pages;
[0x98] struct buffer_page *head_page;
[0xa0] struct buffer_page *tail_page; // parser this
[0xa8] struct buffer_page *commit_page;
[0xb0] struct buffer_page *reader_page;
'''
if self.ramdump.arm64:
trace_ring_buffer_per_cpu_data.append('nr_pages', 'u64')
else:
trace_ring_buffer_per_cpu_data.append('nr_pages', 'u32')
trace_ring_buffer_per_cpu_data.append('tail_page', 'ptr')
trace_ring_buffer_per_cpu_data.process()
nr_pages = trace_ring_buffer_per_cpu_data.get_val('nr_pages')
buffer_page_entry = trace_ring_buffer_per_cpu_data.get_val('tail_page')
while page_index < nr_pages:
if buffer_page_entry:
self.parse_buffer_page_entry(buffer_page_entry)
buffer_page_entry_list = buffer_page_entry + buffer_page_list_offset
buffer_page_entry = self.ramdump.read_pointer(buffer_page_entry_list + buffer_page_list_prev_offset)
page_index = page_index + 1
self.ftrace_out.flush()

View File

@@ -0,0 +1,69 @@
# Copyright (c) 2020, The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser
from print_out import print_out_str
class FtraceParser_Event_List(object):
    """Map ftrace event-type ids to event names and raw-struct names.

    Walks the kernel's global ``ftrace_events`` list (struct
    trace_event_call entries) in the ramdump and fills two dicts keyed
    by the stringified event type id:

      - ``ftrace_event_type``:      event name
      - ``ftrace_raw_struct_type``: matching "trace_event_raw_<name>"

    Type ids 6 and 5 are the special "bprint" / "print" events and are
    mapped to those fixed names.
    """

    def __init__(self, ramdump):
        self.ramdump = ramdump
        self.ftrace_event_type = {}
        self.ftrace_raw_struct_type = {}
        call_list_offset = self.ramdump.field_offset("struct trace_event_call", "list")
        call_event_offset = self.ramdump.field_offset("struct trace_event_call", "event")
        call_tp_offset = self.ramdump.field_offset("struct trace_event_call", "tp")
        tp_name_offset = self.ramdump.field_offset("struct tracepoint", "name")
        event_type_offset = self.ramdump.field_offset("struct trace_event", "type")
        next_offset = self.ramdump.field_offset("struct list_head", "next")
        head = self.ramdump.address_of("ftrace_events")
        entry = self.ramdump.read_pointer(head + next_offset)
        # Pointer-sized read helper for the target's word size.
        read_ptr = self.ramdump.read_u64 if self.ramdump.arm64 else self.ramdump.read_u32
        while entry != head:
            call = entry - call_list_offset
            event_data = call + call_event_offset
            if not event_data:
                break
            tp_data = call + call_tp_offset
            event_type = self.ramdump.read_u16(event_data + event_type_offset)
            tracepoint = read_ptr(tp_data)
            name_ptr = read_ptr(tracepoint + tp_name_offset)
            event_name = self.ramdump.read_cstring(name_ptr)
            tp_string = self.ramdump.read_cstring(tracepoint)
            key = str(event_type)
            if key == "6" or key == "5":
                print_out_str("ftrace_event_data => {0} ftrace_event >> {1} tp_data >> {2} event_name >> {3} event_name_value >> {4} event_name2 {5} event_type {6}".format(hex(event_data), hex(call), hex(tp_data), hex(tracepoint), hex(name_ptr), tp_string, event_type))
                label = "bprint" if key == "6" else "print"
                self.ftrace_event_type[key] = label
                self.ftrace_raw_struct_type[key] = label
            else:
                self.ftrace_event_type[key] = str(event_name)
                self.ftrace_raw_struct_type[key] = "trace_event_raw_" + str(event_name)
            entry = self.ramdump.read_pointer(entry + next_offset)

View File

@@ -0,0 +1,10 @@
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

View File

@@ -0,0 +1,271 @@
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import re
# Trace32 configuration file contents (configsim.t32) — PBI=SIM selects
# simulator mode so the GMU snapshot can be debugged without hardware.
config_sim_t32 = """; Printer settings
PRINTER=WINDOWS
; Environment variables
OS=
ID=T32
TMP=C:\\Temp
;SYS=C:\\T32
PBI=SIM
SCREEN=
VFULL
FONT=MEDIUM
"""
# Windows batch file body that starts t32marm.exe with the generated config
# and runs gmu_startup_script.cmm against the extracted snapshot directory.
launch_t32_gmu = """set PWD=%~dp0
start C:\\t32\\bin\\windows64\\t32marm.exe -c %PWD%/configsim.t32, %PWD%\\snapshot\\gmu_t32\\gmu_startup_script.cmm %PWD%\\snapshot\\gmu_t32
"""
# First part of the generated CMM script: selects the Cortex-M3 (GMU) core
# and cd's into the dump directory passed in as &DumpDir by the launcher.
gmu_startup_script_header = """title "GMU snapshot"
ENTRY &DumpDir &AxfSymbol
WinCLEAR
AREA
AREA.CLEAR
sys.cpu CORTEXM3
sys.up
PRINT "DumpDir is &DumpDir"
cd &DumpDir
PRINT "using symbol from &AxfSymbol"
"""
# Last part of the generated CMM script: restores the GMU register state
# saved in gs_cm3Snapshot (core, special, NVIC, fault-status registers and
# the stacked fault frame) into the T32 simulator.
gmu_startup_script_footer = """
; enable single-stepping
PRINT "Enabling single-stepping..."
SYStem.Option IMASKASM ON
SYStem.Option IMASKHLL ON
SNOOP.PC ON
; reset the processor registers
Register.Init
; load the saved fault info register values
&r0=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.r0))
Register.Set R0 (&r0)
&r1=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.r1))
Register.Set R1 (&r1)
&r2=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.r2))
Register.Set R2 (&r2)
&r3=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.r3))
Register.Set R3 (&r3)
&r12=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.r12))
Register.Set R12 (&r12)
&lr=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.stackedLR))
Register.Set LR (&lr)
&pc=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.stackedPC))
Register.Set PC (&pc)
&xpsr=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.stackedxPSR))
Register.Set XPSR (&xpsr)
&sp=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.faultStack))
Register.Set SP (&sp)
; load the other core register values
&r4=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[4]))
Register.Set R4 (&r4)
&r5=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[5]))
Register.Set R5 (&r5)
&r6=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[6]))
Register.Set R6 (&r6)
&r7=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[7]))
Register.Set R7 (&r7)
&r8=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[8]))
Register.Set R8 (&r8)
&r9=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[9]))
Register.Set R9 (&r9)
&r10=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[10]))
Register.Set R10 (&r10)
&r11=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[11]))
Register.Set R11 (&r11)
; load the saved fault info register values
&r0=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[0]))
Register.Set R0 (&r0)
&r1=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[1]))
Register.Set R1 (&r1)
&r2=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[2]))
Register.Set R2 (&r2)
&r3=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[3]))
Register.Set R3 (&r3)
&r12=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[12]))
Register.Set R12 (&r12)
&r13=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[13]))
Register.Set R13 (&r13)
&r14=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[14]))
Register.Set R14 (&r14)
&r15=Data.Long(Var.ADDRESS(gs_cm3Snapshot.coreRegs.r[15]))
Register.Set R15 (&r15)
; load the special register values
&msp=Data.Long(Var.ADDRESS(gs_cm3Snapshot.specialRegs.MSP))
Register.Set MSP (&msp)
&psp=Data.Long(Var.ADDRESS(gs_cm3Snapshot.specialRegs.PSP))
Register.Set PSP (&psp)
&basepri=Data.Long(Var.ADDRESS(gs_cm3Snapshot.specialRegs.BASEPRI))
Register.Set BASEPRI (&basepri)
&primask=Data.Long(Var.ADDRESS(gs_cm3Snapshot.specialRegs.PRIMASK))
Register.Set PRIMASK (&primask)
&faultmask=Data.Long(Var.ADDRESS(gs_cm3Snapshot.specialRegs.FAULTMASK))
Register.Set FAULTMASK (&faultmask)
&control=Data.Long(Var.ADDRESS(gs_cm3Snapshot.specialRegs.CONTROL))
Register.Set CONTROL (&control)
; load the nvic register values
&iser0=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.ISER[0]))
Data.Set SD:0xE000E100 %LE %Long &iser0
&iser1=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.ISER[1]))
Data.Set SD:0xE000E104 %LE %Long &iser1
&icer0=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.ICER[0]))
Data.Set SD:0xE000E180 %LE %Long &icer0
&icer1=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.ICER[1]))
Data.Set SD:0xE000E184 %LE %Long &icer1
&ispr0=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.ISPR[0]))
Data.Set SD:0xE000E200 %LE %Long &ispr0
&ispr1=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.ISPR[1]))
Data.Set SD:0xE000E204 %LE %Long &ispr1
&icpr0=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.ICPR[0]))
Data.Set SD:0xE000E280 %LE %Long &icpr0
&icpr1=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.ICPR[1]))
Data.Set SD:0xE000E284 %LE %Long &icpr1
&iabr0=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IABR[0]))
Data.Set SD:0xE000E300 %LE %Long &iabr0
&iabr1=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IABR[1]))
Data.Set SD:0xE000E304 %LE %Long &iabr1
; load the interrupt priority register values
&ipr0=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[0]))
Data.Set SD:0xE000E400 %LE %Long &ipr0
&ipr1=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[1]))
Data.Set SD:0xE000E404 %LE %Long &ipr1
&ipr2=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[2]))
Data.Set SD:0xE000E408 %LE %Long &ipr2
&ipr3=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[3]))
Data.Set SD:0xE000E40C %LE %Long &ipr3
&ipr4=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[4]))
Data.Set SD:0xE000E410 %LE %Long &ipr4
&ipr5=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[5]))
Data.Set SD:0xE000E414 %LE %Long &ipr5
&ipr6=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[6]))
Data.Set SD:0xE000E418 %LE %Long &ipr6
&ipr7=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[7]))
Data.Set SD:0xE000E41c %LE %Long &ipr7
&ipr8=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[8]))
Data.Set SD:0xE000E420 %LE %Long &ipr8
&ipr9=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[9]))
Data.Set SD:0xE000E424 %LE %Long &ipr9
&ipr10=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[10]))
Data.Set SD:0xE000E428 %LE %Long &ipr10
&ipr11=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[11]))
Data.Set SD:0xE000E42C %LE %Long &ipr11
&ipr12=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[12]))
Data.Set SD:0xE000E430 %LE %Long &ipr12
&ipr13=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[13]))
Data.Set SD:0xE000E434 %LE %Long &ipr13
&ipr14=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[14]))
Data.Set SD:0xE000E438 %LE %Long &ipr14
&ipr15=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.IPR[15]))
Data.Set SD:0xE000E43C %LE %Long &ipr15
; load the software trigger interrupt register value
&stir=Data.Long(Var.ADDRESS(gs_cm3Snapshot.nvicRegs.swTriggerIntrReg))
Data.Set SD:0xE000EF00 %LE %Long &stir
; load the fault status register values
&cfsr=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultStatus.cfsr))
Data.Set SD:0xE000ED28 %LE %Long &cfsr
&hfsr=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultStatus.hfsr))
Data.Set SD:0xE000ED2C %LE %Long &hfsr
&mmfar=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultStatus.mmfar))
Data.Set SD:0xE000ED34 %LE %Long &mmfar
&bfar=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultStatus.bfar))
Data.Set SD:0xE000ED38 %LE %Long &bfar
&exec_return=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.excReturn))
PRINT &exec_return
IF (&exec_return&0x4)==0
(
PRINT "MSP in use"
)
ELSE
(
PRINT "PSP in use"
)
&SP=Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.faultStack))
&SP=&SP+0x20
Register.Set R0 Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.r0))
Register.Set R1 Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.r1))
Register.Set R2 Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.r2))
Register.Set R3 Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.r3))
Register.Set R12 Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.r12))
Register.Set R13 &SP
Register.Set R14 Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.stackedLR))
Register.Set PC Data.Long(Var.ADDRESS(gs_cm3Snapshot.faultInfo.stackedPC))
v.v gs_cm3Snapshot
b::Register
v.f
PRINT "Success!"
"""
# ELF with GMU firmware symbols, loaded without code (/nocode): the memory
# contents come from the snapshot .bin files, only debug info is taken here.
gmu_startup_script_symbol = 'data.load.elf "code_compile_mem.axf" /nocode'
# Output directory (relative to the parser output dir) for generated files.
gmu_t32_path = "gpu_parser/snapshot/gmu_t32/"
def prepare_bin_load_command(file):
    """Return a T32 'data.load.binary' line for one snapshot .bin file.

    The load address is the 4th dash/dot-separated token of the file
    name (e.g. "gmu-snapshot-mem-0x1234.bin" -> "0x1234").
    """
    tokens = re.split(r"-|\.", file)
    return 'data.load.binary "{0}" {1}\n'.format(str(file), tokens[3])
def generate_cmm_script(dump):
    """Generate snapshot/gmu_t32/gmu_startup_script.cmm for the GMU snapshot.

    The script is assembled as:
      header -> one data.load.binary line per .bin file under gmu_t32_path
      -> symbol (axf) load command -> footer that restores the saved GMU
      register state.

    The .bin file list is sorted so repeated parser runs emit an identical
    script regardless of os.walk()'s filesystem-dependent ordering.
    """
    script = gmu_startup_script_header
    for _root, _dirs, files in os.walk(gmu_t32_path):
        script += "".join(prepare_bin_load_command(name)
                          for name in sorted(files) if name.endswith('.bin'))
    script += "\n"
    script += gmu_startup_script_symbol
    script += gmu_startup_script_footer
    # dump.open_file creates the file inside the parser output directory.
    out = dump.open_file(gmu_t32_path + "gmu_startup_script.cmm", "w")
    out.write(script)
    out.close()
def generate_gmu_t32_files(dump):
    """Emit everything needed to debug the GMU snapshot in Trace32:
    the CMM startup script, configsim.t32 and the launcher batch file.
    """
    generate_cmm_script(dump)
    # Write the two static helper files next to the generated script.
    for path, content in (("gpu_parser/configsim.t32", config_sim_t32),
                          ("gpu_parser/launch_t32_gmu.bat", launch_t32_gmu)):
        fh = dump.open_file(path, "w")
        fh.write(content)
        fh.close()

View File

@@ -0,0 +1,233 @@
# Copyright (c) 2021 The Linux Foundation. All rights reserved.
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from ctypes import c_uint32, c_uint64, c_char, sizeof, Structure
# Record magic value; presumably marks/validates eventlog records in the
# reader (not used within this chunk) — TODO confirm against the parser.
MAGIC = 0xabbaabba
# Event ids found in eventlog record headers (header["event_id"]).
LOG_SKIP = 1
LOG_FIRE_EVENT = 2
LOG_CMDBATCH_SUBMITTED_EVENT = 3
LOG_CMDBATCH_RETIRED_EVENT = 4
LOG_SYNCPOINT_FENCE_EVENT = 5
LOG_SYNCPOINT_FENCE_EXPIRE_EVENT = 6
LOG_TIMELINE_FENCE_ALLOC_EVENT = 7
LOG_TIMELINE_FENCE_RELEASE_EVENT = 8
# fire_event.type codes (see fire_event_write).
KGSL_EVENT_RETIRED = 1
KGSL_EVENT_CANCELLED = 2
# Nanoseconds per second (timestamp conversion factor).
NANO_TO_SEC = 1000000000
class fire_event (Structure):
    """Payload layout of a kgsl fire-event record; sizeof() and the field
    .offset values are used by fire_event_write() to read the raw buffer."""
    _fields_ = [('id', c_uint32),
                ('ts', c_uint32),
                ('type', c_uint32),
                ('age', c_uint32)]
class cmdbatch_submitted (Structure):
    """Payload layout of an adreno_cmdbatch_submitted record; consumed by
    cmdbatch_submitted_event_write()."""
    _fields_ = [('id', c_uint32),
                ('ts', c_uint32),
                ('prio', c_uint32),
                ('flags', c_uint64)]
class cmdbatch_retired (Structure):
    """Payload layout of an adreno_cmdbatch_retired record; consumed by
    cmdbatch_retired_event_write()."""
    _fields_ = [('id', c_uint32),
                ('ts', c_uint32),
                ('prio', c_uint32),
                ('flag', c_uint64),
                ('start', c_uint64),
                ('retire', c_uint64)]
class syncpoint_fence (Structure):
    """Payload layout of a syncpoint fence record (id + 74-byte name);
    consumed by syncpoint_fence_event_write()."""
    _fields_ = [('id', c_uint32),
                ('name', c_char*74)]
class timeline_fence (Structure):
    """Payload layout of a timeline fence record (id + 64-bit seqno);
    presumably consumed by the LOG_TIMELINE_FENCE_* writers — the matching
    writer is outside this chunk, confirm in the full file."""
    _fields_ = [('id', c_uint32),
                ('seqno', c_uint64)]
def get_event_id(event):
    """Return the low 16 bits of *event* — the eventlog event-id field."""
    return event & 0xffff
def fire_event_write(func_writeln, dump, kgsl_eventlog_buffer, header):
    """Decode one kgsl fire-event record and write it via func_writeln.

    Returns True when the record's payload size does not match the
    fire_event layout (caller should skip the record), False after the
    line has been written.
    """
    if header["payload_size"] != sizeof(fire_event):
        return True
    base = kgsl_eventlog_buffer
    ctx = dump.read_u32(base + fire_event.id.offset)
    ts = dump.read_u32(base + fire_event.ts.offset)
    raw_type = dump.read_u32(base + fire_event.type.offset)
    age = dump.read_u32(base + fire_event.age.offset)
    # Known type codes get symbolic names; anything else prints numerically.
    labels = {KGSL_EVENT_RETIRED: "retired", KGSL_EVENT_CANCELLED: "cancelled"}
    evt_type = labels.get(raw_type, raw_type)
    line = ("pid:{0:<12}{1:<16.6f}"
            "kgsl_fire_event: ctx={2} ts={3} type={4} age={5}").format(
                header["pid"], header["time"], ctx, ts, evt_type, age)
    func_writeln(line)
    return False
def cmdbatch_submitted_event_write(func_writeln, dump, kgsl_eventlog_buffer,
                                   header):
    """Decode a LOG_CMDBATCH_SUBMITTED_EVENT payload and print one line.

    Returns True on a payload-size mismatch so the caller can resync.
    """
    if header["payload_size"] != sizeof(cmdbatch_submitted):
        return True
    base = kgsl_eventlog_buffer
    id = dump.read_u32(base + cmdbatch_submitted.id.offset)
    ts = dump.read_u32(base + cmdbatch_submitted.ts.offset)
    prio = dump.read_u32(base + cmdbatch_submitted.prio.offset)
    flags = dump.read_u64(base + cmdbatch_submitted.flags.offset)
    format_str = ("pid:{0:<12}{1:<16.6f}"
                  "adreno_cmdbatch_submitted: ctx={2} ctx_prio={3} "
                  "ts={4} flags={5}")
    func_writeln(format_str.format(header["pid"], header["time"],
                                   id, prio, ts, flags))
    return False
def cmdbatch_retired_event_write(func_writeln, dump, kgsl_eventlog_buffer,
                                 header):
    """Decode a LOG_CMDBATCH_RETIRED_EVENT payload and print one line.

    Returns True on a payload-size mismatch so the caller can resync.
    """
    if header["payload_size"] != sizeof(cmdbatch_retired):
        return True
    base = kgsl_eventlog_buffer
    id = dump.read_u32(base + cmdbatch_retired.id.offset)
    ts = dump.read_u32(base + cmdbatch_retired.ts.offset)
    prio = dump.read_u32(base + cmdbatch_retired.prio.offset)
    flag = dump.read_u64(base + cmdbatch_retired.flag.offset)
    start = dump.read_u64(base + cmdbatch_retired.start.offset)
    retire = dump.read_u64(base + cmdbatch_retired.retire.offset)
    format_str = ("pid:{0:<12}{1:<16.6f}"
                  "adreno_cmdbatch_retired: ctx={2} ctx_prio={3} "
                  "ts={4} flags={5} start={6} retire={7}")
    func_writeln(format_str.format(header["pid"], header["time"], id,
                                   prio, ts, flag, start, retire))
    return False
def syncpoint_fence_event_write(func_writeln, dump, kgsl_eventlog_buffer,
                                header):
    """Decode a syncpoint fence / fence-expire payload and print one line.

    Returns True on a payload-size mismatch so the caller can resync.
    """
    if header["payload_size"] != sizeof(syncpoint_fence):
        return True
    base = kgsl_eventlog_buffer
    id = dump.read_u32(base + syncpoint_fence.id.offset)
    name = dump.read_cstring(base + syncpoint_fence.name.offset, 74)
    eid = header["event_id"]
    # Only these two ids reach this writer (see parse_eventlog_buffer).
    if eid == LOG_SYNCPOINT_FENCE_EVENT:
        event = "syncpoint_fence"
    elif eid == LOG_SYNCPOINT_FENCE_EXPIRE_EVENT:
        event = "syncpoint_fence_expire"
    format_str = ("pid:{0:<12}{1:<16.6f}"
                  "{2}: ctx={3} name={4}")
    func_writeln(format_str.format(header["pid"], header["time"],
                                   event, id, name))
    return False
def timeline_fence_event_write(func_writeln, dump, kgsl_eventlog_buffer,
                               header):
    """Decode a timeline fence alloc/release payload and print one line.

    Returns True on a payload-size mismatch so the caller can resync.
    """
    if header["payload_size"] != sizeof(timeline_fence):
        return True
    base = kgsl_eventlog_buffer
    id = dump.read_u32(base + timeline_fence.id.offset)
    seqno = dump.read_u64(base + timeline_fence.seqno.offset)
    eid = header["event_id"]
    # Only these two ids reach this writer (see parse_eventlog_buffer).
    if eid == LOG_TIMELINE_FENCE_ALLOC_EVENT:
        event = "kgsl_timeline_fence_alloc"
    elif eid == LOG_TIMELINE_FENCE_RELEASE_EVENT:
        event = "kgsl_timeline_fence_release"
    format_str = ("pid:{0:<12}{1:<16.6f}"
                  "{2}: timeline={3} seqno={4}")
    func_writeln(format_str.format(header["pid"], header["time"], event,
                                   id, seqno))
    return False
def parse_eventlog_buffer(func_writeln, dump):
    """Walk the 8 KB 'kgsl_eventlog' ring buffer and pretty-print each record.

    func_writeln -- callable that receives one formatted output line.
    dump         -- ramdump accessor used to read headers and payloads.
    """
    kgsl_eventlog_buffer = dump.read('kgsl_eventlog')
    format_str = '{0:<15} {1:<16}{2}'
    func_writeln(format_str.format("PID", "Timestamp", "Function"))
    min_timestamp = None
    offset = 0
    ret = True
    header = {}
    # The buffer is 8192 bytes; scan byte-by-byte until a valid header
    # (magic + sane event id) is found, then decode record-by-record.
    while offset < 8192:
        header["magic_num"] = dump.read_structure_field(
            kgsl_eventlog_buffer, 'struct kgsl_log_header',
            'magic')
        header["event_id"] = dump.read_structure_field(
            kgsl_eventlog_buffer,
            'struct kgsl_log_header', 'eventid')
        if header["event_id"] is None:
            # Older kernels name the field 'event'; the id is its low 16 bits.
            event = dump.read_structure_field(
                kgsl_eventlog_buffer,
                'struct kgsl_log_header', 'event')
            header["event_id"] = get_event_id(event)
        header["time"] = dump.read_structure_field(
            kgsl_eventlog_buffer,
            'struct kgsl_log_header', 'time') / NANO_TO_SEC
        header["pid"] = dump.read_structure_field(
            kgsl_eventlog_buffer,
            'struct kgsl_log_header', 'pid')
        header["payload_size"] = dump.read_structure_field(
            kgsl_eventlog_buffer,
            'struct kgsl_log_header', 'size')
        # Not a valid record at this offset: slide forward one byte.
        if (header["magic_num"] != MAGIC) or (header["event_id"] > 8) or \
                (header["event_id"] < 1):
            offset += 1
            kgsl_eventlog_buffer += 1
            continue
        if min_timestamp is None:
            min_timestamp = header["time"]
        kgsl_eventlog_buffer += dump.sizeof('struct kgsl_log_header')
        # A timestamp going backwards means the ring buffer wrapped here.
        if min_timestamp > header["time"]:
            func_writeln("End of logging".center(90, '-') + '\n')
            min_timestamp = header["time"]
        if header["event_id"] == LOG_SKIP:
            break
        elif header["event_id"] == LOG_FIRE_EVENT:
            ret = fire_event_write(func_writeln, dump, kgsl_eventlog_buffer,
                                   header)
        elif header["event_id"] == LOG_CMDBATCH_SUBMITTED_EVENT:
            ret = cmdbatch_submitted_event_write(func_writeln, dump,
                                                 kgsl_eventlog_buffer, header)
        elif header["event_id"] == LOG_CMDBATCH_RETIRED_EVENT:
            ret = cmdbatch_retired_event_write(func_writeln, dump,
                                               kgsl_eventlog_buffer, header)
        elif (header["event_id"] == LOG_SYNCPOINT_FENCE_EVENT) or \
                (header["event_id"] == LOG_SYNCPOINT_FENCE_EXPIRE_EVENT):
            ret = syncpoint_fence_event_write(func_writeln, dump,
                                              kgsl_eventlog_buffer, header)
        elif (header["event_id"] == LOG_TIMELINE_FENCE_ALLOC_EVENT) or \
                (header["event_id"] == LOG_TIMELINE_FENCE_RELEASE_EVENT):
            ret = timeline_fence_event_write(func_writeln, dump,
                                             kgsl_eventlog_buffer, header)
        # ret=True means the payload size looked wrong: resync byte-by-byte.
        if ret:
            offset += 1
            kgsl_eventlog_buffer += 1
            continue
        offset += dump.sizeof('struct kgsl_log_header') + \
            header["payload_size"]
        kgsl_eventlog_buffer += header["payload_size"]

View File

@@ -0,0 +1,380 @@
# Copyright (c) 2021 The Linux Foundation. All rights reserved.
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from ctypes import c_int, c_int64, c_uint, c_ushort, sizeof, Structure
# High word is static, low word is snapshot version ID
SNAPSHOT_MAGIC = 0x504D0002
# Section header
SNAPSHOT_SECTION_MAGIC = 0xABCD
# Snapshot Sections
KGSL_SNAPSHOT_SECTION_OS = 0x0101
KGSL_SNAPSHOT_SECTION_END = 0xFFFF
KGSL_SNAPSHOT_SECTION_RB_V2 = 0x0302
KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2 = 0x0B02
KGSL_SNAPSHOT_SECTION_MEMLIST_V2 = 0x0E02
KGSL_SNAPSHOT_SECTION_MEM_HISTORY = 0x1202
KGSL_SNAPSHOT_SECTION_IB_V2 = 0x0402
KGSL_SNAPSHOT_SECTION_REGS = 0x0201
KGSL_SNAPSHOT_SECTION_MVC = 0x1501
KGSL_SNAPSHOT_SECTION_INDEXED_REGS = 0x0501
KGSL_SNAPSHOT_SECTION_DEBUG = 0x0901
KGSL_SNAPSHOT_SECTION_DEBUGBUS = 0x0A01
KGSL_SNAPSHOT_SECTION_SHADER = 0x1201
KGSL_SNAPSHOT_SECTION_GMU = 0x1601
KGSL_SNAPSHOT_SECTION_GMU_MEMORY = 0x1701
# Ringbuffer
KGSL_RB_SIZE = (32 * 1024)  # ringbuffer size in bytes
KGSL_RB_DWORDS = (KGSL_RB_SIZE >> 2)  # the same size in 32-bit dwords
# GMU Sections
GMU_SECTION_TYPE_OTHER = 0
GMU_SECTION_TYPE_HFIMEM = 1
GMU_SECTION_TYPE_LOG = 2
GMU_SECTION_TYPE_BWMEM = 3
GMU_SECTION_TYPE_DEBUG = 4
GMU_SECTION_TYPE_DCACHE = 5
GMU_SECTION_TYPE_ICACHE = 6
GMU_SECTION_TYPE_UNCACHE = 7
GMU_SECTION_TYPE_REASON = 8
GMU_SECTION_TYPE_VERSION = 9
# GMU Memory Sections
SNAPSHOT_GMU_MEM_UNKNOWN = 0
SNAPSHOT_GMU_MEM_HFI = 1
SNAPSHOT_GMU_MEM_LOG = 2
SNAPSHOT_GMU_MEM_BWTABLE = 3
SNAPSHOT_GMU_MEM_DEBUG = 4
SNAPSHOT_GMU_MEM_BIN_BLOCK = 5
SNAPSHOT_GMU_MEM_CTXT_QUEUE = 6
# KGSL structures
class kgsl_snapshot_header(Structure):
    """Top-level snapshot file header (packed binary layout)."""
    _pack_ = 1
    _fields_ = [('magic', c_uint),
                ('gpuid', c_uint),
                ('chipid', c_uint)]
class kgsl_snapshot_section_header(Structure):
    """Header that precedes every section in the snapshot file."""
    _pack_ = 1
    _fields_ = [('magic', c_ushort),
                ('id', c_ushort),
                ('size', c_uint)]
class kgsl_snapshot_rb_v2(Structure):
    """Ringbuffer (RB_V2) section header."""
    _pack_ = 1
    _fields_ = [('start', c_int),
                ('end', c_int),
                ('rbsize', c_int),
                ('wptr', c_int),
                ('rptr', c_int),
                ('count', c_int),
                ('timestamp_queued', c_uint),
                ('timestamp_retired', c_uint),
                ('gpuaddr', c_int64),
                ('id', c_uint)]
class kgsl_snapshot_gmu_mem(Structure):
    """GMU memory section header, 64-bit address form (used when writing)."""
    _pack_ = 1
    _fields_ = [('type', c_int),
                ('hostaddr', c_int64),
                ('gmuaddr', c_int64),
                ('gpuaddr', c_int64)]
class kgsl_snapshot_gmu_mem_header(Structure):
    """GMU memory section header, split 32-bit form (used when reading back)."""
    _pack_ = 1
    _fields_ = [('type', c_int),
                ('hostaddr_lower', c_uint),
                ('hostaddr_upper', c_uint),
                ('gmuaddr_lower', c_uint),
                ('gmuaddr_upper', c_uint),
                ('gpuaddr_lower', c_uint),
                ('gpuaddr_upper', c_uint)]
def gmu_log(devp, dump, gpurev):
    """Locate the GMU log memdesc for this GPU generation.

    Returns (hostptr, size, gmuaddr, gpuaddr); the address field that the
    memdesc flavour does not carry is reported as 0.
    """
    # Pick the per-generation device/GMU struct names.
    if gpurev >= 0x80000:
        dev_struct, gmu_struct = 'struct gen8_device', 'struct gen8_gmu_device'
    elif gpurev >= 0x70000:
        dev_struct, gmu_struct = 'struct gen7_device', 'struct gen7_gmu_device'
    else:
        dev_struct, gmu_struct = 'struct a6xx_device', 'struct a6xx_gmu_device'
    gmu_dev = dump.sibling_field_addr(devp, dev_struct, 'adreno_dev', 'gmu')
    gmu_logs = dump.read_structure_field(gmu_dev, gmu_struct, 'gmu_log')
    if dump.kernel_version >= (5, 10, 0):
        desc = 'struct kgsl_memdesc'
        gmu_log_hostptr = dump.read_structure_field(gmu_logs, desc, 'hostptr')
        gmu_log_size = dump.read_structure_field(gmu_logs, desc, 'size')
        gmu_log_gpuaddr = dump.read_structure_field(gmu_logs, desc, 'gpuaddr')
        # kgsl_memdesc carries no gmuaddr field.
        gmu_log_gmuaddr = 0
    else:
        desc = 'struct gmu_memdesc'
        gmu_log_hostptr = dump.read_structure_field(gmu_logs, desc, 'hostptr')
        gmu_log_size = dump.read_structure_field(gmu_logs, desc, 'size')
        gmu_log_gmuaddr = dump.read_structure_field(gmu_logs, desc, 'gmuaddr')
        # gmu_memdesc carries no gpuaddr field.
        gmu_log_gpuaddr = 0
    return (gmu_log_hostptr, gmu_log_size, gmu_log_gmuaddr, gmu_log_gpuaddr)
def hfi_mem(devp, dump, gpurev):
    """Locate the HFI message memdesc for this GPU generation.

    Returns (hostptr, size, gmuaddr, gpuaddr); the address field that the
    memdesc flavour does not carry is reported as 0.
    """
    # Pick the per-generation device/GMU/HFI struct names.
    if gpurev >= 0x80000:
        dev_struct = 'struct gen8_device'
        gmu_struct, hfi_struct = 'struct gen8_gmu_device', 'struct gen8_hfi'
    elif gpurev >= 0x70000:
        dev_struct = 'struct gen7_device'
        gmu_struct, hfi_struct = 'struct gen7_gmu_device', 'struct gen7_hfi'
    else:
        dev_struct = 'struct a6xx_device'
        gmu_struct, hfi_struct = 'struct a6xx_gmu_device', 'struct a6xx_hfi'
    gmu_dev = dump.sibling_field_addr(devp, dev_struct, 'adreno_dev', 'gmu')
    hfi = dump.struct_field_addr(gmu_dev, gmu_struct, 'hfi')
    hfi_mem = dump.read_structure_field(hfi, hfi_struct, 'hfi_mem')
    if dump.kernel_version >= (5, 10, 0):
        desc = 'struct kgsl_memdesc'
        hfi_mem_hostptr = dump.read_structure_field(hfi_mem, desc, 'hostptr')
        hfi_mem_size = dump.read_structure_field(hfi_mem, desc, 'size')
        hfi_mem_gpuaddr = dump.read_structure_field(hfi_mem, desc, 'gpuaddr')
        # kgsl_memdesc carries no gmuaddr field.
        hfi_mem_gmuaddr = 0
    else:
        desc = 'struct gmu_memdesc'
        hfi_mem_hostptr = dump.read_structure_field(hfi_mem, desc, 'hostptr')
        hfi_mem_size = dump.read_structure_field(hfi_mem, desc, 'size')
        hfi_mem_gmuaddr = dump.read_structure_field(hfi_mem, desc, 'gmuaddr')
        # gmu_memdesc carries no gpuaddr field.
        hfi_mem_gpuaddr = 0
    return (hfi_mem_hostptr, hfi_mem_size, hfi_mem_gmuaddr, hfi_mem_gpuaddr)
def snapshot_gmu_mem_section(devp, dump, gpurev, file, hdr_type):
    """Append one GMU memory section (HFI or log) to the snapshot *file*."""
    if hdr_type == SNAPSHOT_GMU_MEM_HFI:
        (gmu_mem_hostptr, gmu_mem_size, gmu_mem_gmuaddr, gmu_mem_gpuaddr) = \
            hfi_mem(devp, dump, gpurev)
    elif hdr_type == SNAPSHOT_GMU_MEM_LOG:
        (gmu_mem_hostptr, gmu_mem_size, gmu_mem_gmuaddr, gmu_mem_gpuaddr) = \
            gmu_log(devp, dump, gpurev)
    else:
        # Only the HFI and LOG memory types are supported here.
        return
    section_header = kgsl_snapshot_section_header()
    section_header.magic = SNAPSHOT_SECTION_MAGIC
    section_header.id = KGSL_SNAPSHOT_SECTION_GMU_MEMORY
    # Section size covers both headers plus the raw memory payload.
    section_header.size = (gmu_mem_size + sizeof(kgsl_snapshot_gmu_mem) +
                           sizeof(kgsl_snapshot_section_header))
    file.write(section_header)
    mem_hdr = kgsl_snapshot_gmu_mem()
    mem_hdr.type = hdr_type
    mem_hdr.hostaddr = gmu_mem_hostptr
    mem_hdr.gmuaddr = gmu_mem_gmuaddr
    mem_hdr.gpuaddr = gmu_mem_gpuaddr
    file.write(mem_hdr)
    # Raw memory contents follow the headers immediately.
    data = dump.read_binarystring(gmu_mem_hostptr, gmu_mem_size)
    file.write(data)
def snapshot_rb_section(devp, dump, file, rb_type):
    """Append one ringbuffer section ('cur_rb' or 'prev_rb') to *file*."""
    # Scratch buffer information
    scratch_obj = dump.read_structure_field(devp,
                                            'struct kgsl_device',
                                            'scratch')
    scratch_hostptr = dump.read_structure_field(scratch_obj,
                                                'struct kgsl_memdesc',
                                                'hostptr')
    # Memstore information
    memstore_obj = dump.read_structure_field(devp,
                                             'struct kgsl_device',
                                             'memstore')
    memstore_hostptr = dump.read_structure_field(memstore_obj,
                                                 'struct kgsl_memdesc',
                                                 'hostptr')
    # RB information
    rb = dump.read_structure_field(devp,
                                   'struct adreno_device', rb_type)
    if (not rb):
        return
    rb_id = dump.read_structure_field(rb,
                                      'struct adreno_ringbuffer',
                                      'id')
    rb_wptr = dump.read_structure_field(rb,
                                        'struct adreno_ringbuffer',
                                        'wptr')
    # The read pointer lives in the scratch buffer, indexed by RB id.
    rb_rptr = dump.read_s32(scratch_hostptr + rb_id * 4)
    rb_queued_ts = dump.read_structure_field(rb,
                                             'struct adreno_ringbuffer',
                                             'timestamp')
    rb_buffer_desc = dump.read_structure_field(rb,
                                               'struct adreno_ringbuffer',
                                               'buffer_desc')
    rb_gpuaddr = dump.read_structure_field(rb_buffer_desc,
                                           'struct kgsl_memdesc',
                                           'gpuaddr')
    rb_hostptr = dump.read_structure_field(rb_buffer_desc,
                                           'struct kgsl_memdesc',
                                           'hostptr')
    rb_size = dump.read_structure_field(rb_buffer_desc,
                                        'struct kgsl_memdesc', 'size')
    # Retired timestamp is read from the memstore; the 0x32E/0x28/0x8
    # constants appear to mirror the driver's memstore layout for
    # per-RB entries -- TODO confirm against the kgsl driver headers.
    rb_retired_ts = dump.read_s32(memstore_hostptr +
                                  ((rb_id + 0x32E) * 0x28 + 0x8))
    # RB section
    section_header = kgsl_snapshot_section_header()
    section_header.magic = SNAPSHOT_SECTION_MAGIC
    section_header.id = KGSL_SNAPSHOT_SECTION_RB_V2
    section_header.size = (KGSL_RB_SIZE + sizeof(kgsl_snapshot_rb_v2) +
                           sizeof(kgsl_snapshot_section_header))
    file.write(section_header)
    rb_header = kgsl_snapshot_rb_v2()
    rb_header.start = 0
    rb_header.end = KGSL_RB_DWORDS
    rb_header.wptr = rb_wptr
    rb_header.rptr = rb_rptr
    rb_header.rbsize = KGSL_RB_DWORDS
    rb_header.count = KGSL_RB_DWORDS
    rb_header.timestamp_queued = rb_queued_ts
    rb_header.timestamp_retired = rb_retired_ts
    rb_header.gpuaddr = rb_gpuaddr
    rb_header.id = rb_id
    file.write(rb_header)
    # Raw ringbuffer contents follow the header.
    data = dump.read_binarystring(rb_hostptr, rb_size)
    file.write(data)
def create_snapshot_from_ramdump(devp, dump):
    """Build a minimal GPU snapshot (mini_snapshot.bpmd) from the ramdump."""
    # GPU revision
    gpucore = dump.read_structure_field(devp,
                                        'struct adreno_device', 'gpucore')
    gpurev = dump.read_structure_field(gpucore,
                                       'struct adreno_gpu_core', 'gpurev')
    # Gpu chip id
    chipid = dump.read_structure_field(devp, 'struct adreno_device', 'chipid')
    file_name = 'mini_snapshot.bpmd'
    file = dump.open_file('gpu_parser/' + file_name, 'wb')
    # Dump snapshot header
    header = kgsl_snapshot_header()
    header.magic = SNAPSHOT_MAGIC
    # 0x0003 looks like the fixed gpuid family prefix -- TODO confirm
    # against the snapshot tooling.
    header.gpuid = (0x0003 << 16) | gpurev
    header.chipid = chipid
    file.write(header)
    # Dump RBs
    snapshot_rb_section(devp, dump, file, 'cur_rb')
    snapshot_rb_section(devp, dump, file, 'prev_rb')
    # Check & dump GMU info
    gmu_core = dump.struct_field_addr(devp, 'struct kgsl_device', 'gmu_core')
    gmu_on = dump.read_structure_field(gmu_core,
                                       'struct gmu_core_device', 'flags')
    # Bit 4 of the gmu_core flags gates GMU section dumping -- presumably
    # "GMU enabled"; verify against gmu_core driver code.
    if ((gmu_on >> 4) & 1):
        snapshot_gmu_mem_section(devp,
                                 dump, gpurev, file, SNAPSHOT_GMU_MEM_HFI)
        snapshot_gmu_mem_section(devp,
                                 dump, gpurev, file, SNAPSHOT_GMU_MEM_LOG)
    # Dump last section
    last_section = kgsl_snapshot_section_header()
    last_section.magic = SNAPSHOT_SECTION_MAGIC
    last_section.id = KGSL_SNAPSHOT_SECTION_END
    last_section.size = sizeof(kgsl_snapshot_section_header)
    file.write(last_section)
    file.close()
def extract_gmu_mem_from_snapshot(dump, snapshot_path):
    """Split each GMU-memory section of a snapshot into its own .snap.bin file."""
    file = dump.open_file(snapshot_path, 'rb')
    header = kgsl_snapshot_header()
    file.readinto(header)
    while True:
        # Remember where this section starts so we can skip it wholesale.
        section_start = file.tell()
        section_header = kgsl_snapshot_section_header()
        file.readinto(section_header)
        if section_header.id == KGSL_SNAPSHOT_SECTION_GMU_MEMORY:
            gmu_mem_header = kgsl_snapshot_gmu_mem_header()
            file.readinto(gmu_mem_header)
            payload_sz = (section_header.size -
                          sizeof(kgsl_snapshot_gmu_mem_header) -
                          sizeof(kgsl_snapshot_section_header))
            payload = file.read(payload_sz)
            bin_filename = "gmu-section-" + str(gmu_mem_header.type) + "-" + \
                hex(gmu_mem_header.gmuaddr_lower) + ".snap.bin"
            gmu_bin_file = dump.open_file("gpu_parser/snapshot/gmu_t32/" +
                                          bin_filename, mode='wb')
            gmu_bin_file.write(payload)
            gmu_bin_file.close()
        elif section_header.id == 0 or section_header.size == 0:
            # A zeroed header means EOF or a corrupt snapshot.
            print('Invalid id & size:', section_header.id, section_header.size)
            print('Total size:', file.tell())
            break
        elif section_header.id == KGSL_SNAPSHOT_SECTION_END:
            break
        file.seek(section_start + section_header.size, 0)
    file.close()

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,48 @@
# Copyright (c) 2013-2015, 2020-2021 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser
from parsers.gpu.gpuinfo_54 import GpuParser_54
from parsers.gpu.gpuinfo_510 import GpuParser_510
from print_out import print_out_str
@register_parser('--print-gpuinfo',
                 'print gpu driver related info', optional=True)
class GpuParser(RamParser):
    """Dispatch to the kernel-version-specific KGSL GPU parser."""

    def __init__(self, dump):
        super(GpuParser, self).__init__(dump)

    def parse(self):
        """Pick and run the GPU parser matching the dump's kernel version."""
        has_kgsl = (self.ramdump.is_config_defined('CONFIG_QCOM_KGSL') or
                    'msm_kgsl' in self.ramdump.ko_file_names)
        if not has_kgsl:
            print_out_str(
                "No GPU support detected... Skipping GPU parser.")
            return
        kver = self.ramdump.kernel_version
        # (0, 0, 0) means the version could not be determined; assume new.
        if kver == (0, 0, 0) or kver >= (5, 10, 0):
            self.parser = GpuParser_510(self.ramdump)
        elif kver >= (4, 9, 0):
            self.parser = GpuParser_54(self.ramdump)
        else:
            print_out_str(
                "No GPU support detected for specified kernel version..."
                + " Skipping GPU parser.")
            return
        self.parser.parse()

    def write(self, string):
        # NOTE(review): self.out is never set in this class; presumably the
        # delegated parser assigns it -- confirm before relying on these.
        self.out.write(string)

    def writeln(self, string=""):
        self.out.write(string + '\n')

View File

@@ -0,0 +1,98 @@
"""
Copyright (c) 2020 The Linux Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of The Linux Foundation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os,sys
import struct
from print_out import print_out_str
from parser_util import register_parser, RamParser
ipa_log = "hotplug.txt"
def setup_out_file(path, self):
    """Open *path* for writing via the ramdump and cache it in global out_file.

    Returns the open file object, or None when it cannot be opened.
    """
    global out_file
    try:
        out_file = self.ramdump.open_file(path, 'wb')
        return out_file
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt and
        # SystemExit; limit the handler to ordinary errors.
        print_out_str("could not open path {0}".format(path))
        print_out_str("Do you have write/read permissions on the path?")
def print_out_ip(string):
    """Append *string* plus a newline to the global output file as ASCII."""
    line = string + '\n'
    out_file.write(line.encode('ascii', 'ignore'))
@register_parser('--hotplug', 'print hotplug notifier information')
class hp_logging(RamParser):
    """Dump the kernel's cpuhp_hp_states hotplug callback table."""

    def __init__(self, *args):
        super(hp_logging, self).__init__(*args)

    def hp_parse(self, ram_dump):
        """Print one line per populated entry of cpuhp_hp_states.

        ram_dump -- ramdump accessor providing symbol/struct helpers.
        """
        cpuhp_hp_states_addr = ram_dump.address_of('cpuhp_hp_states')
        if not cpuhp_hp_states_addr:
            return
        cpuhp_sz = ram_dump.sizeof('struct cpuhp_step')
        hp_states_sz = ram_dump.sizeof('cpuhp_hp_states')
        # Integer division: the array size is an exact multiple of the
        # element size (true division produced a float loop bound before).
        max_idx = hp_states_sz // cpuhp_sz
        name_offset = ram_dump.field_offset('struct cpuhp_step', 'name')
        startup_soffset = ram_dump.field_offset('struct cpuhp_step', 'startup')
        teardown_soffset = ram_dump.field_offset('struct cpuhp_step', 'teardown')
        can_stop_offset = ram_dump.field_offset('struct cpuhp_step', 'cant_stop')
        multi_instance_offset = ram_dump.field_offset('struct cpuhp_step', 'multi_instance')
        for idx in range(max_idx):
            entry = cpuhp_hp_states_addr + idx * cpuhp_sz
            name_off = ram_dump.read_u64(entry + name_offset)
            # Skip empty slots *before* dereferencing the name pointer
            # (previously the string was read even for a NULL pointer).
            if name_off == 0 or name_off is None:
                continue
            name = ram_dump.read_cstring(name_off)
            startup_soffset_addr = ram_dump.read_u64(entry + startup_soffset)
            if startup_soffset_addr != 0:
                startup_soffset_addr = ram_dump.get_symbol_info1(startup_soffset_addr)
            teardown_soffset_addr = ram_dump.read_u64(entry + teardown_soffset)
            if teardown_soffset_addr != 0:
                teardown_soffset_addr = ram_dump.get_symbol_info1(teardown_soffset_addr)
            can_stop = ram_dump.read_bool(entry + can_stop_offset)
            multi_instance = ram_dump.read_bool(entry + multi_instance_offset)
            temp = name + " startup = (single = {0} = , multi = {1} = ),teardown = ( single = {2} = , multi = {3} = ),cant_stop = {4}, multi_instance = {5}".format(startup_soffset_addr, startup_soffset_addr, teardown_soffset_addr, teardown_soffset_addr, can_stop, multi_instance)
            print_out_ip(temp)

    def parse(self):
        setup_out_file(ipa_log, self)
        self.hp_parse(self.ramdump)

View File

@@ -0,0 +1,29 @@
# Copyright (c) 2021 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import print_out
from parser_util import RamParser, cleanupString, register_parser
@register_parser('--hyp-log', 'Dump hypervisor log')
class HypLog(RamParser):
    """Extract the hypervisor log from the hyp diag region into hyp_log.txt."""

    def parse(self):
        if self.ramdump.hyp_diag_addr is None:
            raise Exception('Cannot find hyp_diag_addr!!!')
        hyp_diag_addr = self.ramdump.read_u32(self.ramdump.hyp_diag_addr, False)
        # Log buffer address and size live at fixed offsets in the diag page.
        hyp_log_addr = self.ramdump.read_u32(hyp_diag_addr + 0x78, False)
        hyp_log_size = self.ramdump.read_u32(hyp_diag_addr + 0x10, False)
        hyp_log = self.ramdump.read_physical(hyp_log_addr, hyp_log_size)
        if hyp_log is None:
            # Bug fix: the message previously formatted an undefined name
            # 'addr', raising NameError instead of the intended Exception.
            raise Exception('!!!Could not read hyp_log from address {0:x}'.format(hyp_log_addr))
        hyp_log_output = self.ramdump.open_file('hyp_log.txt')
        hyp_log_output.write(cleanupString(hyp_log.decode('ascii', 'ignore')))
        hyp_log_output.close()
        print_out.print_out_str('Hypervisor log successfully extracted!')

View File

@@ -0,0 +1,112 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
from parser_util import register_parser, RamParser
@register_parser('--icc-summary', 'script to dump the interconnect summary to get BW information')
class IccSummary(RamParser):
    """Dump interconnect bandwidth state to icc_summary.txt / icc_providers.txt."""

    def __init__(self, *args):
        super(IccSummary, self).__init__(*args)
        self.fop = self.ramdump.open_file("icc_summary.txt")
        self.fop_provider = self.ramdump.open_file("icc_providers.txt")
        # Cross-provider links found while walking nodes; flushed at the end.
        self.external_link_data = []

    def __del__(self):
        # NOTE(review): closing files in __del__ is fragile (runs at GC
        # time); kept because callers provide no explicit teardown hook.
        self.fop.close()
        self.fop_provider.close()

    def get_internal_link(self, node):
        """Emit *node*'s same-provider links now; queue cross-provider links."""
        numlinks = node.num_links
        if node.links == 0x0:
            return
        for i in range(numlinks):
            # node.links is an array of 64-bit icc_node pointers.
            dnode_addr = node.links + i * 8
            dnode = self.ramdump.read_pdatatype(dnode_addr, 'struct icc_node', attr_list=['name', 'id', 'provider'])
            did = dnode.id
            dname = self.ramdump.read_cstring(dnode.name)
            sname = self.ramdump.read_cstring(node.name)
            sid = node.id
            if node.provider == dnode.provider:
                self.fop_provider.write(" | {0:5}:{1:30} -> {2:5}:{3:30}\n".format(sid, sname, did, dname))
            else:
                data = "+ {0:5}:{1:30} -> {2:5}:{3:30}\n".format(sid, sname, did, dname)
                self.external_link_data.append(data)

    def extract_icc_summary(self):
        """Walk the kernel's icc_providers list and dump nodes, reqs, links."""
        icc_providers_addr = self.ramdump.address_of("icc_providers")
        next_offset = self.ramdump.field_offset('struct list_head', 'next')
        icc_providers = self.ramdump.read_pointer(icc_providers_addr + next_offset)
        self.fop_provider.write("[providers]\n")
        self.fop.write("{0:65} {1:12} {2:12} {3:12}\n".format(" node", "tag", "avg", "peak"))
        line = "-" * 150 + "\n"
        formatProvider = " | {0:5}:{1:40}\t\t\t [label={0:5}:{1:40}| avg_bw={2:<10} kBps\t| peak_bw={3:<10} kBps ]\n"
        format_req_str = " | {0:90} \t\t\t\t\t\t\t {1:<12}\t\t\t\t {2:<12}\n"
        formatStr = "+ {0:5}:{1:40} \t\t\t\t{2:<12} {3:<12}\n"
        reqStr = "\t| {0:35} \t\t\t {1:<12} {2:<12} {3:<12} \n"
        self.fop.write(line)
        provider = self.ramdump.read_linkedlist('struct icc_provider',
                                                "provider_list.next",
                                                icc_providers)
        no_of_provider = len(provider)
        for i in range(no_of_provider):
            self.fop_provider.write("\n\n+ subgraph cluster {}\n".format(i))
            label = self.ramdump.read_datatype(provider[i].dev,
                                               "struct device",
                                               attr_list=['kobj.name'])
            label_name = self.ramdump.read_cstring(label.kobj.name)
            self.fop_provider.write(" + label = {}\n".format(label_name))
            # 0x28 converts the node_list list_head pointer back to the
            # icc_node base -- presumably offsetof(struct icc_node,
            # node_list); TODO confirm against the kernel headers.
            node = provider[i].nodes.next - 0x28
            nodes = self.ramdump.read_linkedlist('struct icc_node',
                                                 'node_list.next', node)
            no_of_nodes = len(nodes)
            for j in range(no_of_nodes):
                icc_node_name = self.ramdump.read_cstring(nodes[j].name)
                avg_bw = nodes[j].avg_bw
                peak_bw = nodes[j].peak_bw
                node_id = nodes[j].id
                self.fop_provider.write(
                    formatProvider.format(node_id, icc_node_name, avg_bw,
                                          peak_bw))
                self.fop.write(
                    formatStr.format(node_id, icc_node_name, avg_bw, peak_bw))
                req_list = nodes[j].req_list.first
                req = self.ramdump.read_linkedlist('struct icc_req',
                                                   'req_node.next', req_list,
                                                   attr_list=['req_node',
                                                              'avg_bw',
                                                              'peak_bw', 'tag',
                                                              'dev'])
                no_of_req = len(req)
                print_seq = 0
                for k in range(no_of_req):
                    dev = self.ramdump.read_datatype(req[k].dev,
                                                     'struct device',
                                                     attr_list=['kobj.name'])
                    dev_name = self.ramdump.read_cstring(dev.kobj.name)
                    req_avg = req[k].avg_bw
                    req_peak = req[k].peak_bw
                    req_tag = req[k].tag
                    # Provider file lists active requests only; the summary
                    # file lists every request.
                    if req_avg or req_peak:
                        if print_seq == 0:
                            self.fop_provider.write(
                                " + reqs (active only)\n")
                        self.fop_provider.write(
                            format_req_str.format(dev_name, req_avg, req_peak))
                        print_seq = 1
                    self.fop.write(
                        reqStr.format(dev_name, req_tag, req_avg, req_peak))
            self.fop.write("\n\n")
            self.fop_provider.write("\n")
            # Fixed output typo: 'intenral' -> 'internal'.
            self.fop_provider.write(' + internal links\n')
            for j in range(no_of_nodes):
                self.get_internal_link(nodes[j])
        if self.external_link_data:
            self.fop_provider.write("\n\n[external links]\n")
            for i in range(len(self.external_link_data)):
                self.fop_provider.write(self.external_link_data[i])

    def parse(self):
        self.extract_icc_summary()

View File

@@ -0,0 +1,352 @@
# Copyright (c) 2013-2016, 2020 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import math
from print_out import print_out_str
from parser_util import register_parser, RamParser
from sizes import SZ_4K, SZ_64K, SZ_1M, SZ_16M, get_order, order_size_strings
from iommulib import IommuLib, MSM_SMMU_DOMAIN, MSM_SMMU_AARCH64_DOMAIN, ARM_SMMU_DOMAIN
from lpaeiommulib import parse_long_form_tables
from aarch64iommulib import parse_aarch64_tables
@register_parser('--print-iommu-pg-tables', 'Print IOMMU page tables')
class IOMMU(RamParser):
    class FlatMapping(object):
        """One page-sized mapping keyed by virtual address."""
        def __init__(self, virt, phys=-1, type='[]', size=SZ_4K, mapped=False):
            self.virt = virt
            self.phys = phys
            self.mapping_type = type
            self.mapping_size = size
            self.mapped = mapped
    class CollapsedMapping(object):
        """A contiguous run of identical mappings collapsed into one range."""
        def __init__(self, virt_start, virt_end, phys_start=-1, phys_end=-1, type='[]', size=SZ_4K, mapped=False):
            # End addresses are stored inclusive (hence the -1).
            self.virt_start = virt_start
            self.virt_end = virt_end - 1
            self.phys_start = phys_start
            self.phys_end = phys_end - 1
            self.mapping_type = type
            self.mapping_size = size
            self.mapped = mapped
        def phys_size(self):
            # +1 because bounds are inclusive.
            return (self.phys_end - self.phys_start + 1)
        def virt_size(self):
            return (self.virt_end - self.virt_start + 1)
    def __init__(self, *args):
        super(IOMMU, self).__init__(*args)
        self.out_file = None
        # 4096 first-level and 256 second-level entries per table
        # (ARM short-descriptor-style layout -- see the FL_/SL_ bits below).
        self.NUM_FL_PTE = 4096
        self.NUM_SL_PTE = 256
        # First-level descriptor bit definitions.
        self.FL_BASE_MASK = 0xFFFFFC00
        self.FL_TYPE_TABLE = (1 << 0)
        self.FL_TYPE_SECT = (1 << 1)
        self.FL_SUPERSECTION = (1 << 18)
        self.FL_AP0 = (1 << 10)
        self.FL_AP1 = (1 << 11)
        self.FL_AP2 = (1 << 15)
        self.FL_SHARED = (1 << 16)
        self.FL_BUFFERABLE = (1 << 2)
        self.FL_CACHEABLE = (1 << 3)
        self.FL_TEX0 = (1 << 12)
        self.FL_NG = (1 << 17)
        # Second-level descriptor bit definitions.
        self.SL_BASE_MASK_LARGE = 0xFFFF0000
        self.SL_BASE_MASK_SMALL = 0xFFFFF000
        self.SL_TYPE_LARGE = (1 << 0)
        self.SL_TYPE_SMALL = (2 << 0)
        self.SL_AP0 = (1 << 4)
        self.SL_AP1 = (2 << 4)
        self.SL_AP2 = (1 << 9)
        self.SL_SHARED = (1 << 10)
        self.SL_BUFFERABLE = (1 << 2)
        self.SL_CACHEABLE = (1 << 3)
        self.SL_TEX0 = (1 << 6)
        self.SL_NG = (1 << 11)
        self.node_offset = self.ramdump.field_offset(
            'struct msm_iova_data', 'node')
def fl_offset(va):
return (((va) & 0xFFF00000) >> 20)
def sl_offset(va):
return (((va) & 0xFF000) >> 12)
    def print_sl_page_table(self, pg_table):
        """Print every populated entry of one second-level page table."""
        sl_pte = pg_table
        for i in range(0, self.NUM_SL_PTE):
            phy_addr = self.ramdump.read_u32(sl_pte, False)
            if phy_addr is not None:  # and phy_addr & self.SL_TYPE_SMALL:
                # AP2 set means the page is read-only.
                read_write = '[R/W]'
                if phy_addr & self.SL_AP2:
                    read_write = '[R]'
                if phy_addr & self.SL_TYPE_SMALL:
                    self.out_file.write('SL_PTE[%d] = %x %s\n' %
                                        (i, phy_addr & self.SL_BASE_MASK_SMALL, read_write))
                elif phy_addr & self.SL_TYPE_LARGE:
                    self.out_file.write('SL_PTE[%d] = %x %s\n' %
                                        (i, phy_addr & self.SL_BASE_MASK_LARGE, read_write))
                elif phy_addr != 0:
                    self.out_file.write(
                        'SL_PTE[%d] = %x NOTE: ERROR [Do not understand page table bits]\n' % (i, phy_addr))
            # Each descriptor is 4 bytes.
            sl_pte += 4

    def print_page_table(self, pg_table):
        """Print the first-level table, descending into second-level tables."""
        fl_pte = pg_table
        for i in range(0, self.NUM_FL_PTE):
            # for i in range(0,5):
            sl_pg_table_phy_addr = self.ramdump.read_u32(fl_pte)
            if sl_pg_table_phy_addr is not None:
                if sl_pg_table_phy_addr & self.FL_TYPE_TABLE:
                    self.out_file.write('FL_PTE[%d] = %x [4K/64K]\n' %
                                        (i, sl_pg_table_phy_addr & self.FL_BASE_MASK))
                    self.print_sl_page_table(
                        sl_pg_table_phy_addr & self.FL_BASE_MASK)
                elif sl_pg_table_phy_addr & self.FL_SUPERSECTION:
                    self.out_file.write('FL_PTE[%d] = %x [16M]\n' %
                                        (i, sl_pg_table_phy_addr & 0xFF000000))
                elif sl_pg_table_phy_addr & self.FL_TYPE_SECT:
                    self.out_file.write('FL_PTE[%d] = %x [1M]\n' %
                                        (i, sl_pg_table_phy_addr & 0xFFF00000))
                elif sl_pg_table_phy_addr != 0:
                    self.out_file.write(
                        'FL_PTE[%d] = %x NOTE: ERROR [Cannot understand first level page table entry]\n' % (i, sl_pg_table_phy_addr))
            else:
                self.out_file.write(
                    'FL_PTE[%d] NOTE: ERROR [Cannot understand first level page table entry]\n' % (i))
            fl_pte += 4
def get_mapping_info(self, pg_table, index):
sl_pte = pg_table + (index * 4)
phy_addr = self.ramdump.read_u32(sl_pte, False)
current_phy_addr = -1
current_page_size = SZ_4K
current_map_type = 0
status = True
if phy_addr is not None:
if phy_addr & self.SL_AP2:
current_map_type = self.SL_AP2
if phy_addr & self.SL_TYPE_SMALL:
current_phy_addr = phy_addr & self.SL_BASE_MASK_SMALL
current_page_size = SZ_4K
elif phy_addr & self.SL_TYPE_LARGE:
current_phy_addr = phy_addr & self.SL_BASE_MASK_LARGE
current_page_size = SZ_64K
elif phy_addr != 0:
current_phy_addr = phy_addr
status = False
return (current_phy_addr, current_page_size, current_map_type, status)
def get_sect_mapping_info(self, addr):
    """Decode a first-level section/supersection entry *addr*.

    Returns (phys_addr, page_size, map_type, status), mirroring
    get_mapping_info(): phys_addr is -1 when unmapped; page_size is
    SZ_16M for a supersection or SZ_1M for a section; status is False
    when the entry bits are not understood.
    """
    current_phy_addr = -1
    current_page_size = SZ_4K
    current_map_type = 0
    status = True
    if addr is not None:
        if addr & self.SL_AP2:
            # AP2 set => read-only mapping.
            current_map_type = self.SL_AP2
        if addr & self.FL_SUPERSECTION:
            current_phy_addr = addr & 0xFF000000
            current_page_size = SZ_16M
        elif addr & self.FL_TYPE_SECT:
            current_phy_addr = addr & 0xFFF00000
            current_page_size = SZ_1M
        elif addr != 0:
            current_phy_addr = addr
            status = False
    return (current_phy_addr, current_page_size, current_map_type, status)
def add_flat_mapping(self, mappings, fl_idx, sl_idx, phy_adr, map_type, page_size, mapped):
    """Insert one page-granular mapping into *mappings*, keyed by VA.

    The virtual address is reconstructed from the first-level index
    (1MB granularity) and second-level index (4KB granularity). A
    duplicate VA is reported to out_file rather than overwritten.
    Returns the (mutated) mappings dict.
    """
    virt = (fl_idx << 20) | (sl_idx << 12)
    map_type_str = '[R/W]'
    if map_type == self.SL_AP2:
        map_type_str = '[R]'
    map = self.FlatMapping(virt, phy_adr, map_type_str, page_size, mapped)
    if virt not in mappings:
        mappings[virt] = map
    else:
        self.out_file.write(
            '[!] WARNING: FL_PTE[%d] SL_PTE[%d] ERROR [Duplicate mapping?]\n' % (fl_idx, sl_idx))
    return mappings
def add_collapsed_mapping(self, mappings, virt_start, virt_end, phys_start, phys_end, map_type, page_size, mapped):
    """Insert a merged VA range into *mappings*, keyed by its start VA.

    A duplicate start address is reported to out_file rather than
    overwritten. Returns the (mutated) mappings dict.
    """
    map = self.CollapsedMapping(
        virt_start, virt_end, phys_start, phys_end, map_type, page_size, mapped)
    if virt_start not in mappings:
        mappings[virt_start] = map
    else:
        self.out_file.write(
            '[!] WARNING: ERROR [Duplicate mapping at virtual address 0x%08x?]\n' % (virt_start))
    return mappings
def create_flat_mapping(self, pg_table):
    """Build a dict of VA -> FlatMapping covering the whole domain.

    Section/supersection entries are mapped at the first level; table
    entries descend into all NUM_SL_PTE second-level entries. Unmapped
    entries are recorded as unmapped placeholders (phys -1) so the
    address space is fully represented. Errors are logged to out_file.
    """
    tmp_mapping = {}
    fl_pte = pg_table
    for fl_index in range(0, self.NUM_FL_PTE):
        fl_pg_table_entry = self.ramdump.read_u32(fl_pte)
        if fl_pg_table_entry is not None:
            if fl_pg_table_entry & self.FL_TYPE_SECT:
                (phy_addr, page_size, map_type,
                 status) = self.get_sect_mapping_info(fl_pg_table_entry)
                if status:
                    if phy_addr != -1:
                        tmp_mapping = self.add_flat_mapping(
                            tmp_mapping, fl_index, 0, phy_addr, map_type, page_size, True)
                    else:
                        # no mapping
                        tmp_mapping = self.add_flat_mapping(
                            tmp_mapping, fl_index, 0, -1, 0, 0, False)
            elif fl_pg_table_entry & self.FL_TYPE_TABLE:
                # Entry points to a second-level table; walk every SL PTE.
                sl_pte = fl_pg_table_entry & self.FL_BASE_MASK
                for sl_index in range(0, self.NUM_SL_PTE):
                    (phy_addr, page_size, map_type,
                     status) = self.get_mapping_info(sl_pte, sl_index)
                    if status:
                        if phy_addr != -1:
                            tmp_mapping = self.add_flat_mapping(
                                tmp_mapping, fl_index, sl_index, phy_addr, map_type, page_size, True)
                        else:
                            # no mapping
                            tmp_mapping = self.add_flat_mapping(
                                tmp_mapping, fl_index, sl_index, -1, 0, 0, False)
                    else:
                        self.out_file.write(
                            '[!] WARNING: FL_PTE[%d] SL_PTE[%d] ERROR [Unknown error]\n' % (fl_index, sl_index))
            elif fl_pg_table_entry != 0:
                self.out_file.write(
                    '[!] WARNING: FL_PTE[%d] = %x NOTE: ERROR [Cannot understand first level page table entry]\n' %
                    (fl_index, fl_pg_table_entry))
            else:
                # Zero entry: record the 1MB region as unmapped.
                tmp_mapping = self.add_flat_mapping(
                    tmp_mapping, fl_index, 0, -1, 0, 0, False)
        else:
            self.out_file.write(
                '[!] WARNING: FL_PTE[%d] NOTE: ERROR [Cannot understand first level page table entry]\n' % (fl_index))
        fl_pte += 4
    return tmp_mapping
def create_collapsed_mapping(self, flat_mapping):
    """Merge adjacent FlatMappings into CollapsedMapping ranges.

    Entries are merged while they share size/type/mapped-state and their
    physical addresses are contiguous (or identical). Returns a dict
    keyed by the start VA of each merged range.
    """
    collapsed_mapping = {}
    if len(flat_mapping.keys()) > 0:
        virt_addrs = sorted(flat_mapping.keys())
        start_map = prev_map = flat_mapping[virt_addrs[0]]
        last_mapping = False
        for virt in virt_addrs[1:]:
            map = flat_mapping[virt]
            new_mapping = False
            # Same attributes: keep merging unless the physical addresses
            # are discontiguous (neither adjacent nor identical).
            if map.mapping_size == prev_map.mapping_size and map.mapping_type == prev_map.mapping_type and map.mapped == prev_map.mapped:
                if prev_map.mapping_size == SZ_4K:
                    if (map.phys - SZ_4K) != prev_map.phys and map.phys != prev_map.phys:
                        new_mapping = True
                elif prev_map.mapping_size == SZ_64K:
                    if (map.phys - SZ_64K) != prev_map.phys and map.phys != prev_map.phys:
                        new_mapping = True
                elif prev_map.mapping_size == SZ_1M:
                    if (map.phys - SZ_1M) != prev_map.phys and map.phys != prev_map.phys:
                        new_mapping = True
                elif prev_map.mapping_size == SZ_16M:
                    if (map.phys - SZ_16M) != prev_map.phys and map.phys != prev_map.phys:
                        new_mapping = True
                elif virt == virt_addrs[-1]:
                    # Last one
                    # NOTE(review): reachable only when mapping_size is none
                    # of the four page sizes above (e.g. the unmapped
                    # placeholder size 0) -- confirm the final range for
                    # normal sizes is intentionally left unemitted.
                    last_mapping = True
            else:
                new_mapping = True
            if new_mapping:
                collapsed_mapping = self.add_collapsed_mapping(
                    collapsed_mapping, start_map.virt, map.virt,
                    start_map.phys, prev_map.phys +
                    prev_map.mapping_size,
                    prev_map.mapping_type, prev_map.mapping_size, prev_map.mapped)
                start_map = map
            elif last_mapping:
                # Close out the final range at the 4GB boundary.
                collapsed_mapping = self.add_collapsed_mapping(
                    collapsed_mapping, start_map.virt, 0xFFFFFFFF + 1,
                    start_map.phys, prev_map.phys +
                    prev_map.mapping_size,
                    prev_map.mapping_type, prev_map.mapping_size, prev_map.mapped)
            prev_map = map
    return collapsed_mapping
def print_page_table_pretty(self, pg_table):
    """Print the domain's mappings as collapsed VA/PA ranges to out_file."""
    flat_mapping = self.create_flat_mapping(pg_table)
    collapsed_mapping = self.create_collapsed_mapping(flat_mapping)
    for virt in sorted(collapsed_mapping.keys()):
        mapping = collapsed_mapping[virt]
        if mapping.mapped:
            self.out_file.write(
                '0x%08x--0x%08x [0x%08x] A:0x%08x--0x%08x [0x%08x] %s[%s]\n' % (mapping.virt_start, mapping.virt_end, mapping.virt_size(),
                                                                                mapping.phys_start, mapping.phys_end,
                                                                                mapping.phys_size(), mapping.mapping_type, order_size_strings[get_order(mapping.mapping_size)]))
        else:
            self.out_file.write('0x%08x--0x%08x [0x%08x] [UNMAPPED]\n' %
                                (mapping.virt_start, mapping.virt_end, mapping.virt_size()))
def parse_short_form_tables(self, d, domain_num):
    """Write one report file for a short-descriptor (32-bit) IOMMU domain.

    Emits the domain/context header, a pretty-printed collapsed mapping
    view, and a raw PTE dump of the domain's page table. Secure domains
    (pg_table == 0) get a placeholder message instead.
    """
    # %012X zero-pads the page-table address; the previous %12X space-padded
    # it, embedding spaces in the generated filename.
    self.out_file = self.ramdump.open_file(
        'msm_iommu_domain_%02d_0x%012X.txt' % (domain_num, d.pg_table))
    redirect = 'OFF'
    if d.redirect is None:
        redirect = 'UNKNOWN'
    elif d.redirect > 0:
        redirect = 'ON'
    iommu_context = 'None attached'
    if len(d.ctx_list) > 0:
        iommu_context = ''
        for (num, name) in d.ctx_list:
            iommu_context += '%s (%d) ' % (name, num)
        iommu_context = iommu_context.strip()
    self.out_file.write('IOMMU Context: %s. Domain: %s'
                        '[L2 cache redirect for page tables is %s]\n' % (
                            iommu_context, d.client_name, redirect))
    self.out_file.write(
        '[VA Start -- VA End ] [Size ] [PA Start -- PA End ] [Size ] [Read/Write][Page Table Entry Size]\n')
    if d.pg_table == 0:
        self.out_file.write(
            'No Page Table Found. (Probably a secure domain)\n')
    else:
        self.print_page_table_pretty(d.pg_table)
        self.out_file.write('\n-------------\nRAW Dump\n')
        self.print_page_table(d.pg_table)
    self.out_file.close()
def parse(self):
    """Entry point: dump page tables for every registered IOMMU domain.

    Dispatches each domain to the walker matching its descriptor format:
    LPAE long-descriptor, MSM short-descriptor, or AArch64.
    """
    ilib = IommuLib(self.ramdump)
    self.domain_list = ilib.domain_list
    if self.domain_list is None:
        print_out_str(
            '[!] WARNING: IOMMU domains was not found in this build. No IOMMU page tables will be generated')
        return
    for (domain_num, d) in enumerate(self.domain_list):
        if self.ramdump.is_config_defined('CONFIG_IOMMU_LPAE'):
            # LPAE builds use the long-descriptor walker for all domains.
            parse_long_form_tables(self.ramdump, d, domain_num)
        elif (d.domain_type == MSM_SMMU_DOMAIN):
            self.parse_short_form_tables(d, domain_num)
        elif ((d.domain_type == ARM_SMMU_DOMAIN) or
              (d.domain_type == MSM_SMMU_AARCH64_DOMAIN)):
            parse_aarch64_tables(self.ramdump, d, domain_num)

View File

@@ -0,0 +1,603 @@
"""
Copyright (c) 2016, 2018, 2020-2021 The Linux Foundation. All rights reserved.
Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of The Linux Foundation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Changes from Qualcomm Innovation Center are provided under the following license:
Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause-Clear
"""
from parser_util import register_parser, RamParser
from print_out import print_out_str
from rb_tree import RbTree
import logging
import os
RB_PARENT_COLOR_MASK = 0xFFFFFFFFFFFFFFFC
grand_total = 0
TASK_NAME_LENGTH = 16
ion_heap_buffers = []
dmabuf_heap_names = ["qcom_dma_heaps"]
def bytes_to_KB(bytes):
    """Convert a byte count to whole kilobytes (floor division)."""
    return bytes // 1024 if bytes != 0 else 0
def bytes_to_mb(bytes):
    """Convert a byte count to whole megabytes (floor division)."""
    if bytes == 0:
        return 0
    return (bytes // 1024) // 1024
def get_dmabuf_heap_names(self, ramdump, ion_info):
    """Populate the module-level dmabuf_heap_names from kernel 'heap_list'.

    Walks the struct dma_heap linked list in the dump and appends every
    heap's name. Returns True on success; returns False (after logging a
    note to ion_info) when the heap_list symbol is missing or the list is
    corrupted.
    """
    heap_list = ramdump.address_of('heap_list')
    if heap_list is None:
        ion_info.write("NOTE: 'heap_list' list not found to extract the "
                       "dmabuf heap type list")
        return False
    list_offset = ramdump.field_offset('struct dma_heap', 'list')
    name_offset = ramdump.field_offset('struct dma_heap', 'name')
    next_offset = ramdump.field_offset('struct list_head', 'next')
    prev_offset = ramdump.field_offset('struct list_head', 'prev')
    head = ramdump.read_word(heap_list + next_offset)
    while (head != heap_list):
        # 'head' points at the embedded list node; back up to the dma_heap.
        dma_heap_addr = head - list_offset
        dma_heap_name_addr = ramdump.read_word(dma_heap_addr + name_offset)
        dma_heap_name = ramdump.read_cstring(dma_heap_name_addr, 48)
        dmabuf_heap_names.append(dma_heap_name)
        head = ramdump.read_word(head + next_offset)
        prev = ramdump.read_word(head + prev_offset)
        # A null forward/backward pointer means the list is damaged.
        if head == 0 or prev == 0:
            ion_info.write("NOTE: 'dmabuf heap_list' is corrupted")
            return False
    return True
def ion_buffer_info(self, ramdump, ion_info):
    """Dump every dma_buf on the kernel's global db_list to ionbuffer.txt.

    Also writes the summed dma-buf size (MB) to total_dma_heap.txt.
    NOTE(review): the passed-in ion_info handle is immediately replaced
    by a freshly opened 'ionbuffer.txt' handle, so the caller's file
    object is unused here -- confirm this double-open is intentional.
    """
    ion_info = ramdump.open_file('ionbuffer.txt')
    db_list = ramdump.address_of('db_list')
    if db_list is None:
        ion_info.write("NOTE: 'db_list' list not found to extract the ion "
                       "buffer information")
        return
    total_dma_heap = 0
    total_dma_info = ramdump.open_file('total_dma_heap.txt')
    ion_info.write("*****Parsing dma buf info for ion leak debugging*****\n\n")
    head_offset = ramdump.field_offset('struct dma_buf_list', 'head')
    head = ramdump.read_word(db_list + head_offset)
    # next/prev offsets are computed but not used in the walk below.
    next_offset = ramdump.field_offset('struct list_head', 'next')
    prev_offset = ramdump.field_offset('struct list_head', 'prev')
    list_node_offset = ramdump.field_offset('struct dma_buf', 'list_node')
    size_offset = ramdump.field_offset('struct dma_buf', 'size')
    ops_offset = ramdump.field_offset('struct dma_buf', 'ops')
    file_offset = ramdump.field_offset('struct dma_buf', 'file')
    f_count_offset = ramdump.field_offset('struct file', 'f_count')
    name_offset = ramdump.field_offset('struct dma_buf', 'buf_name')
    if name_offset is None:
        # Older kernels call the field 'name' rather than 'buf_name'.
        name_offset = ramdump.field_offset('struct dma_buf', 'name')
    exp_name_offset = ramdump.field_offset('struct dma_buf', 'exp_name')
    print("File_addr dma_buf REF Name Size Exp Heap Size in KB dma_heap->ops dma_buf->ops \n", file = ion_info)
    dma_buf_info = []
    if (ramdump.kernel_version >= (5, 10)):
        # 5.10+ uses dmabuf heaps; heap names identify qcom ion buffers.
        if get_dmabuf_heap_names(self, ramdump, ion_info) is False:
            return
    while (head != db_list):
        dma_buf_addr = head - list_node_offset
        size = ramdump.read_word(dma_buf_addr + size_offset)
        total_dma_heap = total_dma_heap + size
        file = ramdump.read_word(dma_buf_addr + file_offset)
        f_count = ramdump.read_u64(file + f_count_offset)
        exp_name = ramdump.read_word(dma_buf_addr + exp_name_offset)
        exp_name = ramdump.read_cstring(exp_name, 48)
        dma_buf_ops = ''
        dma_buf_ops_addr = ramdump.read_word(dma_buf_addr + ops_offset)
        look = ramdump.unwind_lookup(dma_buf_ops_addr)
        if look != None:
            fop, offset = look
            dma_buf_ops = fop
        ionheap_name = None
        ops = ''
        if (ramdump.kernel_version >= (5, 10)):
            if exp_name in dmabuf_heap_names:
                ion_buffer = ramdump.read_structure_field(dma_buf_addr, 'struct dma_buf', 'priv')
                ion_heap = ramdump.read_structure_field(ion_buffer, 'struct qcom_sg_buffer', 'heap')
                ionheap_name_addr = ramdump.read_structure_field(ion_heap, 'struct dma_heap', 'name')
                ionheap_name = ramdump.read_cstring(ionheap_name_addr, 48)
                dma_heap_ops = ramdump.read_structure_field(ion_heap, 'struct dma_heap', 'ops')
                look = ramdump.unwind_lookup(dma_heap_ops)
                if look !=None:
                    fop, offset = look
                    ops = fop
        else:
            if exp_name == 'ion':
                ion_buffer = ramdump.read_structure_field(dma_buf_addr, 'struct dma_buf', 'priv')
                ion_heap = ramdump.read_structure_field(ion_buffer, 'struct ion_buffer', 'heap')
                ionheap_name_addr = ramdump.read_structure_field(ion_heap, 'struct ion_heap', 'name')
                ionheap_name = ramdump.read_cstring(ionheap_name_addr, 48)
        if ionheap_name is None:
            ionheap_name = "None"
        name = ramdump.read_word(dma_buf_addr + name_offset)
        if not name:
            name = "None"
        else:
            name = ramdump.read_cstring(name, 48)
        dma_buf_info.append([dma_buf_addr, f_count, name, size, exp_name,
                             ionheap_name, size, ops, dma_buf_ops, file])
        # NOTE(review): advances via read_word(head), i.e. offset 0, not
        # head + next_offset -- correct only if list_head.next is at 0.
        head = ramdump.read_word(head)
    # Sort by size (index 6), largest first.
    dma_buf_info = sorted(dma_buf_info, key=lambda l: l[6], reverse=True)
    total_dma_heap_mb = bytes_to_mb(total_dma_heap)
    total_dma_heap_mb = str(total_dma_heap_mb) + "MB"
    total_dma_info.write("Total dma memory: {0}".format(total_dma_heap_mb))
    for item in dma_buf_info:
        print("v.v (struct file *)0x%x v.v (struct dma_buf*)0x%x %2d %8s 0x%-8x %-24s %-24s %16dKB %-32s %-32s"
              %(item[9], item[0], item[1], item[2], item[3], item[4], item[5], item[6]/1024, item[7], item[8]), file = ion_info)
def get_bufs(self, task, bufs, ion_info, ramdump):
    """Scan one task's fd table for dma-buf files, collecting into *bufs*.

    Each dma-buf fd appends [exp_name, hex size, size KB, age seconds,
    file ptr, dma_buf ptr] to bufs (deduplicated); returns the summed
    byte size of the entries this call added. *self* supplies the cached
    struct offsets (set up by DumpIonBuffer.__init__).
    """
    t_size = 0
    dma_buf_fops = ramdump.address_of('dma_buf_fops')
    if dma_buf_fops is None:
        ion_info.write("NOTE: 'dma_buf_fops' not found for file information\n")
        return 0
    if task is None:
        return 0
    files = ramdump.read_pointer(task + self.files_offset)
    if files is None:
        return 0
    fdt = ramdump.read_pointer(files + self.fdt_offset)
    if fdt is None:
        return 0
    fd = ramdump.read_pointer(fdt + self.fd_offset)
    max_fds = ramdump.read_halfword(fdt + self.max_fds_offset)
    # Current system time (seconds), used to compute each buffer's age.
    stime = ramdump.read_word(self.timekeeper + self.stime_offset)
    ctime_offset = ramdump.field_offset('struct dma_buf', 'ktime')
    if ctime_offset is not None:
        ctime_offset += ramdump.field_offset('struct timespec', 'tv_sec')
    for i in range(max_fds):
        # fd table is an array of struct file pointers (8 bytes each).
        file = ramdump.read_pointer(fd + i*8)
        if (file == 0):
            continue
        f_op = ramdump.read_pointer(file + self.f_op_offset)
        # Only files whose f_op is dma_buf_fops are dma-bufs.
        if (f_op != dma_buf_fops):
            continue
        dmabuf = ramdump.read_pointer(file + self.private_data_offset)
        size = ramdump.read_word(dmabuf + self.size_offset)
        time = 0
        if ctime_offset is not None:
            ctime = ramdump.read_word(dmabuf + ctime_offset)
            # Creation time is in nanoseconds; convert to seconds.
            ctime = ctime // 1000000000
            time = stime - ctime
        name = ramdump.read_word(dmabuf + self.exp_name_offset)
        if not name:
            name = "None"
        else:
            name = ramdump.read_cstring(name, 48)
        item = [name, hex(size), bytes_to_KB(size), str(time), file, dmabuf]
        if item not in bufs:
            t_size = t_size + size
            bufs.append(item)
    # Largest buffers first (index 2 is size in KB).
    bufs.sort(key=lambda item: -item[2])
    return t_size
def get_proc_bufs(self, task, bufs, ion_info, ramdump):
    """Sum dma-buf sizes across every thread of *task*, collecting into *bufs*."""
    total = 0
    for thread in ramdump.for_each_thread(task):
        total += get_bufs(self, thread, bufs, ion_info, ramdump)
    return total
def ion_proc_info(self, ramdump):
    """Write per-process dma-buf usage to ionproc.txt, largest users first.

    For every process, gathers dma-buf fds across all its threads via
    get_proc_bufs(); processes with zero dma-buf memory are skipped.
    """
    ionproc_file = ramdump.open_file('ionproc.txt')
    ionproc_file.write("*****Parsing dma proc info for ion leak debugging*****\n")
    pid_offset = ramdump.field_offset('struct task_struct', 'tgid')
    comm_offset = ramdump.field_offset('struct task_struct', 'comm')
    dma_procs = []
    for task in ramdump.for_each_process():
        bufs = []
        size = get_proc_bufs(self, task, bufs, ionproc_file, ramdump)
        if (size == 0):
            continue
        comm = ramdump.read_cstring(task + comm_offset)
        pid = ramdump.read_int(task + pid_offset)
        dma_procs.append([comm, pid, bytes_to_KB(size), bufs])
    # Largest consumers first.
    dma_procs.sort(key=lambda item: -item[2])
    for proc in dma_procs:
        # Renamed local from 'str' to avoid shadowing the builtin.
        summary = "\n{0} (PID {1}) size (KB): {2}\n"\
            .format(proc[0], proc[1], proc[2])
        ionproc_file.write(summary)
        ionproc_file.write("{0:15} {1:15} {2:10} {3:20} {4:20} {5:20}\n".format(
            'Name', 'Size', 'Size in KB', 'Time Alive(sec)', 'file', 'dma_buf'))
        for item in proc[3]:
            print("%-32s %8s %8s %8s v.v (struct file*)0x%x v.v (struct dma_buf)0x%x" %(
                item[0], item[1], item[2], item[3], item[4], item[5]), file = ionproc_file)
def do_dump_ionbuff_info(self, ramdump, ion_info):
    """Legacy (< 4.14 kernels) path: dump every ion heap to ionbuffer.txt.

    Reads the 'num_heaps'/'heaps' symbols, walks the heap pointer array
    and parses each heap via parse_heap(); finishes with the accumulated
    grand_total.
    """
    addressspace = 8
    heap_addr_array = []
    ionbuffer_file = ramdump.open_file('ionbuffer.txt')
    # read num of heaps
    number_of_heaps = ramdump.read_word('num_heaps')
    ion_info.write('Number of heaps:{0} \n'.format(number_of_heaps))
    # get heap starting address
    heap_addr = ramdump.read_pointer('heaps')
    # Pointer width depends on the target architecture.
    if self.ramdump.arm64:
        addressspace = 8
    else:
        addressspace = 4
    # get address of all heaps
    nIndex = 0
    for nIndex in range(0, number_of_heaps):
        heap_addr_array.append(heap_addr + (nIndex*addressspace))
    # parse a heap
    nIndex = 0
    for nIndex in range(0, number_of_heaps):
        str = "\n\n parsing {0:0} of {1:0} heap Heap: 0x{2:x}"
        ionbuffer_file.write(str.format(
            nIndex + 1,
            number_of_heaps,
            ramdump.read_word(
                heap_addr_array[nIndex])))
        parse_heap(self, ramdump, heap_addr_array[nIndex], ionbuffer_file)
    ionbuffer_file.write(
        '\n Total ION buffer size: {0:1} KB'.format(
            bytes_to_KB(grand_total)))
def parse_heap(self, ramdump, heap_addr, ionbuffer_file):
    """Report one ion heap: totals, per-client usage and orphaned buffers.

    *heap_addr* is the address of a slot in the 'heaps' pointer array.
    Side effect: accumulates the heap's total buffer size into the
    module-level grand_total, and caches ion_handle.node's offset on
    self for traverse_ion_heap_buffer().
    """
    global grand_total
    nr_clients = 0
    total_orphan_buffer_size = 0
    ion_heap = ramdump.read_word(heap_addr)
    ionheap_id = ramdump.read_structure_field(
        ion_heap, 'struct ion_heap', 'id')
    ionheap_name_addr = ramdump.read_structure_field(
        ion_heap, 'struct ion_heap', 'name')
    ionheap_name = ramdump.read_cstring(ionheap_name_addr, TASK_NAME_LENGTH)
    ionheap_type = ramdump.read_structure_field(
        ion_heap, 'struct ion_heap', 'type')
    ionheap_total_allocated = ramdump.read_structure_field(
        ion_heap, 'struct ion_heap', 'total_allocated.counter')
    ionheap_total_handles = ramdump.read_structure_field(
        ion_heap, 'struct ion_heap', 'total_handles.counter')
    self.ion_handle_node_offset = ramdump.field_offset(
        'struct ion_handle', 'node')
    ionbuffer_file.write("\n*********************************************")
    str = "\n Heap ID : {0} Heap Type: {1} Heap Name : {2}\n"
    ionbuffer_file.write(str.format(ionheap_id, ionheap_type, ionheap_name))
    ionbuffer_file.write('\n Total allocated : {0:1} KB'.format(
        bytes_to_KB(ionheap_total_allocated)))
    ionbuffer_file.write('\n Total Handles : {0:1} KB'.format(
        bytes_to_KB(ionheap_total_handles)))
    # Allocated minus handle-referenced = orphaned memory.
    orphan = bytes_to_KB(ionheap_total_allocated - ionheap_total_handles)
    ionbuffer_file.write('\n Orphan : {0:1} KB'.format(orphan))
    ionbuffer_file.write("\n*********************************************")
    ion_dev = ramdump.read_structure_field(
        ion_heap, 'struct ion_heap', 'dev')
    clients_rb_root = ion_dev + ramdump.field_offset('struct ion_device', 'clients')
    if ionheap_total_allocated != 0:
        nr_clients = show_ion_dev_client(
            self, ramdump,
            clients_rb_root,
            ionheap_id, ionbuffer_file)
        str = "\n \nTotal number of clients: {0:1}"
        ionbuffer_file.write(str.format(nr_clients))
        ionbuffer_file.write("\n ----------------------------------")
        str = "\n orphaned allocations (info is from last known client):\n"
        ionbuffer_file.write(str)
        total_orphan_buffer_size, total_buffer_size = \
            parse_orphan_buffers(self, ramdump, ion_dev, ionheap_id, ionbuffer_file)
        ionbuffer_file.write("\n ----------------------------------")
        ionbuffer_file.write(
            '\n total orphan size: {0} KB'.format(
                bytes_to_KB(total_orphan_buffer_size)))
        ionbuffer_file.write(
            '\n total buffer size: {0} KB'.format(
                bytes_to_KB(total_buffer_size)))
        ionbuffer_file.write("\n ----------------------------------")
        grand_total = grand_total + total_buffer_size
def parse_orphan_buffers(self, ramdump, ion_dev, heap_id, ionbuffer_file):
    """Scan the ion_device buffer rb-tree for buffers on *heap_id*.

    Prints each orphaned buffer (handle_count == 0) with owner info and
    returns the tuple (orphan_bytes, total_bytes) for the heap.
    """
    orphan_buffer_size = 0
    total_buffer_size = 0
    rbtree = RbTree(ramdump, ion_dev + ramdump.field_offset('struct ion_device', 'buffers'),
                    logger = self.logger, debug = True)
    ion_buffer_rb_node_offset = ramdump.field_offset(
        'struct ion_buffer', 'node')
    ion_buffer_task_comm_offset = ramdump.field_offset(
        'struct ion_buffer', 'task_comm')
    ion_buffer_ref_offset = ramdump.field_offset(
        'struct ion_buffer', 'ref')
    # NOTE: 'str' shadows the builtin here; kept as-is (doc-only change).
    str = "\n buffer: 0x{0:x}, Buffer size: {1} KB "
    str = str + "comm: {2} PID: {3} kmap count: {4} ref_count : {5}"
    for rb_node in rbtree:
        ion_buffer = rb_node - ion_buffer_rb_node_offset
        ion_buffer_ref_add = ion_buffer + ion_buffer_ref_offset
        ion_buffer_heap = ramdump.read_structure_field(
            ion_buffer, 'struct ion_buffer', 'heap')
        ion_heap_id = ramdump.read_structure_field(
            ion_buffer_heap, 'struct ion_heap', 'id')
        ion_buffer_size = ramdump.read_structure_field(
            ion_buffer, 'struct ion_buffer', 'size')
        ion_buffer_handlecount = ramdump.read_structure_field(
            ion_buffer, 'struct ion_buffer', 'handle_count')
        ref_counter = ramdump.read_structure_field(
            ion_buffer_ref_add, 'struct kref', 'refcount.counter')
        if heap_id == ion_heap_id:
            total_buffer_size = total_buffer_size + ion_buffer_size
            # if orphaned allocation
            if ion_buffer_handlecount == 0:
                ion_buffer_pid = ramdump.read_structure_field(
                    ion_buffer, 'struct ion_buffer', 'pid')
                ion_buffer_kmap_count = ramdump.read_structure_field(
                    ion_buffer, 'struct ion_buffer', 'kmap_cnt')
                client_name = ramdump.read_cstring(
                    (ion_buffer + ion_buffer_task_comm_offset),
                    TASK_NAME_LENGTH)
                ionbuffer_file.write(str.format(
                    ion_buffer,
                    bytes_to_KB(ion_buffer_size),
                    client_name,
                    ion_buffer_pid,
                    ion_buffer_kmap_count,
                    ref_counter))
                orphan_buffer_size = orphan_buffer_size + ion_buffer_size
    return orphan_buffer_size, total_buffer_size
def show_ion_dev_client(
        self,
        ramdump,
        rb_root,
        ionheap_id, ionbuffer_file):
    """Walk the ion_device client rb-tree and print clients of *ionheap_id*.

    Each client holding memory on the heap is printed with its task/name/
    PID and heap usage, followed by the per-buffer lines collected by
    traverse_ion_heap_buffer(). Returns the number of such clients.
    """
    global ion_heap_buffers
    nr_clients = 0
    client_name = 0
    rbtree = RbTree(ramdump, rb_root, logger=self.logger, debug=True)
    ion_client_node_offset = ramdump.field_offset(
        'struct ion_client', 'node')
    task_comm_offset = ramdump.field_offset(
        'struct task_struct', 'comm')
    # Format strings renamed from 'str'/'str1' to stop shadowing the builtin.
    tempstr = "\n\n CLIENT: (struct ion_client *)0x{0:x} , "
    fmt_task = tempstr + "task : {1} / ion_client : {2} / PID: {3} / Size : {4} KB"
    fmt_no_task = tempstr + "ion_client : {1} / PID: {2} / Size : {3} KB"
    # A vestigial 'if True:' wrapper around this loop was removed.
    for rb_node in rbtree:
        ion_client = rb_node - ion_client_node_offset
        heap_size = traverse_ion_heap_buffer(
            self,
            ramdump,
            ion_client,
            ionheap_id,
            ionbuffer_file)
        if heap_size > 0:
            nr_clients = nr_clients + 1
            ion_client_task = ramdump.read_structure_field(
                ion_client, 'struct ion_client', 'task')
            task_comm = ion_client_task + task_comm_offset
            client_name = ramdump.read_cstring(
                task_comm, TASK_NAME_LENGTH)
            ion_client_name = ramdump.read_structure_field(
                ion_client,
                'struct ion_client',
                'display_name')
            ion_client_name = ramdump.read_cstring(
                ion_client_name,
                TASK_NAME_LENGTH)
            client_PID = ramdump.read_structure_field(
                ion_client, 'struct ion_client', 'pid')
            # Kernel clients have no task; use the task-less format for them.
            if ion_client_task != 0:
                ionbuffer_file.write(fmt_task.format(
                    ion_client, client_name, ion_client_name,
                    client_PID, bytes_to_KB(heap_size)))
            else:
                ionbuffer_file.write(fmt_no_task.format(
                    ion_client, ion_client_name,
                    client_PID, bytes_to_KB(heap_size)))
            for heap_buffer in ion_heap_buffers:
                ionbuffer_file.write(heap_buffer)
    return nr_clients
def traverse_ion_heap_buffer(self, ramdump, ion_client, ionheap_id, ionbuffer_file):
    """Sum the sizes of *ion_client*'s handles that live on *ionheap_id*.

    Side effect: resets the module-level ion_heap_buffers list and fills
    it with one formatted line per matching buffer; the caller
    (show_ion_dev_client) prints them. Returns the summed byte size.
    """
    global ion_heap_buffers
    ion_handle_root_offset = ramdump.field_offset(
        'struct ion_client', 'handles')
    ion_handle_root_address = ion_client + ion_handle_root_offset
    ion_buffer_heap_size = 0
    ion_heap_buffers = []
    # NOTE: 'str' shadows the builtin here; kept as-is (doc-only change).
    str = "\n (+) ion_buffer: 0x{0:x} size: {1:0} KB Handle Count: {2:0}"
    rbtree = RbTree(ramdump, ion_handle_root_address,
                    logger=self.logger, debug = True)
    for ion_handle_rb_node in rbtree:
        ion_handle = ion_handle_rb_node - self.ion_handle_node_offset
        ion_buffer = ramdump.read_structure_field(
            ion_handle, 'struct ion_handle', 'buffer')
        ion_buffer_size = ramdump.read_structure_field(
            ion_buffer, 'struct ion_buffer', 'size')
        ion_buffer_heap = ramdump.read_structure_field(
            ion_buffer, 'struct ion_buffer', 'heap')
        ion_heap_id = ramdump.read_structure_field(
            ion_buffer_heap, 'struct ion_heap', 'id')
        if ionheap_id == ion_heap_id:
            ion_buffer_heap_size = ion_buffer_heap_size + ion_buffer_size
            ion_buffer_handlecount = ramdump.read_structure_field(
                ion_buffer,
                'struct ion_buffer', 'handle_count')
            temp = str.format(
                ion_buffer,
                bytes_to_KB(ion_buffer_size),
                ion_buffer_handlecount)
            ion_heap_buffers.append(temp)
    return ion_buffer_heap_size
def parser(self, arg, ramdump, node, ion_info):
    """Generic rb-tree stepper used by the manual tree walks.

    arg == 1: treat *node* as any node of the tree, climb to the root,
    then return the first (leftmost) node. arg == 2: return the in-order
    successor of *node*. Returns 0 when the word at *node* is null.
    """
    rb_root = 0
    last_node = 0
    self.orphan_size = 0
    rbnode_left_offset = ramdump.field_offset('struct rb_node', 'rb_left')
    temp = ramdump.read_word(node)
    if temp == 0:
        return 0
    if arg == 1:
        rb_root = find_rb_root(self, ramdump, node, ion_info)
        last_node = find_rb_first(
            self, ramdump, rb_root, rbnode_left_offset, ion_info)
    if arg == 2:
        last_node = find_rb_next(
            self, arg, ramdump, node, rbnode_left_offset, ion_info)
    return last_node
def find_rb_next(self, arg, ramdump, node, rbnode_left_offset, ion_info):
    """Return the in-order successor of *node* in an rb-tree (0 if none).

    *arg* is unused here; it is kept for signature compatibility with
    parser(), which dispatches on it.
    """
    parent = cal_rb_parent(self, ramdump, node, ion_info)
    tmp_node = 0
    if parent == node:
        # A node that is its own parent marks an empty/sentinel tree.
        ion_info.write("RETURNING NULL")
        return 0
    rbnode_right_offset = ramdump.field_offset('struct rb_node', 'rb_right')
    rb_right = ramdump.read_word(node + rbnode_right_offset)
    if rb_right != 0:  # right node exist
        # Successor is the leftmost node of the right subtree.
        return find_rb_first(
            self, ramdump, rb_right, rbnode_left_offset, ion_info)
    # No right child: climb until we come up from a left child.
    while True:
        # Fixed: the original tested 'parent == 0 or None', whose second
        # arm is always falsy, so a None parent fell into the else branch
        # and crashed on the pointer arithmetic below.
        if parent == 0 or parent is None:
            tmp_node = 0
            parent = 0
        else:
            parent = cal_rb_parent(self, ramdump, node, ion_info)
            tmp_node = ramdump.read_word(parent + rbnode_right_offset)
        if tmp_node == node:
            # We came up from the right; keep climbing.
            node = parent
            continue
        else:
            return parent
def find_rb_first(self, ramdump, node, rbnode_left_offset, ion_info):
    """Return the leftmost (minimum) rb-tree node reachable from *node*.

    Follows rb_left pointers until a null link; returns *node* itself
    when it has no left child, or 0 when *node* is 0.
    """
    leftmost = node
    while node != 0:
        leftmost = node
        node = ramdump.read_word(node + rbnode_left_offset)
    return leftmost
def cal_rb_parent(self, ramdump, ion_dev_rb_root, ion_info):
    """Decode the parent pointer from a node's __rb_parent_color word.

    The low bits of __rb_parent_color carry the node colour; masking with
    RB_PARENT_COLOR_MASK leaves the parent rb_node address.
    """
    color_off = ramdump.field_offset('struct rb_node', '__rb_parent_color')
    parent_color = ramdump.read_word(ion_dev_rb_root + color_off)
    return parent_color & RB_PARENT_COLOR_MASK
def find_rb_root(self, ramdump, ion_dev_rb_root, ion_info):
    """Climb parent links from *ion_dev_rb_root* until the tree root.

    The root is recognised by its __rb_parent_color word equalling 1
    (null parent with the colour bit set).
    """
    rbnode_color_offset = ramdump.field_offset(
        'struct rb_node', '__rb_parent_color')
    current = ion_dev_rb_root
    word = ramdump.read_word(ion_dev_rb_root + rbnode_color_offset)
    while word != 1:
        current = cal_rb_parent(self, ramdump, current, ion_info)
        word = ramdump.read_word(current + rbnode_color_offset)
    return current
@register_parser('--print-ionbuffer', 'Print ion buffer', optional=True)
class DumpIonBuffer(RamParser):
    """--print-ionbuffer: report ion/dma-buf allocations from a ramdump.

    Caches the struct offsets used by the module-level helpers
    (get_bufs, ion_buffer_info, ...) and dispatches to the parser
    matching the dump's kernel version.
    """

    def __init__(self, *args):
        super(DumpIonBuffer, self).__init__(*args)
        # Cached symbol addresses / struct offsets read by get_bufs().
        self.timekeeper = self.ramdump.address_of('shadow_timekeeper')
        self.files_offset = self.ramdump.field_offset(
            'struct task_struct', 'files')
        self.fdt_offset = self.ramdump.field_offset(
            'struct files_struct', 'fdt')
        self.fd_offset = self.ramdump.field_offset('struct fdtable', 'fd')
        self.max_fds_offset = self.ramdump.field_offset(
            'struct fdtable', 'max_fds')
        self.f_op_offset = self.ramdump.field_offset('struct file', 'f_op')
        self.private_data_offset = self.ramdump.field_offset('struct file',
                                                             'private_data')
        self.size_offset = self.ramdump.field_offset('struct dma_buf', 'size')
        # 'buf_name' on newer kernels, plain 'name' on older ones.
        self.name_offset = self.ramdump.field_offset('struct dma_buf', 'buf_name')
        if self.name_offset is None:
            self.name_offset = self.ramdump.field_offset('struct dma_buf', 'name')
        self.exp_name_offset = self.ramdump.field_offset('struct dma_buf', 'exp_name')
        self.stime_offset = self.ramdump.field_offset('struct timekeeper',
                                                      'ktime_sec')

    def parse(self):
        """Dispatch to the kernel-version-appropriate ion/dma-buf dumper."""
        with self.ramdump.open_file('ionbuffer.txt') as ionbuffer_file:
            if (self.ramdump.kernel_version < (3, 18, 0)):
                ionbuffer_file.write('Kernel version 3.18 \
and above are supported, current version {0}.\
{1}'.format(self.ramdump.kernel_version[0],
            self.ramdump.kernel_version[1]))
                return
            # Helpers log rb-tree debug output through this logger.
            self.logger = logging.getLogger(__name__)
            path = os.path.join(self.ramdump.outdir, 'print-ionbuffer.stderr')
            self.logger.addHandler(logging.FileHandler(path, mode='w'))
            self.logger.setLevel(logging.INFO)
            self.logger.info("Starting --print-ionbuffer")
            if (self.ramdump.kernel_version >= (4, 14)):
                ion_buffer_info(self, self.ramdump, ionbuffer_file)
                ion_proc_info(self, self.ramdump)
            else:
                do_dump_ionbuff_info(self, self.ramdump, ionbuffer_file)

View File

@@ -0,0 +1,133 @@
#SPDX-License-Identifier: GPL-2.0-only
#Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
import linux_list as llist
from parser_util import register_parser, RamParser, cleanupString
from print_out import print_out_str
@register_parser('--print-iostat', 'Print iostat info and block device list')
class iostat(RamParser):
def __init__(self, *args):
super(iostat, self).__init__(*args)
self.disk_name = []
def io_per_cpu_offset(self, cpu):
per_cpu_offset_addr = self.ramdump.address_of('__per_cpu_offset')
if per_cpu_offset_addr is None:
return 0
per_cpu_offset_addr_indexed = self.ramdump.array_index(
per_cpu_offset_addr, 'unsigned long', cpu)
#instead of read_slong, bd_stats + u64
return self.ramdump.read_u64(per_cpu_offset_addr_indexed)
def klist_node_to_dev(self, klist_node):
dev = self.ramdump.container_of(klist_node, 'struct device', 'knode_class')
if dev is None:
dev_private = self.ramdump.container_of(klist_node, 'struct device_private',
'knode_class')
if dev_private is None:
return None
dev = self.ramdump.read_structure_field(dev_private, 'struct device_private',
'device')
if dev is None:
return None
return dev
def block_class_init_walker(self, klist_node):
dev = self.klist_node_to_dev(klist_node)
if dev is None:
return
block_device = self.ramdump.container_of(dev, 'struct block_device', 'bd_device')
bd_disk = self.ramdump.read_structure_field(block_device, 'struct block_device', 'bd_disk')
disk_name_addr = self.ramdump.struct_field_addr(bd_disk, 'struct gendisk', 'disk_name')
disk_name = self.ramdump.read_cstring(disk_name_addr)
major = self.ramdump.read_structure_field(bd_disk, 'struct gendisk', 'major')
queue = self.ramdump.read_structure_field(bd_disk, 'struct gendisk', 'queue')
part0 = self.ramdump.read_structure_field(bd_disk, 'struct gendisk', 'part0')
bd_stats = self.ramdump.read_structure_field(part0, 'struct block_device', 'bd_stats')
count0 = 0
count1 = 0
if disk_name == None or disk_name in self.disk_name:
return
else:
self.disk_name.append(disk_name)
for i in self.ramdump.iter_cpus():
disk_stats = bd_stats + self.io_per_cpu_offset(i)
in_flight = self.ramdump.struct_field_addr(disk_stats, 'struct disk_stats', 'in_flight')
if self.ramdump.arm64:
in_flight0 = self.ramdump.read_s64(in_flight)
in_flight1 = self.ramdump.read_s64(in_flight + 8)
else:
in_flight0 = self.ramdump.read_s32(in_flight)
in_flight1 = self.ramdump.read_s32(in_flight + 4)
count0 += in_flight0
count1 += in_flight1
self.list_ouput.append([major, bd_disk, disk_name, queue, count0, count1, count0 + count1])
def init_block_class(self):
p = None
block_class = self.ramdump.address_of('block_class')
if block_class is None:
self.output.write("ERROR: 'block_class' not found\n")
return
if self.ramdump.field_offset('struct class', 'p') is None:
classkset_ptr = self.ramdump.read_pointer('class_kset')
classkset_list_ptr = self.ramdump.read_structure_field(classkset_ptr, 'struct kset', 'list.next')
kobj_offset = self.ramdump.field_offset('struct kobject', 'entry')
sp_buf = []
class_subsys_walker = llist.ListWalker(self.ramdump, classkset_list_ptr, kobj_offset)
class_subsys_walker.walk(classkset_list_ptr, self.get_class_to_subsys, block_class, sp_buf)
if bool(sp_buf):
p = sp_buf[0]
else:
p = self.ramdump.read_structure_field(block_class, 'struct class', 'p')
list_head = (p + self.ramdump.field_offset('struct subsys_private', 'klist_devices')
+ self.ramdump.field_offset('struct klist', 'k_list'))
list_offset = self.ramdump.field_offset('struct klist_node', 'n_node')
init_list_walker = llist.ListWalker(self.ramdump, list_head, list_offset)
if not init_list_walker.is_empty():
init_list_walker.walk(init_list_walker.next() + list_offset,
self.block_class_init_walker)
sorted_list = sorted(self.list_ouput, key=lambda l: l[6], reverse=True)
print("MAJOR gendisk NAME request_queue TOTAL ASYNC SYNC \n", file = self.f)
for item in sorted_list:
major = item[0]
bd_disk = item[1]
fops_offset = self.ramdump.field_offset('struct gendisk', 'fops')
fops = self.ramdump.read_pointer(bd_disk + fops_offset)
a_ops_name = self.ramdump.unwind_lookup(fops)
a_ops_string = ''
if a_ops_name is not None:
name, a = a_ops_name
a_ops_string = '[{0}]'.format(name)
else:
a_ops_string = "xxxxxxxxxx"
disk_name = item[2]
queue = item[3]
count0 = item[4]
count1 = item[5]
print("%-6d 0x%x %-8s 0x%x %-8d %-8d %-8d %-16s "
% (major, bd_disk, disk_name, queue, count0 + count1, count0, count1, a_ops_string), file=self.f)
def get_class_to_subsys(self, kobj_addr, block_class, found_sp:list):
    """List-walker callback: map a kobject back to its subsys_private and
    record it in *found_sp* when its 'class' pointer matches *block_class*.

    Only the first match is kept; later invocations become no-ops.
    """
    if found_sp:
        # A previous callback already found the match -- nothing to do.
        return
    rd = self.ramdump
    kset_addr = rd.container_of(kobj_addr, 'struct kset', 'kobj')
    sp_addr = rd.container_of(kset_addr, 'struct subsys_private', 'subsys')
    sp_class = rd.read_structure_field(sp_addr, 'struct subsys_private', 'class')
    if sp_class == block_class:
        found_sp.append(sp_addr)
def parse(self):
    """Entry point: emit the per-disk I/O statistics to <outdir>/iostat.txt.

    Only supported on kernel 5.15+ -- the block-layer structures this
    parser walks are read with the 5.15+ layout.
    """
    if self.ramdump.kernel_version < (5, 15, 0):
        print_out_str('\n kernel 5.15 or above supported \n')
        return
    self.list_ouput = []  # attribute name kept (mis-spelled) for compatibility
    # Context manager guarantees the report file is closed even if the
    # device-list walk raises part-way through.
    with open(self.ramdump.outdir + "/iostat.txt", "w") as self.f:
        self.init_block_class()

View File

@@ -0,0 +1,353 @@
"""
Copyright (c) 2020 The Linux Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of The Linux Foundation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import struct
from print_out import print_out_str
from parser_util import register_parser, RamParser
# Name of the report file opened (via RamDump.open_file) by setup_out_file().
ipa_log = "ipalog.txt"
def setup_out_file(path, self):
    """Open *path* for binary writing via the parser's ramdump helper.

    Stores the handle in the module-level ``out_file`` (consumed by
    print_out_ip) and returns it; returns None when the file cannot
    be opened.

    Note: *self* is the RamParser instance; the (path, self) argument
    order is preserved for backward compatibility with callers.
    """
    global out_file
    try:
        out_file = self.ramdump.open_file(path, 'wb')
        return out_file
    except (OSError, IOError):
        # Narrowed from a bare 'except:': only I/O failures are expected
        # here; anything else should surface as a real error.
        print_out_str("could not open path {0}".format(path))
        print_out_str("Do you have write/read permissions on the path?")
        return None
def print_out_ip(string):
    """Append *string* plus a trailing newline to the module-level
    out_file (opened in binary mode; non-ASCII characters are dropped)."""
    line = '{0}\n'.format(string)
    out_file.write(line.encode('ascii', 'ignore'))
@register_parser('--ipa', 'print ipa_logging information')
class ipa_logging(RamParser):
def __init__(self, *args):
    """Forward all arguments to the RamParser base class."""
    # Zero-argument super() is the modern Python 3 form.
    super().__init__(*args)
def ipa_parse(self, ram_dump):
    """Walk every valid IPA endpoint and print its sys-pipeline state,
    the backing GSI channel (props, ring, stats) and event-ring state,
    then dump the uC HOLB-monitor client table.

    All structure layouts are resolved at runtime from the dump's debug
    info (field_offset/sizeof), so the parser tolerates layout drift as
    long as the field names are unchanged.  Output goes through
    print_out_ip() to the file opened by setup_out_file().
    """
    curr_chan=0
    ep_idx=0
    # 'ipa3_ctx' is a global pointer: read the pointer value to obtain
    # the address of the ipa3_context object itself.
    ipa3_ctx_addr_offset = ram_dump.address_of('ipa3_ctx')
    ipa3_ctx_addr = ram_dump.read_u64(ipa3_ctx_addr_offset)
    # --- offsets inside ipa3_context / ipa3_ep_context / ipa3_sys_context ---
    ep_offset = ram_dump.field_offset('struct ipa3_context','ep')
    ipa_num_pipes_offset = ram_dump.field_offset('struct ipa3_context','ipa_num_pipes')
    valid_offset = ram_dump.field_offset('struct ipa3_ep_context','valid')
    gsi_chan_hdl_offset = ram_dump.field_offset('struct ipa3_ep_context','gsi_chan_hdl')
    gsi_evt_ring_hdl_offset = ram_dump.field_offset('struct ipa3_ep_context','gsi_evt_ring_hdl')
    client_offset = ram_dump.field_offset('struct ipa3_ep_context','client')
    sys_offset = ram_dump.field_offset('struct ipa3_ep_context','sys')
    repl_offset = ram_dump.field_offset('struct ipa3_sys_context','repl')
    len_offset = ram_dump.field_offset('struct ipa3_sys_context','len')
    curr_polling_state_offset = ram_dump.field_offset('struct ipa3_sys_context','curr_polling_state')
    head_idx_offset = ram_dump.field_offset('struct ipa3_repl_ctx','head_idx')
    tail_idx_offset = ram_dump.field_offset('struct ipa3_repl_ctx','tail_idx')
    capacity_offset = ram_dump.field_offset('struct ipa3_repl_ctx','capacity')
    pending_offset = ram_dump.field_offset('struct ipa3_repl_ctx','pending')
    page_recycle_repl_offset = ram_dump.field_offset('struct ipa3_sys_context','page_recycle_repl')
    # NOTE(review): duplicated assignment below -- harmless, same value.
    page_recycle_repl_offset = ram_dump.field_offset('struct ipa3_sys_context','page_recycle_repl')
    ep_addr = ipa3_ctx_addr + ep_offset
    # 'gsi_ctx' is likewise a global pointer to the GSI driver context.
    gsi_ctx_offset = ram_dump.address_of('gsi_ctx')
    gsi_ctx_addr = ram_dump.read_u64(gsi_ctx_offset)
    # --- offsets inside gsi_ctx / gsi_chan_ctx / gsi_chan_props ---
    chan_offset = ram_dump.field_offset('struct gsi_ctx','chan')
    state_offset = ram_dump.field_offset('struct gsi_chan_ctx','state')
    poll_mode_offset = ram_dump.field_offset('struct gsi_chan_ctx','poll_mode')
    ring_offset = ram_dump.field_offset('struct gsi_chan_ctx','ring')
    stats_offset = ram_dump.field_offset('struct gsi_chan_ctx','stats')
    props_offset = ram_dump.field_offset('struct gsi_chan_ctx','props')
    evtr_offset = ram_dump.field_offset('struct gsi_chan_ctx','evtr')
    prot_offset = ram_dump.field_offset('struct gsi_chan_props','prot')
    ch_id_offset = ram_dump.field_offset('struct gsi_chan_props','ch_id')
    # --- offsets inside gsi_chan_stats / gsi_chan_dp_stats ---
    queued_offset = ram_dump.field_offset('struct gsi_chan_stats', 'queued')
    completed_offset = ram_dump.field_offset('struct gsi_chan_stats', 'completed')
    callback_to_poll_offset = ram_dump.field_offset('struct gsi_chan_stats', 'callback_to_poll')
    poll_to_callback_offset = ram_dump.field_offset('struct gsi_chan_stats', 'poll_to_callback')
    poll_pending_irq_offset = ram_dump.field_offset('struct gsi_chan_stats', 'poll_pending_irq')
    invalid_tre_error_offset = ram_dump.field_offset('struct gsi_chan_stats', 'invalid_tre_error')
    poll_ok_offset = ram_dump.field_offset('struct gsi_chan_stats', 'poll_ok')
    poll_empty_offset = ram_dump.field_offset('struct gsi_chan_stats', 'poll_empty')
    userdata_in_use_offset = ram_dump.field_offset('struct gsi_chan_stats', 'userdata_in_use')
    dp_offset = ram_dump.field_offset('struct gsi_chan_stats', 'dp')
    ch_below_lo_offset = ram_dump.field_offset('struct gsi_chan_dp_stats', 'ch_below_lo')
    ch_below_hi_offset = ram_dump.field_offset('struct gsi_chan_dp_stats', 'ch_below_hi')
    ch_above_hi_offset = ram_dump.field_offset('struct gsi_chan_dp_stats', 'ch_above_hi')
    empty_time_offset = ram_dump.field_offset('struct gsi_chan_dp_stats', 'empty_time')
    last_timestamp_offset = ram_dump.field_offset('struct gsi_chan_dp_stats', 'last_timestamp')
    # --- offsets inside gsi_ring_ctx (shared by channel and event rings) ---
    base_va_offset = ram_dump.field_offset('struct gsi_ring_ctx', 'base_va')
    base_offset = ram_dump.field_offset('struct gsi_ring_ctx', 'base')
    wp_offset = ram_dump.field_offset('struct gsi_ring_ctx', 'wp')
    rp_offset = ram_dump.field_offset('struct gsi_ring_ctx', 'rp')
    wp_local_offset = ram_dump.field_offset('struct gsi_ring_ctx', 'wp_local')
    rp_local_offset = ram_dump.field_offset('struct gsi_ring_ctx', 'rp_local')
    rlen_offset = ram_dump.field_offset('struct gsi_ring_ctx', 'len')
    # --- offsets inside gsi_evt_ctx / gsi_evt_stats ---
    evtr_state_offset = ram_dump.field_offset('struct gsi_evt_ctx', 'state')
    evtr_id_offset = ram_dump.field_offset('struct gsi_evt_ctx', 'id')
    evtr_ring_offset = ram_dump.field_offset('struct gsi_evt_ctx', 'ring')
    evtr_stats_offset = ram_dump.field_offset('struct gsi_evt_ctx', 'stats')
    evt_completed_offset = ram_dump.field_offset('struct gsi_evt_stats', 'completed')
    IPA_EP_MAX = ram_dump.read_u32(ipa_num_pipes_offset + ipa3_ctx_addr)
    while ep_idx < IPA_EP_MAX:
        # Endpoint array entry for this pipe index.
        ep_addr_data = ep_addr + ram_dump.sizeof('struct ipa3_ep_context') * ep_idx
        valid = ram_dump.read_u32(valid_offset + ep_addr_data)
        if valid != 0:
            curr_chan = ram_dump.read_u64(gsi_chan_hdl_offset + ep_addr_data)
            client = ram_dump.read_u32(client_offset + ep_addr_data)
            ipa_client_enum = self.ramdump.gdbmi.get_value_of('IPA_CLIENT_IPSEC_ENCAP_ERR_CONS')
            # This means, to accomodate difference in IPA client counts from PL to PL, we are keeping an offset,
            # so that there is no exception due to difference in the upperbound.
            client_count_offset=50
            client_names = ram_dump.gdbmi.get_enum_lookup_table(
                'ipa_client_type',ipa_client_enum+client_count_offset)
            print_out_ip("IPA Pipe: {0}".format(ep_idx))
            print_out_ip("Pipe Name: {0}".format(client_names[client]))
            sys = ram_dump.read_u64(sys_offset + ep_addr_data)
            if sys and sys != 0x0:
                # NOTE(review): 'len' shadows the builtin here; kept as-is.
                len = ram_dump.read_u32(len_offset + sys)
                counter = ram_dump.read_u32(curr_polling_state_offset + sys)
                print_out_ip("sys_len: {0}".format(len))
                print_out_ip("Current Polling State: {0}".format(counter))
                repl = ram_dump.read_u64(repl_offset + sys)
                page_recycle_repl = ram_dump.read_u64(page_recycle_repl_offset + sys)
                # Replenish-cache ring state (regular and page-recycle).
                if repl and repl != 0x0:
                    head_counter = ram_dump.read_u32(head_idx_offset + repl)
                    tail_counter = ram_dump.read_u32(tail_idx_offset + repl)
                    capacity = ram_dump.read_u32(capacity_offset + repl)
                    pending_counter = ram_dump.read_u32(pending_offset + repl)
                    print_out_ip("Repl Cache Head Index: {0}".format(head_counter))
                    print_out_ip("Repl Cache Tail Index: {0}".format(tail_counter))
                    print_out_ip("Repl Capacity: {0}".format(capacity))
                    print_out_ip("Repl Wq Pending: {0}".format(pending_counter))
                if page_recycle_repl and page_recycle_repl != 0x0:
                    head_counter = ram_dump.read_u32(head_idx_offset + page_recycle_repl)
                    tail_counter = ram_dump.read_u32(tail_idx_offset + page_recycle_repl)
                    capacity = ram_dump.read_u32(capacity_offset + page_recycle_repl)
                    print_out_ip("Page Repl Cache Head Index: {0}".format(head_counter))
                    print_out_ip("Page Repl Cache Tail Index: {0}".format(tail_counter))
                    print_out_ip("page Repl Capacity: {0}".format(capacity))
            else:
                print_out_ip("sys is null")
            # NOTE(review): value is printed in decimal despite the 0x prefix.
            print_out_ip("gsi_chan_hdl: 0x{0}".format(curr_chan))
            gsi_evt_ring_hdl = ram_dump.read_u64(gsi_evt_ring_hdl_offset + ep_addr_data)
            print_out_ip("gsi_evt_ring_hdl: 0x{0}".format(gsi_evt_ring_hdl))
            # GSI channel context for this endpoint's channel handle.
            chan = chan_offset + gsi_ctx_addr
            chan_idx_addr = (chan + ram_dump.sizeof('struct gsi_chan_ctx') * curr_chan)
            pros = props_offset + chan_idx_addr
            prot = ram_dump.read_u32(prot_offset + pros)
            prot_enum = ram_dump.gdbmi.get_value_of('GSI_CHAN_PROT_11AD')
            offset_for_prot_names=15 # 15 additional values in the array. When new channels come up, they will be populated here.
            prot_names = ram_dump.gdbmi.get_enum_lookup_table(
                'gsi_chan_prot', prot_enum+offset_for_prot_names)
            ch_id = ram_dump.read_u16(ch_id_offset + chan_idx_addr)
            state = ram_dump.read_u16(state_offset + chan_idx_addr)
            offset_for_gsi_state_enum=5 #5 additional values in the array. When new states come up, they will be populated here.
            state_enum = ram_dump.gdbmi.get_value_of('GSI_CHAN_STATE_ERROR')
            state_names = ram_dump.gdbmi.get_enum_lookup_table(
                'gsi_chan_state', state_enum+offset_for_gsi_state_enum)
            poll_mode = ram_dump.read_u32(poll_mode_offset + chan_idx_addr)
            # Transfer-ring pointers and length.
            ring = ring_offset + chan_idx_addr
            base_va = ram_dump.read_u64(ring + base_va_offset)
            base = ram_dump.read_u64(ring + base_offset)
            wp = ram_dump.read_u64(ring + wp_offset)
            rp = ram_dump.read_u64(ring + rp_offset)
            wp_local = ram_dump.read_u64(ring + wp_local_offset)
            rp_local = ram_dump.read_u64(ring + rp_local_offset)
            rlen = ram_dump.read_u16(ring + rlen_offset)
            # Channel statistics, including data-path (dp) watermarks.
            stats = stats_offset + chan_idx_addr
            queued = ram_dump.read_u64(stats + queued_offset)
            completed = ram_dump.read_u64(stats + completed_offset)
            callback_to_poll = ram_dump.read_u64(stats + callback_to_poll_offset)
            poll_to_callback = ram_dump.read_u64(stats + poll_to_callback_offset)
            poll_pending_irq = ram_dump.read_u64(stats + poll_pending_irq_offset)
            invalid_tre_error = ram_dump.read_u64(stats + invalid_tre_error_offset)
            poll_ok = ram_dump.read_u64(stats + poll_ok_offset)
            poll_empty = ram_dump.read_u64(stats + poll_empty_offset)
            userdata_in_use = ram_dump.read_u64(stats + userdata_in_use_offset)
            dp = stats + dp_offset
            ch_below_lo = ram_dump.read_u64(dp + ch_below_lo_offset)
            ch_below_hi = ram_dump.read_u64(dp + ch_below_hi_offset)
            ch_above_hi = ram_dump.read_u64(dp + ch_above_hi_offset)
            empty_time = ram_dump.read_u64(dp + empty_time_offset)
            last_timestamp = ram_dump.read_u64(dp + last_timestamp_offset)
            print_out_ip("prot: {0}".format(prot_names[prot]))
            print_out_ip("chan_id: {0}".format(ch_id))
            # Out-of-table states are printed numerically.
            if state > state_enum:
                print_out_ip("state: {0}".format(state))
            else:
                print_out_ip("state: {0}".format(state_names[state]))
            print_out_ip("poll_mode_counter: {0}".format(poll_mode))
            print_out_ip("ring (")
            print_out_ip(" base_va:0x{0:02x}".format(base_va))
            print_out_ip(" base:0x{0:02x}".format(base))
            print_out_ip(" wp:0x{0:02x}".format(wp))
            print_out_ip(" rp:0x{0:02x}".format(rp))
            print_out_ip(" wp_local:0x{0:02x}".format(wp_local))
            print_out_ip(" rp_local:0x{0:02x}".format(rp_local))
            print_out_ip(" ring_len: {0}".format(rlen))
            print_out_ip(")")
            print_out_ip("stats (")
            print_out_ip(" queued: {0}".format(queued))
            print_out_ip(" completed: {0}".format(completed))
            print_out_ip(" callback_to_poll: {0}".format(callback_to_poll))
            print_out_ip(" poll_to_callback: {0}".format(poll_to_callback))
            print_out_ip("poll_pending_irq: {0}".format(poll_pending_irq))
            print_out_ip(" invalid_tre_error: {0}".format(invalid_tre_error))
            print_out_ip(" poll_ok: {0}".format(poll_ok))
            print_out_ip(" poll_empty: {0}".format(poll_empty))
            print_out_ip(" userdata_in_use: {0}".format(userdata_in_use))
            print_out_ip(" ep (")
            print_out_ip(" ch_below_lo: {0}".format(ch_below_lo))
            print_out_ip(" ch_below_hi: {0}".format(ch_below_hi))
            print_out_ip(" ch_above_hi: {0}".format(ch_above_hi))
            print_out_ip(" empty_time: {0}".format(empty_time))
            print_out_ip(" last_timestamp: {0}".format(last_timestamp))
            print_out_ip(" )")
            print_out_ip(")")
            # Associated event ring, if the channel has one.
            evtr = ram_dump.read_u64(chan_idx_addr + evtr_offset)
            if evtr and evtr != 0x0:
                evtr_state = ram_dump.read_u16(evtr + evtr_state_offset)
                evtr_state_enum = self.ramdump.gdbmi.get_value_of('GSI_EVT_RING_STATE_ERROR')
                evtr_state_names = ram_dump.gdbmi.get_enum_lookup_table(
                    'gsi_evt_ring_state', evtr_state_enum)
                evtr_id = ram_dump.read_byte(evtr + evtr_id_offset)
                evtr_ring = evtr + evtr_ring_offset
                evtr_ring_base_va = ram_dump.read_u64(evtr_ring + base_va_offset)
                evtr_ring_base = ram_dump.read_u64(evtr_ring + base_offset)
                evtr_ring_wp = ram_dump.read_u64(evtr_ring + wp_offset)
                evtr_ring_rp = ram_dump.read_u64(evtr_ring + rp_offset)
                evtr_ring_wp_local = ram_dump.read_u64(evtr_ring + wp_local_offset)
                evtr_ring_rp_local = ram_dump.read_u64(evtr_ring + rp_local_offset)
                evt_len = ram_dump.read_u16(evtr_ring + rlen_offset)
                evtr_stats = evtr + evtr_stats_offset
                evtr_stats_completed = ram_dump.read_u64(evtr_stats + evt_completed_offset)
                print_out_ip("evtr (")
                print_out_ip(" evtr_state: {0}".format(evtr_state_names[evtr_state]))
                print_out_ip(" evtr_id: {0}".format(evtr_id))
                print_out_ip(" evtr_base_va:0x{0:02x}".format(evtr_ring_base_va))
                print_out_ip(" evtr_base:0x{0:02x}".format(evtr_ring_base))
                print_out_ip(" evtr_wp:0x{0:02x}".format(evtr_ring_wp))
                print_out_ip(" evtr_rp:0x{0:02x}".format(evtr_ring_rp))
                print_out_ip(" evtr_wp_local:0x{0:02x} ".format(evtr_ring_wp_local))
                print_out_ip(" evtr_rp_local:0x{0:02x} ".format(evtr_ring_rp_local))
                print_out_ip(" evtr_len: {0}".format(evt_len))
                print_out_ip(" evtr_completed: {0}".format(evtr_stats_completed))
                print_out_ip(")")
            else:
                print_out_ip("evtr ()")
            print_out_ip(" --- ")
        ep_idx = ep_idx + 1
    #HOLB Stats
    # NOTE(review): uc_inited/uc_loaded/uc_failed/poll_period offsets are
    # computed but never read below -- confirm intentional.
    uc_ctx_offset = ram_dump.field_offset('struct ipa3_context','uc_ctx')
    uc_inited_offset = ram_dump.field_offset('struct ipa3_uc_ctx','uc_inited')
    uc_loaded_offset = ram_dump.field_offset('struct ipa3_uc_ctx','uc_loaded')
    uc_failed_offset = ram_dump.field_offset('struct ipa3_uc_ctx','uc_failed')
    holb_monitor_offset = ram_dump.field_offset('struct ipa3_uc_ctx','holb_monitor')
    num_holb_clients_offset = ram_dump.field_offset('struct ipa_holb_monitor','num_holb_clients')
    poll_period_offset = ram_dump.field_offset('struct ipa_holb_monitor','poll_period')
    max_cnt_wlan_offset = ram_dump.field_offset('struct ipa_holb_monitor','max_cnt_wlan')
    max_cnt_usb_offset = ram_dump.field_offset('struct ipa_holb_monitor','max_cnt_usb')
    max_cnt_11ad_offset = ram_dump.field_offset('struct ipa_holb_monitor','max_cnt_11ad')
    uc_ctx = ipa3_ctx_addr + uc_ctx_offset
    holb_monitor = holb_monitor_offset + uc_ctx
    num_holb_clients = ram_dump.read_u32(holb_monitor + num_holb_clients_offset)
    max_cnt_wlan = ram_dump.read_u32(holb_monitor + max_cnt_wlan_offset)
    max_cnt_usb = ram_dump.read_u32(holb_monitor + max_cnt_usb_offset)
    max_cnt_11ad = ram_dump.read_u32(holb_monitor + max_cnt_11ad_offset)
    # Per-client HOLB monitor records.
    hlob_monitor_client_offset = ram_dump.field_offset('struct ipa_holb_monitor','client')
    gsi_chan_hdl_offset = ram_dump.field_offset('struct ipa_uc_holb_client_info','gsi_chan_hdl')
    action_mask_offset = ram_dump.field_offset('struct ipa_uc_holb_client_info','action_mask')
    max_stuck_cnt_offset = ram_dump.field_offset('struct ipa_uc_holb_client_info','max_stuck_cnt')
    ee_offset = ram_dump.field_offset('struct ipa_uc_holb_client_info','ee')
    debugfs_param_offset = ram_dump.field_offset('struct ipa_uc_holb_client_info','debugfs_param')
    state_offset = ram_dump.field_offset('struct ipa_uc_holb_client_info','state')
    enable_cnt_offset = ram_dump.field_offset('struct ipa_uc_holb_client_info','enable_cnt')
    disable_cnt_offset = ram_dump.field_offset('struct ipa_uc_holb_client_info','disable_cnt')
    current_idx_offset = ram_dump.field_offset('struct ipa_uc_holb_client_info','current_idx')
    events_offset = ram_dump.field_offset('struct ipa_uc_holb_client_info','events')
    hlob_monitor_client_addr = holb_monitor + hlob_monitor_client_offset
    hold_idx=0
    print_out_ip("HOLB Clients Info:")
    while hold_idx < num_holb_clients:
        print_out_ip("")
        hlob_monitor_client = hlob_monitor_client_addr + ram_dump.sizeof('struct ipa_uc_holb_client_info') * hold_idx
        print_out_ip("HOLB Client index: {0}".format(hold_idx))
        gsi_chan_hdl = ram_dump.read_u16(hlob_monitor_client + gsi_chan_hdl_offset)
        action_mask = ram_dump.read_u32(hlob_monitor_client + action_mask_offset)
        max_stuck_cnt = ram_dump.read_u32(hlob_monitor_client + max_stuck_cnt_offset)
        ee = ram_dump.read_byte(hlob_monitor_client + ee_offset)
        debugfs_param = ram_dump.read_bool(hlob_monitor_client + debugfs_param_offset)
        state = ram_dump.read_u64(hlob_monitor_client + state_offset)
        enable_cnt = ram_dump.read_u64(hlob_monitor_client + enable_cnt_offset)
        disable_cnt = ram_dump.read_u64(hlob_monitor_client + disable_cnt_offset)
        current_idx = ram_dump.read_u64(hlob_monitor_client + current_idx_offset)
        events = ram_dump.read_u64(hlob_monitor_client + events_offset)
        print_out_ip("HOLB GSI Channel Handle: {0}".format(gsi_chan_hdl))
        print_out_ip("HOLB Action Mask: {0}".format(action_mask))
        print_out_ip("HOLB MAX Stuck Count: {0}".format(max_stuck_cnt))
        print_out_ip("HOLB EE: {0}".format(ee))
        print_out_ip("HOLB enabled via debugfs: {0}".format(debugfs_param))
        print_out_ip("HOLB State: {0}".format(state))
        print_out_ip("HOLB enable cnt: {0}".format(enable_cnt))
        print_out_ip("HOLB disable cnt: {0}".format(disable_cnt))
        print_out_ip("HOLB Events current idx: {0}".format(current_idx))
        print_out_ip("")
        print_out_ip("Events {0}".format(events))
        print_out_ip("")
        hold_idx=hold_idx+1
def parse(self):
    """Parser entry point: open the ipa log file and dump IPA/GSI state."""
    # setup_out_file returns None when the output file could not be
    # opened; bail out early instead of letting print_out_ip fail on a
    # missing or stale module-level handle.
    if setup_out_file(ipa_log, self) is None:
        return
    self.ipa_parse(self.ramdump)

View File

@@ -0,0 +1,255 @@
"""
Copyright (c) 2015, 2020 The Linux Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of The Linux Foundation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Changes from Qualcomm Innovation Center are provided under the following license:
Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause-Clear
IPC Logging Extraction Tool
---------------------------
Can be used on RAM dumps (files containing binary data that are produced after
a crash) to extract logs generated by IPC Logging. The dumps must be given in
the order they were written, so that all data can be found.
"""
import os
import struct
from print_out import print_out_str
from parser_util import register_parser, RamParser
import linux_list as llist
# TSV (type-size-value) element type tags found inside an IPC log page;
# decode_buffer() matches these against each element header.
TSV_TYPE_INVALID = 0
TSV_TYPE_TIMESTAMP = 1
TSV_TYPE_POINTER = 2
TSV_TYPE_INT32 = 3
TSV_TYPE_BYTE_ARRAY = 4
TSV_TYPE_QTIMER = 5
# Magic values used to sanity-check ipc_log_context / ipc_log_page_header
# before trusting any pointers read out of the dump.
IPC_LOG_CONTEXT_MAGIC_NUM = 0x25874452
IPC_LOGGING_MAGIC_NUM = 0x52784425
@register_parser('--ipc_logging', 'print ipc_logging information')
class ipc_logging_cn(RamParser):
def __init__(self, *args):
    """Forward all arguments to the RamParser base class."""
    # Zero-argument super() is the modern Python 3 form.
    super().__init__(*args)
def next_page(self, ipc_log_context, curr_read_page):
    """Return the ipc_log_page following *curr_read_page* on the context's
    circular page list, stepping over the list-head sentinel node."""
    rd = self.ramdump
    hdr = rd.struct_field_addr(
        curr_read_page, 'struct ipc_log_page', 'hdr')
    link = rd.struct_field_addr(
        hdr, 'struct ipc_log_page_header', 'list')
    successor = rd.read_word(link)
    # The context's page_list head is not a real page; skip it when the
    # walk wraps around.
    head = rd.struct_field_addr(
        ipc_log_context, 'struct ipc_log_context', 'page_list')
    if successor == head:
        successor = rd.read_word(successor)
    page_hdr = rd.container_of(
        successor, 'struct ipc_log_page_header', 'list')
    return rd.container_of(page_hdr, 'struct ipc_log_page', 'hdr')
def decode_buffer(self, final_buf, output):
    """Decode the concatenated TSV log records in *final_buf* (bytes) and
    write formatted text lines to *output*.

    Each record is a tsv_header (type byte, size byte) followed by nested
    TSV elements; the expected element sequence per record is
    TIMESTAMP [QTIMER] BYTE_ARRAY.  Missing elements are skipped.
    """
    tsv_header_struct_size = self.ramdump.sizeof('struct tsv_header')
    pos = 0
    while pos < len(final_buf):
        # Outer record header: type tag and total payload size.
        tsv_msg_type, tsv_msg_size = struct.unpack_from("BB", final_buf, pos)
        pos += tsv_header_struct_size
        this_msg = pos
        # TSV_TYPE_TIMESTAMP
        # NOTE(review): TimeStamp stays unbound if the first element is
        # not a timestamp -- assumed always present in practice.
        this_msg_type, this_msg_size = struct.unpack_from("BB", final_buf, this_msg)
        if this_msg_type == TSV_TYPE_TIMESTAMP:
            this_msg = this_msg + tsv_header_struct_size
            # 4-byte values use 'L', 8-byte values use 'Q'.
            fmt = 'L'
            if this_msg_size == 8:
                fmt = 'Q'
            TimeStamp, = struct.unpack_from(fmt, final_buf, this_msg)
            this_msg = this_msg + this_msg_size
        # TSV_TYPE_QTIMER
        TimeQtimer = 0
        this_msg_type, this_msg_size = struct.unpack_from("BB", final_buf, this_msg)
        if this_msg_type == TSV_TYPE_QTIMER:
            this_msg = this_msg + tsv_header_struct_size
            fmt = 'L'
            if this_msg_size == 8:
                fmt = 'Q'
            TimeQtimer, = struct.unpack_from(fmt, final_buf, this_msg)
            this_msg = this_msg + this_msg_size
        # TSV_TYPE_BYTE_ARRAY
        this_msg_type, this_msg_size = struct.unpack_from("BB", final_buf, this_msg)
        if this_msg_type == TSV_TYPE_BYTE_ARRAY:
            this_msg = this_msg + tsv_header_struct_size
            output_str = final_buf[this_msg:this_msg + this_msg_size].decode('utf-8', 'ignore')
            # The payload may already carry a newline; only append one
            # when it does not, so lines are not doubled.
            if '\n' in output_str:
                output.write(
                    "[ {0:10.9f} 0x{1:x}] {2}".format(
                        TimeStamp / 1000000000.0, TimeQtimer, output_str))
            else:
                output.write(
                    "[ {0:10.9f} 0x{1:x}] {2}\n".format(
                        TimeStamp / 1000000000.0, TimeQtimer, output_str))
        # Advance by the outer record's declared payload size.
        pos += tsv_msg_size
def do_ipc_log_context_parser(self, ipc_log_context):
    """Extract one IPC log: gather the unread bytes from the context's
    circular page chain into a single buffer, decode it, and write the
    result to <output_dir>/<name>.txt.
    """
    name_addr = self.ramdump.struct_field_addr(
        ipc_log_context, 'struct ipc_log_context', 'name')
    name = self.ramdump.read_cstring(name_addr)
    # Start reading at the context's non-destructive read cursor page.
    nd_read_page = self.ramdump.read_structure_field(
        ipc_log_context, 'struct ipc_log_context', 'nd_read_page')
    start_read_page = nd_read_page
    curr_read_page = start_read_page
    hdr_addr = self.ramdump.struct_field_addr(
        start_read_page, 'struct ipc_log_page', 'hdr')
    magic = self.ramdump.read_structure_field(
        hdr_addr, 'struct ipc_log_page_header', 'magic')
    if magic != IPC_LOGGING_MAGIC_NUM:
        # Corrupt or stale page -- do not trust anything else in it.
        print_out_str("wrong magic in ipc_log_page_header 0x{0:x} "
                      .format(hdr_addr))
        return
    file_name = "{0}.txt".format(name)
    f_path = os.path.join(self.output_dir, file_name)
    # NOTE(review): 'output' is not closed if decode_buffer raises;
    # callers swallow the exception, so the handle would leak.
    output = open(f_path, "w")
    output.write("=====================================================\n")
    output.write("IPC log for {0} \nv.v (struct ipc_log_context *)0x{1:x}\n"
                 .format(name, ipc_log_context))
    output.write("=====================================================\n")
    # Usable log bytes per page = page struct size minus its header.
    LOG_PAGE_DATA_OFFSET = self.ramdump.field_offset(
        'struct ipc_log_page', 'data')
    STRUCT_IPC_LOG_PAGE_SIZE = self.ramdump.sizeof('struct ipc_log_page')
    LOG_PAGE_DATA_SIZE = STRUCT_IPC_LOG_PAGE_SIZE - LOG_PAGE_DATA_OFFSET
    nd_read_offset = self.ramdump.read_structure_field(
        hdr_addr, 'struct ipc_log_page_header', 'nd_read_offset')
    write_offset = self.ramdump.read_structure_field(
        hdr_addr, 'struct ipc_log_page_header', 'write_offset')
    # Writer behind reader on the start page means the log wrapped and
    # the start page's leading bytes must be appended at the end.
    if nd_read_offset <= write_offset:
        wrapped_around = 0
    else:
        wrapped_around = 1
    final_buf = bytes()
    stop_copy = 0
    while stop_copy != 1:
        hdr_addr = self.ramdump.struct_field_addr(
            curr_read_page, 'struct ipc_log_page', 'hdr')
        nd_read_offset = self.ramdump.read_structure_field(
            hdr_addr, 'struct ipc_log_page_header', 'nd_read_offset')
        write_offset = self.ramdump.read_structure_field(
            hdr_addr, 'struct ipc_log_page_header', 'write_offset')
        if nd_read_offset is None:
            break
        start_addr = curr_read_page + LOG_PAGE_DATA_OFFSET + nd_read_offset
        # Copy either up to the writer (same page) or to the page end.
        if nd_read_offset <= write_offset:
            bytes_to_copy = write_offset - nd_read_offset
        else:
            bytes_to_copy = LOG_PAGE_DATA_SIZE - nd_read_offset
        if bytes_to_copy < 0:
            stop_copy = 1
            break
        next_page = self.next_page(ipc_log_context, curr_read_page)
        # A full lap around the circular list terminates the walk.
        if next_page == start_read_page:
            stop_copy = 1
        buf = self.ramdump.read_physical(
            self.ramdump.virt_to_phys(start_addr), bytes_to_copy)
        final_buf += buf
        # Non-wrapped log that ends mid-page: nothing further to read.
        if (wrapped_around == 0) and (write_offset < LOG_PAGE_DATA_SIZE):
            break
        curr_read_page = next_page
    if wrapped_around == 1:
        # Append the start page's bytes that precede the read cursor.
        hdr_addr = self.ramdump.struct_field_addr(
            start_read_page, 'struct ipc_log_page', 'hdr')
        write_offset = self.ramdump.read_structure_field(
            hdr_addr, 'struct ipc_log_page_header', 'write_offset')
        bytes_to_copy = write_offset
        start_addr = start_read_page + LOG_PAGE_DATA_OFFSET
        buf = self.ramdump.read_physical(
            self.ramdump.virt_to_phys(start_addr), bytes_to_copy)
        final_buf += buf
    self.decode_buffer(final_buf, output)
    output.close()
def ipc_log_context_list_func(self, ipc_log_context):
    """List-walker callback: validate one ipc_log_context and extract it.

    A context with a bad magic, or one that raises while being parsed,
    is reported and skipped so the remaining contexts still get
    extracted.
    """
    magic = self.ramdump.read_structure_field(
        ipc_log_context, 'struct ipc_log_context', 'magic')
    if magic != IPC_LOG_CONTEXT_MAGIC_NUM:
        print_out_str("wrong magic in ipc_log_context 0x{0:x} "
                      .format(ipc_log_context))
        return
    try:
        self.do_ipc_log_context_parser(ipc_log_context)
    except Exception as err:
        # Best-effort per context: log the failure instead of silently
        # swallowing it (was a bare 'except: pass'), then keep walking.
        print_out_str("failed to parse ipc_log_context 0x{0:x}: {1}"
                      .format(ipc_log_context, err))
def get_ipc_log_context_list(self, ram_dump):
    """Walk the kernel's global ipc_log_context_list and parse every
    context found on it."""
    head = ram_dump.address_of('ipc_log_context_list')
    node_offset = self.ramdump.field_offset(
        'struct ipc_log_context', 'list')
    walker = llist.ListWalker(self.ramdump, head, node_offset)
    walker.walk(head, self.ipc_log_context_list_func)
def parse(self):
    """Entry point: extract each IPC log into <outdir>/ipc_logging/<name>.txt.

    Minidumps locate contexts through the md_ipc_ctxt* files of the
    autodump directory (matched against ELF section names); full dumps
    walk the kernel's global context list.
    """
    self.output_dir = os.path.join(os.path.abspath(
        self.ramdump.outdir), "ipc_logging")
    # exist_ok replaces the racy exists()-then-makedirs pattern.
    os.makedirs(self.output_dir, exist_ok=True)
    if self.ramdump.minidump and self.ramdump.autodump:
        for file in os.listdir(self.ramdump.autodump):
            if not file.startswith('md_ipc_ctxt'):
                continue
            # Section name is the autodump file name minus 'md_' prefix
            # and extension.
            ipc_file = os.path.splitext(file)[0].replace("md_", "")
            ipc_seg = next((s for s in self.ramdump.elffile.iter_sections()
                            if s.name == ipc_file), None)
            if ipc_seg is not None:
                self.ipc_log_context_list_func(ipc_seg['sh_addr'])
    else:
        self.get_ipc_log_context_list(self.ramdump)

View File

@@ -0,0 +1,299 @@
# Copyright (c) 2012-2015, 2017, 2020 The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from print_out import print_out_str
from parser_util import register_parser, RamParser
import maple_tree
@register_parser('--print-irqs', 'Print all the irq information', shortopt='-i')
class IrqParse(RamParser):
def print_irq_state_3_0(self, ram_dump):
    """Print the IRQ table (irq number, hwirq, affinity mask, per-cpu
    counts, action name, chip name) for kernels using a flat irq_desc[]
    array layout.
    """
    print_out_str(
        '=========================== IRQ STATE ===============================')
    per_cpu_offset_addr = ram_dump.address_of('__per_cpu_offset')
    irq_desc = ram_dump.address_of('irq_desc')
    _, irq_desc_size = ram_dump.unwind_lookup(irq_desc, 1)
    irq_num_offset = ram_dump.field_offset('struct irq_data', 'irq')
    hwirq_num_offset = ram_dump.field_offset('struct irq_data', 'hwirq')
    # v4.4 moved the affinity mask from irq_data into irq_common_data.
    if ram_dump.kernel_version >= (4, 4, 0):
        affinity_offset = ram_dump.field_offset(
            'struct irq_common_data', 'affinity')
        irq_common_data_offset = ram_dump.field_offset(
            'struct irq_desc', 'irq_common_data')
    else:
        affinity_offset = ram_dump.field_offset(
            'struct irq_data', 'affinity')
    irq_data_offset = ram_dump.field_offset('struct irq_desc', 'irq_data')
    irq_chip_offset = ram_dump.field_offset('struct irq_data', 'chip')
    irq_action_offset = ram_dump.field_offset('struct irq_desc', 'action')
    action_name_offset = ram_dump.field_offset('struct irqaction', 'name')
    kstat_irqs_offset = ram_dump.field_offset(
        'struct irq_desc', 'kstat_irqs')
    chip_name_offset = ram_dump.field_offset('struct irq_chip', 'name')
    irq_desc_entry_size = ram_dump.sizeof('irq_desc[0]')
    cpu_str = ''
    for i in ram_dump.iter_cpus():
        cpu_str = cpu_str + '{0:10} '.format('CPU{0}'.format(i))
    print_out_str('{0:4} {1:12} {2:10} {3} {4:30} {5:10}'
                  .format('IRQ', 'HWIRQ', 'affinity',
                          cpu_str, 'Name', 'Chip'))
    for i in range(0, irq_desc_size, irq_desc_entry_size):
        irqnum = ram_dump.read_word(irq_desc + i + irq_num_offset)
        hwirq = ram_dump.read_word(irq_desc + i + hwirq_num_offset)
        # BUGFIX: the per-descriptor stride 'i' was missing from both
        # affinity reads, so every row reported irq_desc[0]'s affinity.
        if ram_dump.kernel_version >= (4, 4, 0):
            affinity = ram_dump.read_int(
                irq_desc + i + irq_common_data_offset + affinity_offset)
        else:
            affinity = ram_dump.read_int(
                irq_desc + i + affinity_offset)
        action = ram_dump.read_word(irq_desc + i + irq_action_offset)
        kstat_irqs_addr = ram_dump.read_word(
            irq_desc + i + kstat_irqs_offset)
        irq_stats_str = ''
        for j in ram_dump.iter_cpus():
            # kstat_irqs is a percpu pointer: add each CPU's base offset.
            if per_cpu_offset_addr is None:
                offset = 0
            else:
                offset = ram_dump.read_word(per_cpu_offset_addr + 4 * j)
            irq_statsn = ram_dump.read_word(kstat_irqs_addr + offset)
            irq_stats_str = irq_stats_str + \
                '{0:10} '.format('{0}'.format(irq_statsn))
        chip = ram_dump.read_word(
            irq_desc + i + irq_data_offset + irq_chip_offset)
        chip_name_addr = ram_dump.read_word(chip + chip_name_offset)
        chip_name = ram_dump.read_cstring(chip_name_addr, 48)
        # Only rows with a registered action (a real handler) are printed.
        if action != 0:
            name_addr = ram_dump.read_word(action + action_name_offset)
            name = ram_dump.read_cstring(name_addr, 48)
            fmt = '{0:4} {1:12} {2:10} {3} {4:30} {5:10}'
            print_out_str(
                fmt.format(irqnum, hex(hwirq), hex(affinity),
                           irq_stats_str, name, chip_name))
def shift_to_maxindex(self, shift):
    """Largest index reachable at a radix-tree level with the given shift."""
    # CONFIG_BASE_SMALL shrinks RADIX_TREE_MAP_SHIFT from 6 to 4.
    small = int(self.ramdump.get_config_val("CONFIG_BASE_SMALL")) == 1
    map_shift = 4 if small else 6
    return (1 << (map_shift + shift)) - 1
def is_internal_node(self, addr):
    """True when the low entry bits tag *addr* as an internal node pointer."""
    # The internal-entry tag moved from 0x1 to 0x2 after v4.20 (xarray
    # conversion); the two-bit tag mask is unchanged.
    if self.ramdump.kernel_version > (4, 20, 0):
        internal_tag = 0x2
    else:
        internal_tag = 0x1
    return (addr & 0x3) == internal_tag
def entry_to_node(self, addr):
    """Strip the internal-node tag bit from a radix-tree / xarray entry,
    yielding the node's real address."""
    post_xarray = self.ramdump.kernel_version > (4, 20, 0)
    # xarray tags internal entries with bit 1; the older radix tree
    # used bit 0.
    mask = 0xfffffffffffffffd if post_xarray else 0xfffffffffffffffe
    return addr & mask
    def radix_tree_lookup_element_v2(self, ram_dump, root_addr, index):
        """Look up slot *index* in the radix tree / xarray rooted at *root_addr*.

        Used for v4.9+ kernels; handles both the xarray layout (>4.20) and
        the older radix_tree layout.  Returns the stored entry value, or
        None when the index is out of range or the slot is empty.
        """
        if self.ramdump.kernel_version > (4, 20,0 ):
            # v4.20+: the radix tree was replaced by the xarray.
            rnode_offset = ram_dump.field_offset('struct xarray', 'xa_head')
            rnode_shift_offset = ram_dump.field_offset('struct xa_node', 'shift')
            slots_offset = ram_dump.field_offset('struct xa_node', 'slots')
            pointer_size = ram_dump.sizeof('struct xa_node *')
        else:
            rnode_offset = ram_dump.field_offset('struct radix_tree_root', 'rnode')
            rnode_shift_offset = ram_dump.field_offset('struct radix_tree_node', 'shift')
            slots_offset = ram_dump.field_offset('struct radix_tree_node', 'slots')
            pointer_size = ram_dump.sizeof('struct radix_tree_node *')
        # if CONFIG_BASE_SMALL=0: radix_tree_map_shift = 6
        maxindex = 0
        radix_tree_map_shift = 6
        radix_tree_map_mask = 0x3f
        # if CONFIG_BASE_SMALL=1: radix_tree_map_shift = 4
        if int(ram_dump.get_config_val("CONFIG_BASE_SMALL")) == 1:
            radix_tree_map_shift = 4
            radix_tree_map_mask = 0xf
        rnode_addr = ram_dump.read_word(root_addr + rnode_offset)
        if self.is_internal_node(rnode_addr):
            # Root points at an internal node: its 'shift' gives the tree
            # height, from which the maximum representable index follows.
            node_addr = self.entry_to_node(rnode_addr)
            shift = ram_dump.read_byte(node_addr + rnode_shift_offset)
            maxindex = self.shift_to_maxindex(shift)
        # NOTE(review): when the root is a direct (leaf) entry, maxindex
        # stays 0, so only index 0 can match — presumably intended.
        if index > maxindex:
            return None
        # Descend one tree level per iteration until a leaf entry is reached.
        while self.is_internal_node(rnode_addr):
            parent_addr = self.entry_to_node(rnode_addr)
            parent_shift = ram_dump.read_byte(parent_addr + rnode_shift_offset)
            offset = (index >> parent_shift) & radix_tree_map_mask
            rnode_addr = ram_dump.read_word(parent_addr + slots_offset +
                                            (offset * pointer_size))
            if rnode_addr == 0:
                return None
        return rnode_addr
def radix_tree_lookup_element(self, ram_dump, root_addr, index):
rnode_offset = ram_dump.field_offset('struct radix_tree_root', 'rnode')
if (ram_dump.kernel_version[0], ram_dump.kernel_version[1]) >= (3, 18):
rnode_height_offset = ram_dump.field_offset(
'struct radix_tree_node', 'path')
else:
rnode_height_offset = ram_dump.field_offset(
'struct radix_tree_node', 'height')
slots_offset = ram_dump.field_offset('struct radix_tree_node', 'slots')
pointer_size = ram_dump.sizeof('struct radix_tree_node *')
# if CONFIG_BASE_SMALL=0: radix_tree_map_shift = 6
radix_tree_map_shift = 6
radix_tree_map_mask = 0x3f
radix_tree_height_mask = 0xfff
height_to_maxindex = [0x0, 0x3F, 0x0FFF,
0x0003FFFF, 0x00FFFFFF, 0x3FFFFFFF, 0xFFFFFFFF]
if ram_dump.read_word(root_addr + rnode_offset) & 1 == 0:
if index > 0:
return None
return (ram_dump.read_word(root_addr + rnode_offset) & 0xfffffffffffffffe)
node_addr = ram_dump.read_word(root_addr + rnode_offset) & 0xfffffffffffffffe
height = ram_dump.read_int(node_addr + rnode_height_offset)
if (ram_dump.kernel_version[0], ram_dump.kernel_version[1]) >= (3, 18):
height = height & radix_tree_height_mask
if height > len(height_to_maxindex):
return None
if index > height_to_maxindex[height]:
return None
shift = (height - 1) * radix_tree_map_shift
for h in range(height, 0, -1):
node_addr = ram_dump.read_word(
node_addr + slots_offset + ((index >> shift) & radix_tree_map_mask) * pointer_size)
if node_addr == 0:
return None
shift -= radix_tree_map_shift
return (node_addr & 0xfffffffffffffffe)
def save_irq_desc(self, node, irq_desc):
if node:
irq_desc.append(node)
    def dump_sparse_irq_state(self, irq_desc_addr):
        """Print a one-line summary (irq, hwirq, affinity, per-CPU counts,
        name, chip, struct address) for a single struct irq_desc."""
        irq_desc = self.ramdump.read_datatype(irq_desc_addr, 'struct irq_desc', attr_list=['irq_data', 'irq_common_data', 'irq_count', 'action', 'kstat_irqs'])
        irqnum = irq_desc.irq_data.irq
        hwirq = irq_desc.irq_data.hwirq
        # v4.4 moved the affinity mask from irq_data into irq_common_data.
        if self.ramdump.kernel_version >= (4,4,0):
            affinity = irq_desc.irq_common_data.affinity.bits[0] & 0xFFFFFFFF
        else:
            affinity = irq_desc.irq_data.affinity.bits[0] & 0xFFFFFFFF
        irqcount = irq_desc.irq_count
        action = irq_desc.action
        irq_stats_str = ''
        if irq_desc.kstat_irqs is None:
            return
        # Build the per-CPU interrupt-count columns.
        for j in self.ramdump.iter_cpus():
            irq_statsn = self.ramdump.read_int(irq_desc.kstat_irqs, cpu=j)
            irq_stats_str = irq_stats_str + \
                '{0:10} '.format('{0}'.format(irq_statsn))
        try:
            chip = self.ramdump.read_datatype(irq_desc.irq_data.chip, 'struct irq_chip', attr_list=['name'])
            chip_name = self.ramdump.read_cstring(chip.name, 48)
        except:
            # The chip pointer may be bogus in a partial dump; keep going.
            chip_name = "None"
        # Only descriptors with a registered handler (action) are printed.
        if irq_desc.action != 0:
            irqaction = self.ramdump.read_datatype(irq_desc.action, 'struct irqaction', attr_list=['name'])
            if not irqaction.name:
                name = "None"
            else:
                name = self.ramdump.read_cstring(irqaction.name, 48)
            str = "{0:4} {1:12} {2:10} {3} {4:30} {5:15} " \
                  "v.v (struct irq_desc *)0x{6:<20x}"
            print_out_str(
                str.format(irqnum, hex(hwirq), hex(affinity),
                           irq_stats_str, name, chip_name, irq_desc_addr))
def get_all_irqs_desc(self, ram_dump):
irq_desc_tree = ram_dump.address_of('irq_desc_tree')
nr_irqs = ram_dump.read_int(ram_dump.address_of('nr_irqs'))
major, minor, patch = ram_dump.kernel_version
irq_descs = []
if (major, minor) >= (6, 4):
mt_walk = maple_tree.MapleTreeWalker(ram_dump)
sparse_irqs_addr = ram_dump.address_of('sparse_irqs')
mt_walk.walk(sparse_irqs_addr, self.save_irq_desc, irq_descs)
else:
for i in range(0, nr_irqs):
if (major, minor) >= (4, 9):
irq_desc = self.radix_tree_lookup_element_v2(
ram_dump, irq_desc_tree, i)
else:
irq_desc = self.radix_tree_lookup_element(
ram_dump, irq_desc_tree, i)
if irq_desc is None:
continue
irq_descs.append(irq_desc)
return irq_descs
def print_irq_state_sparse_irq(self, ram_dump):
irq_descs = []
nr_irqs = ram_dump.read_int(ram_dump.address_of('nr_irqs'))
cpu_str = ''
for i in ram_dump.iter_cpus():
cpu_str = cpu_str + '{0:10} '.format('CPU{0}'.format(i))
print_out_str('{0:4} {1:12} {2:10} {3} {4:30} {5:15} {6:20}'\
.format('IRQ', 'HWIRQ', 'affinity', cpu_str,
'Name', 'Chip', 'IRQ Structure'))
if nr_irqs > 50000:
return
irq_descs = self.get_all_irqs_desc(ram_dump)
for i in range(len(irq_descs)):
self.dump_sparse_irq_state(irq_descs[i])
def parse(self):
irq_desc = self.ramdump.address_of('irq_desc')
if self.ramdump.is_config_defined('CONFIG_SPARSE_IRQ'):
self.print_irq_state_sparse_irq(self.ramdump)
if irq_desc is None:
return
self.print_irq_state_3_0(self.ramdump)

View File

@@ -0,0 +1,96 @@
# Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser
@register_parser('--kbootlog', 'Print the kernel boot log', shortopt='-k')
class KBootLog(RamParser):
    """Extract the saved kernel boot log from a ramdump into
    kernel_boot_log.txt, handling the different storage formats used
    across kernel versions."""

    def __init__(self, *args):
        super(KBootLog, self).__init__(*args)
        # Number of times record iteration has wrapped past the ring end.
        self.wrap_cnt = 0
        self.outfile = self.ramdump.open_file('kernel_boot_log.txt')
        # Older kernels named the record type 'struct log'; newer ones use
        # 'struct printk_log'.
        if (self.ramdump.sizeof('struct printk_log') is None):
            self.struct_name = 'struct log'
        else:
            self.struct_name = 'struct printk_log'

    def parse(self):
        """Dispatch on kernel version: v5.10+ stores a flat text snapshot,
        older kernels store printk records."""
        if self.ramdump.kernel_version >= (5, 10):
            self.extract_kernel_boot_log()
        else:
            self.extract_kboot_log()

    def log_next(self, idx, logbuf):
        """Return the address of the printk record following *idx*; a
        zero-length record marks the ring wrap point, so return *logbuf*
        (the buffer start) and count the wrap."""
        len_offset = self.ramdump.field_offset(self.struct_name, 'len')
        msg = idx
        msg_len = self.ramdump.read_u16(msg + len_offset)
        if (msg_len == 0):
            self.wrap_cnt += 1
            return logbuf
        else:
            return idx + msg_len

    def extract_kernel_boot_log(self):
        """v5.10+: the boot log is a plain byte buffer; dump it verbatim
        (NULs stripped, non-ASCII ignored)."""
        logbuf_addr = self.ramdump.read_word(self.ramdump.address_of(
            'boot_log_buf'))
        logbuf_size = self.ramdump.read_u32("boot_log_buf_size")
        if logbuf_size is None:
            # Variant without an explicit size symbol: derive the size from
            # the current write position plus the remaining free space.
            logbuf_pos = self.ramdump.read_word(self.ramdump.address_of(
                'boot_log_pos'))
            logbuf_left = self.ramdump.read_u32("boot_log_buf_left")
            if logbuf_pos is not None and logbuf_left is not None:
                logbuf_size = logbuf_pos - logbuf_addr + logbuf_left
            else:
                # Fall back to the default 512 KiB buffer size.
                logbuf_size = 524288
        if logbuf_addr:
            data = self.ramdump.read_binarystring(logbuf_addr, logbuf_size)
            self.outfile.write(data.decode('ascii', 'ignore').replace('\x00', ''))
        else:
            self.outfile.write("kernel boot log support is not present\n")
            return

    def extract_kboot_log(self):
        """Pre-5.10: walk the printk records saved in the boot log ring
        buffer and render them as '[seconds.micros] text' lines."""
        last_idx_addr = self.ramdump.address_of('log_next_idx')
        logbuf_addr = None
        # The buffer symbol name differs by kernel version.
        if self.ramdump.kernel_version >= (5, 4):
            logbuf_addr = self.ramdump.read_word(self.ramdump.address_of(
                'boot_log_buf'))
        elif self.ramdump.kernel_version >= (4, 19):
            logbuf_addr = self.ramdump.read_word(self.ramdump.address_of(
                'init_log_buf'))
        if logbuf_addr is None:
            self.outfile.write("kernel boot log support is not present\n")
            return
        time_offset = self.ramdump.field_offset(self.struct_name, 'ts_nsec')
        text_len_offset = self.ramdump.field_offset(self.struct_name,
                                                    'text_len')
        log_size = self.ramdump.sizeof(self.struct_name)
        first_idx = 0
        last_idx = self.ramdump.read_u32(last_idx_addr)
        curr_idx = logbuf_addr + first_idx
        # Stop at the producer index, or after one wrap (anything beyond
        # the wrap would be stale data).
        while curr_idx != logbuf_addr + last_idx and self.wrap_cnt < 1:
            timestamp = self.ramdump.read_dword(curr_idx + time_offset)
            text_len = self.ramdump.read_u16(curr_idx + text_len_offset)
            # Record text follows the header and may contain newlines.
            text_str = self.ramdump.read_cstring(curr_idx + log_size, text_len)
            for partial in text_str.split('\n'):
                if text_len == 0:
                    break
                f = '[{0:>5}.{1:0>6d}] {2}\n'.format(
                    timestamp // 1000000000, (timestamp % 1000000000) //
                    1000, partial)
                self.outfile.write(f)
            curr_idx = self.log_next(curr_idx, logbuf_addr)

View File

@@ -0,0 +1,107 @@
# Copyright (c) 2017, 2020 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import re
from parser_util import register_parser, RamParser
file_names = ['MSM_DUMP_DATA_L1_DATA_CACHE_0x0',
'MSM_DUMP_DATA_L1_DATA_CACHE_0x1',
'MSM_DUMP_DATA_L1_DATA_CACHE_0x2',
'MSM_DUMP_DATA_L1_DATA_CACHE_0x3',
'MSM_DUMP_DATA_L1_DATA_CACHE_0x4',
'MSM_DUMP_DATA_L1_DATA_CACHE_0x5',
'MSM_DUMP_DATA_L1_DATA_CACHE_0x6',
'MSM_DUMP_DATA_L1_DATA_CACHE_0x7']
@register_parser('--l1-compare', 'Compare L1 Cache Data with DDR contents')
class L1_Cache_Compare(RamParser):
    """Compare dumped L1 data-cache lines against the corresponding DDR
    contents and report any lines whose cached data differs from memory.

    Reads the per-CPU MSM_DUMP_DATA_L1_DATA_CACHE_* files produced earlier
    in the parse and writes mismatching lines to l1_cache.txt.
    """

    def parse(self):
        with self.ramdump.open_file('l1_cache.txt') as out_l1_cache:
            for file_name in file_names:
                file_path = os.path.join(self.ramdump.outdir, file_name)
                if not os.path.isfile(file_path):
                    continue
                out_l1_cache.write('\n-----begin ' + file_name + '-----\n')
                fin = self.ramdump.open_file(file_path, 'r')
                for line in fin:
                    if line == '':
                        continue
                    elif re.match('^Way Set P MOESI RAW_MOESI N.*', line):
                        # Column header line; skip.
                        continue
                    elif len(line) >= 195:
                        colm = line.split()
                        if len(colm) < 28:
                            out_l1_cache.write('Unexpected file format\n')
                            break
                        # Physical address of this cache line.
                        addr = colm[6]
                        # The 16 32-bit data words recorded for the line.
                        values_from_file = colm[11:27]
                        # Read the same 16 words straight from the dump.
                        values_from_dump = []
                        temp_addr = int(addr, 16)
                        if temp_addr > self.ramdump.phys_offset:
                            for i in range(0, 16):
                                val = None
                                try:
                                    val = self.ramdump.read_physical(
                                        temp_addr, 4)
                                except Exception:
                                    out_l1_cache.write(
                                        'Exception while reading {0:x}'
                                        .format(temp_addr))
                                if val is None:
                                    # Unreadable word: record a placeholder
                                    # (the original reused a stale value or
                                    # crashed here).
                                    values_from_dump.append('????????')
                                else:
                                    # Words are recorded byte-reversed;
                                    # bytes.hex() replaces the Python-2-only
                                    # str.encode('hex'), which raises
                                    # AttributeError on Python 3.
                                    values_from_dump.append(val[::-1].hex())
                                temp_addr += 4
                        # Report lines whose cache data differs from DDR.
                        if values_from_dump != [] and \
                           values_from_dump != values_from_file:
                            out_l1_cache.write(
                                'phy addr: {0} Way:{1} Set:{2} P:{3} MOESI:{4}'
                                .format(addr,
                                        colm[0],
                                        colm[1],
                                        colm[2],
                                        colm[3]) +
                                '\n')
                            out_l1_cache.write('Cache content: ' +
                                               ' '.join(values_from_file) +
                                               '\n')
                            out_l1_cache.write('DDR content : ' +
                                               ' '.join(values_from_dump) +
                                               '\n\n')
                fin.close()
                out_l1_cache.write('------end ' + file_name + '------\n')

View File

@@ -0,0 +1,70 @@
# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser, cleanupString
import linux_list as llist
@register_parser('--print-devices', 'Print devices info')
class DevicesList(RamParser):
    """Walk the kernel's devices_kset list and report every struct device
    (name, bus, driver_data, CMA region, dma_ops) to devices.txt."""

    def __init__(self, ramdump):
        self.ramdump = ramdump
        # Pre-computed struct offsets used while walking the device list.
        self.kobj_offset = self.ramdump.field_offset('struct device', 'kobj')
        self.driver_data_offset = self.ramdump.field_offset('struct device', 'driver_data')
        self.dev_busname_offset = self.ramdump.field_offset('struct device', 'bus')
        self.dma_ops_offset = self.ramdump.field_offset('struct device', 'dma_ops')
        self.archdata_offset = self.ramdump.field_offset('struct device', 'archdata')
        self.entry_offset = self.ramdump.field_offset('struct kobject', 'entry')
        self.name_offset = self.ramdump.field_offset('struct kobject', 'name')
        # Accumulated [device, name, bus_name, driver_data, archdata] rows.
        self.device_lists = []

    def list_func(self, device, fout):
        """List-walker callback: record one struct device and, when *fout*
        is given, print a formatted row to it."""
        kobj = self.ramdump.read_word(device + self.kobj_offset)
        name = cleanupString(
            self.ramdump.read_cstring(kobj + self.name_offset, 128))
        if name is None or name == "":
            return
        driver_data = self.ramdump.read_word(device + self.driver_data_offset)
        bus_name_addr = self.ramdump.read_word(device + self.dev_busname_offset)
        bus_name = self.ramdump.read_cstring(self.ramdump.read_word(bus_name_addr))
        dma_ops = self.ramdump.read_structure_field(device, 'struct device', 'dma_ops')
        cma_area = self.ramdump.read_structure_field(device, 'struct device', 'cma_area')
        archdata = (device + self.archdata_offset)
        cma_name = ''
        if cma_area != 0:
            cma_name_addr_offset = self.ramdump.field_offset('struct cma', 'name')
            cma_name = self.ramdump.read_cstring(cma_area + cma_name_addr_offset, 48)
        a_ops_name = self.ramdump.unwind_lookup(dma_ops)
        dma_ops_name = ''
        if a_ops_name is not None:
            dma_ops_name, a = a_ops_name
        a_ops_string = '[{0}]'.format(dma_ops_name)
        try:
            if fout is not None:
                print("0x%x %-100s %-16s 0x%-32x 0x%-16x %s %-48s"
                      % (device, name, bus_name, driver_data, cma_area, cma_name, a_ops_string), file=fout)
        except Exception as e:
            # Fixed: the original called print_out_str(), which is never
            # imported in this module and raised NameError inside the
            # handler; report the error with plain print instead.
            print(e)
        self.device_lists.append([device, name, bus_name, driver_data, archdata])

    def get_device_list(self, fout=None):
        """Walk devices_kset and return the accumulated device rows."""
        devices_kset_addr = self.ramdump.address_of('devices_kset')
        list_head = devices_kset_addr
        # The kobject 'entry' list node lives inside device->kobj.
        list_offset = self.kobj_offset + self.entry_offset
        list_walker = llist.ListWalker(self.ramdump, list_head, list_offset)
        list_walker.walk(list_head, self.list_func, fout)
        return self.device_lists

    def parse(self):
        """Write devices.txt into the output directory."""
        # Use a context manager so the file is closed even on error.
        with open(self.ramdump.outdir + "/devices.txt", "w") as fout:
            # Fixed: the original wrote this header to self.f, an attribute
            # that does not exist (AttributeError); write it to fout.
            print("v.v (struct device) name bus_name driver_data v.v (struct cma) dma_ops \n", file=fout)
            self.get_device_list(fout)

View File

@@ -0,0 +1,220 @@
# Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser, cleanupString
from print_out import print_out_str
def test_bit(nr, addr, ramdump, my_task_out):
BITS_PER_LONG = 64
if not ramdump.arm64:
BITS_PER_LONG = 32
index = int(nr / BITS_PER_LONG)
data = ramdump.read_ulong(addr + index * ramdump.sizeof('unsigned long'))
#my_task_out.write("\ntest_bit: index = 0x{:x}, data: 0x{:x}".format(index, data))
if 1 & (data >> (nr & (BITS_PER_LONG - 1))):
#my_task_out.write("\ntest bit returned true")
return True
#my_task_out.write("\ntest bit returned false")
return False
def parse_held_locks(ramdump, task, my_task_out):
    """Dump every held_lock record of *task* (a task_struct address) to
    *my_task_out*.

    Walks task->held_locks[0 .. lockdep_depth), decodes the packed
    class_idx bitfield, resolves the lock-class name and the acquire-IP
    symbol, and flags percpu_rw_semaphore readers that are blocking a
    writer.
    """
    task_held_locks_offset = ramdump.field_offset('struct task_struct', 'held_locks')
    held_locks = task + task_held_locks_offset
    sizeof_held_lock = ramdump.sizeof('struct held_lock')
    sizeof_lock_class = ramdump.sizeof('struct lock_class')
    lock_classes_in_use_bitmap = ramdump.address_of('lock_classes_in_use')
    lock_classes = ramdump.address_of('lock_classes')
    task_lockdep_depth = ramdump.read_structure_field(task, 'struct task_struct', 'lockdep_depth')
    my_task_out.write('\nlockdep_depth: {0}\n'.format(hex(task_lockdep_depth)))
    for i in range(0, task_lockdep_depth):
        held_lock_indx = held_locks + (i * sizeof_held_lock)
        hl_prev_chain_key = ramdump.read_structure_field(held_lock_indx, 'struct held_lock', 'prev_chain_key')
        if not hl_prev_chain_key:
            # A zero chain key marks the end of valid records.
            break
        hl_acquire_ip = ramdump.read_structure_field(held_lock_indx, 'struct held_lock', 'acquire_ip')
        hl_acquire_ip_caller = ramdump.read_structure_field(held_lock_indx, 'struct held_lock', 'acquire_ip_caller')
        if hl_acquire_ip_caller is None:
            hl_acquire_ip_caller = 0x0
        hl_instance = ramdump.read_structure_field(held_lock_indx, 'struct held_lock', 'instance')
        hl_nest_lock = ramdump.read_structure_field(held_lock_indx, 'struct held_lock', 'nest_lock')
        if (ramdump.is_config_defined('CONFIG_LOCK_STAT')):
            hl_waittime_stamp = ramdump.read_structure_field(held_lock_indx, 'struct held_lock', 'waittime_stamp')
            hl_holdtime_stamp = ramdump.read_structure_field(held_lock_indx, 'struct held_lock', 'holdtime_stamp')
        # class_idx is a packed bitfield; the low 13 bits index the global
        # lock_classes[] table.
        hl_class_idx_full = hl_class_idx = ramdump.read_structure_field(held_lock_indx, 'struct held_lock', 'class_idx')
        hl_class_idx = hl_class_idx_full & 0x00001FFF
        hl_name = None
        if test_bit(hl_class_idx, lock_classes_in_use_bitmap, ramdump, my_task_out):
            hl_name = ramdump.read_structure_field(lock_classes + sizeof_lock_class * hl_class_idx, 'struct lock_class', 'name')
            hl_name = ramdump.read_cstring(hl_name)
        else:
            # Stale/unused class slot: nothing meaningful to report.
            continue
        lock_type = ramdump.type_of(hl_name)
        # Decode the remaining bitfields packed into class_idx.
        hl_irq_context = (hl_class_idx_full & 0x00006000) >> 13
        hl_trylock = (hl_class_idx_full & 0x00008000) >> 15
        # 0 - exclusive
        # 1 - shared
        # 2 - shared_recursive
        hl_read = (hl_class_idx_full & 0x00030000) >> 16
        if hl_read:
            # Handling for percpu_rw_semaphore
            # if the task in writer is not NULL, it means that the reader is blocking the writer
            try:
                if "struct percpu_rw_semaphore" in lock_type:
                    my_task_out.write("\n lock type : {}".format(lock_type))
                    lock_struct = ramdump.container_of(hl_instance, lock_type, 'dep_map')
                    my_task_out.write("\n lock addr : 0x{:x}".format(lock_struct))
                    writer = lock_struct + ramdump.field_offset("struct percpu_rw_semaphore", "writer")
                    my_task_out.write("\n writer : 0x{:x}".format(writer))
                    writer_task = ramdump.read_structure_field(writer, 'struct rcuwait', 'task')
                    my_task_out.write("\n writer task : 0x{:x}".format(writer_task))
                    if writer_task != 0:
                        my_task_out.write("\n the reader task [Process: {0}, Pid: {1}] is blocking the writer task [Process: {2}, Pid: {3}]".format(
                            cleanupString(ramdump.read_cstring(task + ramdump.field_offset("struct task_struct", "comm"), 16)),
                            ramdump.read_int(task + ramdump.field_offset("struct task_struct", "pid")),
                            cleanupString(ramdump.read_cstring(writer_task + ramdump.field_offset("struct task_struct", "comm"), 16)),
                            ramdump.read_int(writer_task + ramdump.field_offset("struct task_struct", "pid"))))
            except Exception as err:
                my_task_out.write("\nError encountered while resolving read lock ownership")
                my_task_out.write("\n{}\n".format(err))
                pass
        hl_check = (hl_class_idx_full & 0x00040000) >> 18
        hl_hardirqs_off = (hl_class_idx_full & 0x00080000) >> 19
        hl_references = (hl_class_idx_full & 0xFFF00000) >> 20
        hl_pin_count = ramdump.read_structure_field(held_lock_indx, 'struct held_lock', 'pin_count')
        if (ramdump.is_config_defined('CONFIG_LOCKDEP_CROSSRELEASE')):
            hl_gen_id = ramdump.read_structure_field(held_lock_indx, 'struct held_lock', 'gen_id')
        hl_acquire_ip_name_func = 'n/a'
        wname = ramdump.unwind_lookup(hl_acquire_ip)
        if wname is not None:
            hl_acquire_ip_name_func, a = wname
        my_task_out.write(
            '\nheld_locks[{0}] [0x{1:x}]:\
            \n\tprev_chain_key = {2},\
            \n\tacquire_ip = {3},\
            \n\tacquire_ip_caller = {4},\
            \n\tinstance = {5},\
            \n\tnest_lock = {6}\
            \n\tclass_idx = {7},\
            \n\tirq_context = {8},\
            \n\ttrylock = {9},\
            \n\tread = {10},\
            \n\tcheck = {11},\
            \n\thardirqs_off = {12},\
            \n\treferences = {13},\
            \n\tpin_count = {14},\
            \n\tname = {15},\
            \n\tacquire_ip_func = {16}'.format(
                i, held_lock_indx,
                hex(hl_prev_chain_key),
                hex(hl_acquire_ip),
                hex(hl_acquire_ip_caller),
                hex(hl_instance),
                hex(hl_nest_lock),
                hex(hl_class_idx),
                hex(hl_irq_context),
                hex(hl_trylock),
                hex(hl_read),
                hex(hl_check),
                hex(hl_hardirqs_off),
                hex(hl_references),
                hex(hl_pin_count),
                hl_name,
                hl_acquire_ip_name_func))
        if (ramdump.is_config_defined('CONFIG_LOCK_STAT')):
            my_task_out.write(
                '\n\twaittime_stamp = {0}s\
                \n\tholdtime_stamp = {1}s'.format(
                    (hl_waittime_stamp / 1000000000.0),
                    (hl_holdtime_stamp / 1000000000.0)))
        """
        #define LOCK_CONTENDED(_lock, try, lock) \
        do { \
            if (!try(_lock)) { \
                lock_contended(&(_lock)->dep_map, _RET_IP_); \
                lock(_lock); \
            } \
            lock_acquired(&(_lock)->dep_map, _RET_IP_); \
        } while (0)
        void __sched down_read(struct rw_semaphore *sem)
        {
            might_sleep();
            rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
            LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
        }
        """
        # for rwlock_t and rw_semaphore, lock operation is expanded by macro LOCK_CONTENDED
        # there are 3 cases: here assume holdtime_stamp value is initialized to 100 and waittime_stamp will be initialized to 0.
        # 1. if success to acquire lock in try(_lock), waittime_stamp value won't be updated. also, holdtime_stamp will not be updated in lock_acquired().
        #    waittime_stamp: 0
        #    holdtime_stamp: 100
        # 2. if fail to acquire lock in try(_lock), waittime_stamp will be updated in lock_contended(). here assume it's updated to 105.
        #    a. if success to acquire lock in lock(_lock), holdtime_stamp will be updated in lock_acquired(). here assume it's updated to 110.
        #       waittime_stamp: 105
        #       holdtime_stamp: 110
        #    b. if fail to acquire lock in lock(_lock), will stuck at lock(_lock).
        #       waittime_stamp: 105
        #       holdtime_stamp: 100
        # based on above, we can say that if waittime_stamp is greater than holdtime_stamp, the lock is not acquired.
        # Fixed: hl_waittime_stamp/hl_holdtime_stamp only exist when
        # CONFIG_LOCK_STAT is set; guard on the config to avoid a NameError
        # on kernels built without it.
        if (ramdump.is_config_defined('CONFIG_LOCK_STAT')) and lock_type and \
                ('struct rwlock_t' in lock_type or 'struct rw_semaphore' in lock_type):
            if hl_waittime_stamp > hl_holdtime_stamp:
                lock_acquired = 0
            else:
                lock_acquired = 1
            my_task_out.write(
                '\n\tlock_type = {0}\
                \n\tlock_acquired = {1}'.format(lock_type, lock_acquired))
        if (ramdump.is_config_defined('CONFIG_LOCKDEP_CROSSRELEASE')):
            my_task_out.write(
                '\n\tgen_id = {0}'.format(hex(hl_gen_id)))
        my_task_out.write('\n\n')
def parse_mytaskstruct(ramdump):
    """Walk every process and thread in the dump and write its lockdep
    held-locks state to lockdep.txt."""
    out = ramdump.open_file('lockdep.txt')
    out.write('============================================\n')
    comm_off = ramdump.field_offset('struct task_struct', 'comm')
    pid_off = ramdump.field_offset('struct task_struct', 'pid')
    for process in ramdump.for_each_process():
        for thread in ramdump.for_each_thread(process):
            thread_name = cleanupString(
                ramdump.read_cstring(thread + comm_off, 16))
            thread_pid = ramdump.read_int(thread + pid_off)
            out.write('\nProcess: {0}, [Pid: {1} Task: 0x{2:x}]\n'.format(
                thread_name, thread_pid, thread))
            parse_held_locks(ramdump, thread, out)
            # Separator after each thread's record block.
            out.write('============================================\n')
    out.close()
    print_out_str('----wrote lockdep held locks info')
@register_parser('--lockdep-heldlocks', 'Extract lockdep held locks info per task from ramdump')
class LockdepParser(RamParser):
    """Dump per-task lockdep held-lock records (requires CONFIG_LOCKDEP)."""

    def parse(self):
        if not self.ramdump.is_config_defined('CONFIG_LOCKDEP'):
            print_out_str('CONFIG_LOCKDEP not present')
            return
        print_out_str('----dumping lockdep held locks info')
        parse_mytaskstruct(self.ramdump)

View File

@@ -0,0 +1,378 @@
# Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser, cleanupString
from print_out import print_out_str
import struct
from parsers.properties import Properties
import traceback
from utasklib import UTaskLib
from utasklib import ProcessNotFoundExcetion
@register_parser('--logcat', 'Extract logcat logs from ramdump ')
class Logcat(RamParser):
LOGCAT_BIN = "logcat.bin"
def __init__(self, *args):
super(Logcat, self).__init__(*args)
self.f_path_offset = self.ramdump.field_offset('struct file', 'f_path')
self.dentry_offset = self.ramdump.field_offset('struct path', 'dentry')
self.d_iname_offset = self.ramdump.field_offset('struct dentry', 'd_iname')
self.limit_size = int("0x20000000", 16)
self.vma_list = []
def swap64(self, val):
return struct.unpack("<Q", struct.pack(">Q", val))[0]
def empty_vma_list(self):
for i in range(len(self.vma_list)):
del self.vma_list[0]
return
    def get_logd_cnt_and_addr(self, logdmap):
        """Scan logd's VMA list and return (logdcount, logdaddr).

        *logdcount* is the number of VMAs attributed to the logd binary
        (the heap follows them); *logdaddr* is bits [39:24] of the first
        logd VMA start, used as a coarse tag to match anonymous mappings
        in the same region.
        """
        logdcount = 0
        logdaddr = 0
        # 3 logd vm_area, then heap
        # 4 logd + 1 bss, then heap in case of Android Q
        while logdmap != 0:
            tmpstartVm = self.ramdump.read_structure_field(
                logdmap, 'struct vm_area_struct', 'vm_start')
            file = self.ramdump.read_structure_field(
                logdmap, 'struct vm_area_struct', 'vm_file')
            if file != 0:
                # Resolve the backing file's short name via f_path.dentry.
                dentry = self.ramdump.read_word(file + self.f_path_offset +
                                                self.dentry_offset)
                file_name = cleanupString(self.ramdump.read_cstring(
                    dentry + self.d_iname_offset, 16))
                if file_name == "logd":
                    logdcount = logdcount + 1
                    if logdcount == 1:
                        logdaddr = self.ramdump.read_structure_field(
                            logdmap, 'struct vm_area_struct', 'vm_start')
                        # Keep only bits [39:24] as the region tag.
                        logdaddr = (logdaddr & 0xFFFF000000) >> 0x18
            elif logdaddr != 0:
                # Anonymous VMA: count it when it falls in the logd region.
                checkaddr = tmpstartVm >> 0x18
                if checkaddr == logdaddr:
                    logdcount = logdcount + 1
            logdmap = self.ramdump.read_structure_field(
                logdmap, 'struct vm_area_struct', 'vm_next')
        if logdcount < 3:
            # Fewer than the expected minimum of 3 logd VMAs; clamp so the
            # later skip logic still lands past the binary mappings.
            print_out_str("found logd region {0} is smaller than expected. set " \
                "logdcount to 3".format(logdcount))
            logdcount = 3
        return logdcount, logdaddr
    def flattened_range(self, mmap, logdcount, logdaddr):
        """Record one contiguous VA range holding logd's heap (non-arm64).

        Skips the first *logdcount* logd-related VMAs, then spans from the
        next VMA start up to the highest linker mapping end, capping the
        size at self.limit_size.  Appends a single header-less entry to
        self.vma_list.
        """
        min = self.ramdump.read_structure_field(
            mmap, 'struct vm_area_struct', 'vm_start')
        max = 0
        count = 0
        vma_info = {}
        while mmap != 0:
            tmpstartVm = self.ramdump.read_structure_field(
                mmap, 'struct vm_area_struct', 'vm_start')
            if count == logdcount:
                # First VMA after the logd mappings: heap starts here.
                min = tmpstartVm
                count = count + 1  # do not enter here again
            file = self.ramdump.read_structure_field(
                mmap, 'struct vm_area_struct', 'vm_file')
            if file != 0:
                dentry = self.ramdump.read_word(file + self.f_path_offset +
                                                self.dentry_offset)
                file_name = cleanupString(self.ramdump.read_cstring(
                    dentry + self.d_iname_offset, 16))
                if file_name == "logd":
                    count = count + 1
                if file_name.find("linker") == 0:
                    # Linker mappings sit above the heap; track the highest
                    # end address as the range top.
                    va_end = self.ramdump.read_structure_field(
                        mmap, 'struct vm_area_struct', 'vm_end')
                    if va_end > max:
                        max = va_end
            elif logdaddr != 0:
                # Anonymous VMA in the logd region also counts toward the
                # mappings to skip.
                checkaddr = (tmpstartVm) >> 0x18
                if checkaddr == logdaddr:
                    count = count + 1
            mmap = self.ramdump.read_structure_field(
                mmap, 'struct vm_area_struct', 'vm_next')
        size = max - min
        if size > self.limit_size:
            size = self.limit_size
        # Single flat range, no per-range metadata header.
        vma_info['header'] = None
        vma_info['start'] = min
        vma_info['size'] = size
        self.vma_list.append(vma_info)
        return
    def scattered_range(self, mmap, logdcount, logdaddr):
        """Collect logd's anonymous read-write VA ranges (arm64).

        After skipping *logdcount* logd-related VMAs, coalesces runs of
        adjacent anonymous rw mappings into ranges and appends each to
        self.vma_list with a 32-byte metadata header
        (magic, start, size, file offset).  If the total exceeds
        self.limit_size the whole list is discarded.
        """
        min = 0
        max = 0
        count = 0
        prev_vmstart = 0
        prev_vmend = 0
        total_size = 0
        store_offset = 0
        meta_size = 32
        magic = 0xCECEC0DE
        while mmap != 0:
            vm_start = self.ramdump.read_structure_field(
                mmap, 'struct vm_area_struct', 'vm_start')
            vm_end = self.ramdump.read_structure_field(
                mmap, 'struct vm_area_struct', 'vm_end')
            vm_flags = self.ramdump.read_structure_field(
                mmap, 'struct vm_area_struct', 'vm_flags')
            vm_file = self.ramdump.read_structure_field(
                mmap, 'struct vm_area_struct', 'vm_file')
            set_min = False
            file_name = None
            if vm_file != 0:
                dentry = self.ramdump.read_word(vm_file + self.f_path_offset +
                                                self.dentry_offset)
                file_name = cleanupString(self.ramdump.read_cstring(
                    dentry + self.d_iname_offset, 16))
            if count == logdcount:
                # Looking for the first anonymous rw VMA after the logd
                # mappings; file-backed or non-rw VMAs are skipped.
                if vm_file != 0 or vm_flags & 0x3 != 0x3:
                    mmap = self.ramdump.read_structure_field(
                        mmap, 'struct vm_area_struct', 'vm_next')
                    continue
                min = vm_start
                count = count + 1  # do not enter here again
            elif count < logdcount:
                # Still skipping the logd binary/region mappings.
                if file_name == "logd":
                    count = count + 1
                elif logdaddr != 0:
                    checkaddr = (vm_start) >> 0x18
                    if checkaddr == logdaddr:
                        count = count + 1
            else:
                if min == 0:
                    # No open range: start one at the next anonymous rw VMA.
                    if vm_file == 0 and vm_flags & 0x3 == 0x3:
                        min = vm_start
                    mmap = self.ramdump.read_structure_field(
                        mmap, 'struct vm_area_struct', 'vm_next')
                    if mmap != 0:
                        prev_vmstart = vm_start
                        prev_vmend = vm_end
                    continue
                # A range is open; decide whether this VMA closes it.
                if vm_file != 0 or vm_flags & 0x3 != 0x3:
                    # File-backed / non-rw VMA terminates the run.
                    max = prev_vmend
                elif prev_vmend != 0 and prev_vmend != vm_start:
                    # Gap between VMAs: close the run and open a new one
                    # at this VMA (set_min below).
                    max = prev_vmend
                    set_min = True
                elif prev_vmend != 0 and prev_vmend == vm_start:
                    # Contiguous with the previous VMA: keep extending.
                    max = 0
            mmap = self.ramdump.read_structure_field(
                mmap, 'struct vm_area_struct', 'vm_next')
            if mmap != 0:
                prev_vmstart = vm_start
                prev_vmend = vm_end
            if min != 0 and max != 0 and min < max:
                # Emit the closed range with its metadata header.
                vma_info = {}
                size = max - min
                total_size = total_size + size
                vma_info['header'] = "{0:016x}{1:016x}{2:016x}{3:016x}".format(
                    self.swap64(magic), self.swap64(min), self.swap64(size),
                    self.swap64(store_offset + meta_size))
                vma_info['start'] = min
                vma_info['size'] = size
                self.vma_list.append(vma_info)
                store_offset = store_offset + size + meta_size
                min = 0
                max = 0
                if set_min is True:
                    min = vm_start
            if mmap == 0:  # last
                # Final VMA: close any trailing range.
                if vm_start != 0 and prev_vmend != vm_start and vm_file == 0 and vm_flags & 0x3 == 0x3:
                    min = vm_start
                    max = vm_end
                elif vm_start != 0 and prev_vmend == vm_start:
                    max = vm_end
                if min != 0 and max != 0 and min < max:
                    vma_info = {}
                    size = max - min
                    total_size = total_size + size
                    vma_info['header'] = "{0:016x}{1:016x}{2:016x}{3:016x}".format(
                        self.swap64(magic), self.swap64(min), self.swap64(size),
                        self.swap64(store_offset + meta_size))
                    vma_info['start'] = min
                    vma_info['size'] = size
                    self.vma_list.append(vma_info)
                    store_offset = store_offset + size + meta_size
        if total_size > self.limit_size:
            # Oversized collection is almost certainly wrong; drop it so
            # generate_bin() reports failure instead of writing garbage.
            print_out_str("size({0:d}) is too big".format(total_size))
            self.empty_vma_list()
        return
def get_range(self, mmap, logdcount, logdaddr):
if self.ramdump.arm64:
self.scattered_range(mmap, logdcount, logdaddr)
else:
self.flattened_range(mmap, logdcount, logdaddr)
return
    def generate_bin(self, mmu):
        """Write the collected VMA ranges to logcat.bin.

        Each page is translated through *mmu*; unmapped pages are
        zero-filled so file offsets stay aligned with virtual addresses.
        """
        # Start from a clean file; writes below append.
        self.ramdump.remove_file(self.LOGCAT_BIN)
        if len(self.vma_list) == 0:
            print_out_str("Failed to generate "+self.LOGCAT_BIN)
        else:
            print_out_str(self.LOGCAT_BIN+" base address is {0:x}".format(self.vma_list[0]['start']))
            with self.ramdump.open_file(self.LOGCAT_BIN, 'ab') as out_file:
                for vma_info in self.vma_list:
                    min = vma_info['start']
                    size = vma_info['size']
                    header = vma_info['header']
                    if header is not None:
                        # Per-range metadata header (magic/start/size/offset),
                        # stored as raw bytes from its hex representation.
                        header = bytearray.fromhex(header)
                        out_file.write(header)
                    max = min + size
                    # Copy the range one 4 KiB page at a time.
                    while(min < max):
                        phys = mmu.virt_to_phys(min)
                        if phys is None:
                            # Unmapped page: pad with zeros.
                            min = min + 0x1000
                            out_file.write(b'\x00' * 0x1000)
                            continue
                        out_file.write(self.ramdump.read_physical(phys, 0x1000))
                        min = min + 0x1000
        return
def generate_logcat_bin(self, taskinfo):
    '''
    generate logcat.bin for the older android version
    param taskinfo: utasklib.UTaskInfo
    '''
    META_SIZE = 32
    MAGIC = 0xCECEC0DE
    mm_offset = self.ramdump.field_offset('struct task_struct', 'mm')
    mm_addr = self.ramdump.read_word(taskinfo.task_addr + mm_offset)
    mmap = self.ramdump.read_structure_field(mm_addr, 'struct mm_struct',
                                             'mmap')
    if mmap:
        # kernels that still expose the vma linked list: walk it directly
        logdcount, logdaddr = self.get_logd_cnt_and_addr(mmap)
        self.get_range(mmap, logdcount, logdaddr)
    else:
        # no mm_struct.mmap (maple-tree kernels): use the pre-parsed vma list,
        # keeping only anonymous read/write mappings
        store_offset = 0
        for vma in taskinfo.vmalist:
            if vma.file != 0 or vma.flags & 0b11 != 0b11:
                continue
            length = vma.vm_end - vma.vm_start
            self.vma_list.append({
                'header': "{0:016x}{1:016x}{2:016x}{3:016x}".format(
                    self.swap64(MAGIC), self.swap64(vma.vm_start),
                    self.swap64(length), self.swap64(store_offset + META_SIZE)),
                'start': vma.vm_start,
                'size': length,
            })
            store_offset += length + META_SIZE
    self.generate_bin(taskinfo.mmu)
def is_LE_process(self, taskinfo):
    """Return True when the task maps the LE C library log (liblog.so.0.0.0)."""
    return any(vma.file_name == "liblog.so.0.0.0" for vma in taskinfo.vmalist)
def is_openwrt_process(self, taskinfo):
    """Return True when any mapped file name contains 'libubus.so.' (openwrt)."""
    return any("libubus.so." in vma.file_name for vma in taskinfo.vmalist)
def parse(self):
    """Entry point: run __parse, optionally bounded by a wall-clock limit."""
    limit = self.ramdump.logcat_limit_time
    if limit == 0:
        self.__parse()
        return
    from func_timeout import func_timeout
    print_out_str("Limit logcat parser running time to {}s".format(limit))
    func_timeout(limit, self.__parse)
def __parse(self):
    """Locate the logd process and extract its log buffers.

    Parser selection, in order:
      * sdk >= 31 (Android S): Logcat_v3, then Logcat_vma, then a raw
        logcat.bin dump if both fail;
      * LE dumps (liblog.so.0.0.0 mapped): Logcat_m;
      * openwrt dumps (libubus mapped): Logcat_openwrt;
      * otherwise: raw logcat.bin dump.
    All failures are logged and swallowed so the overall ramdump parse
    continues.
    """
    try:
        try:
            taskinfo = UTaskLib(self.ramdump).get_utask_info("logd")
        except ProcessNotFoundExcetion:
            # no logd task in this dump -- nothing to extract
            print_out_str("logd process is not started")
            return
        propertyParser = Properties(self.ramdump)
        ver = -1
        try:
            # generate system/vendor properties to Properties.txt
            propertyParser.parse()
            for name, value in propertyParser.proplist:
                if name == "ro.build.version.sdk" or name == "ro.vndk.version":
                    ver = int(value)
        except:
            # property parsing is best-effort; fall back to "unknown"
            ver = -1
        print_out_str("Current sdk version is "+ str(ver))
        if ver >= 31: # Android S
            from parsers.logcat_v3 import Logcat_v3
            logcat = Logcat_v3(self.ramdump, taskinfo)
            try:
                is_success = logcat.parse()
            except Exception as e:
                is_success = False
                print_out_str("logcat_v3 parser failed " + str(e))
                traceback.print_exc()
            if is_success:
                print_out_str("logcat_v3 parse logcat success")
                return
            try:
                from parsers.logcat_v3 import Logcat_vma
                logcat = Logcat_vma(self.ramdump, taskinfo)
                is_success = logcat.parse()
            except Exception as e:
                is_success = False
                print_out_str("logcat_vma parser failed" + str(e))
                traceback.print_exc()
            if is_success:
                print_out_str("logcat_vma parse logcat success")
            else:
                # generate logcat.bin when both logcat_v3 and logcat_vma parse failed
                self.generate_logcat_bin(taskinfo)
        elif self.is_LE_process(taskinfo):
            print_out_str("LE ramdump")
            from parsers.logcat_m import Logcat_m
            # parser to support Android M
            logcat = Logcat_m(self.ramdump, taskinfo)
            logcat.parse()
        elif self.is_openwrt_process(taskinfo):
            print_out_str("Openwrt ramdump")
            from parsers.logcat_openwrt import Logcat_openwrt
            # parser to support the openwrt platform
            logcat = Logcat_openwrt(self.ramdump, taskinfo)
            logcat.parse()
        else:
            self.generate_logcat_bin(taskinfo)
    except Exception as result:
        # last-resort guard so logcat extraction never aborts the run
        print_out_str(str(result))
        traceback.print_exc()

View File

@@ -0,0 +1,261 @@
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause-Clear
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser, cleanupString
from print_out import print_out_str
import struct
from utasklib import UTaskLib
from utasklib import ProcessNotFoundExcetion
import datetime
class Logcat_m(RamParser):
    '''
    Logcat parser to support extracting logcat from an Android M ramdump.

    Works without debug symbols: it scans logd's read/write memory for a
    structure that looks like std::list<LogBufferElement *> (the logd
    mLogElements list) and then walks that list to recover the records.
    '''
    LOG_NAME = [ "main", "radio", "events", "system", "crash", "kernel"]
    #log id
    LOG_ID_MIN = 0
    LOG_ID_MAIN = 0
    LOG_ID_RADIO = 1
    LOG_ID_EVENTS = 2
    LOG_ID_SYSTEM = 3
    LOG_ID_CRASH = 4
    LOG_ID_KERNEL = 5
    LOG_ID_MAX = 5
    #log level (android_LogPriority values)
    ANDROID_LOG_DEFAULT =1
    ANDROID_LOG_VERBOSE = 2
    ANDROID_LOG_DEBUG = 3
    ANDROID_LOG_INFO = 4
    ANDROID_LOG_WARN = 5
    ANDROID_LOG_ERROR = 6
    ANDROID_LOG_FATAL = 7
    ANDROID_LOG_SILENT = 8
    def __init__(self, ramdump, logd_task):
        """Remember the logd task and the pointer width of the dump."""
        super().__init__(ramdump)
        self.vmas = []
        self.logd_task = logd_task
        if self.ramdump.arm64:
            self.addr_length = 8
        else:
            self.addr_length = 4
    def find_logbuf_elem_list_addr(self, vma):
        """Scan one VMA, 4 bytes at a time, for the element-list header.

        Returns the matching offset inside the VMA, or 0 when not found.
        """
        vma_size = vma["size"]
        vma_data = vma["data"]
        offset = 0
        is_chunk = False
        while offset < vma_size:
            is_chunk = self.is_logbuf_elem_list_addr(vma_data, offset)
            if is_chunk:
                break
            offset = offset + 4
        if is_chunk:
            return offset
        return 0
    def parse(self):
        """Entry point: locate mLogElements and dump all log records."""
        startTime = datetime.datetime.now()
        self.get_vmas_with_rw()
        # find address of std::list<LogBufferElement *> mLogElements
        logbuf_addr = 0
        for vma in self.vmas:
            offset = self.find_logbuf_elem_list_addr(vma)
            if offset != 0:
                logbuf_addr = vma["vmstart"]+offset
                print("found address",hex(logbuf_addr))
                break
        if logbuf_addr:
            print_out_str("LogBuffer address 0x%x"% logbuf_addr)
            self.process_logbuf_and_save(logbuf_addr)
            print_out_str("Logcat parse cost "+str((datetime.datetime.now()-startTime).total_seconds())+" s")
        else:
            print_out_str("LogBuffer address was not found")
    def is_logbuf_elem_list_addr(self, vma_data, offset):
        """Heuristically decide whether vma_data[offset:] is the
        {tail, head, count} header of std::list<LogBufferElement *>,
        by walking the doubly-linked list and validating each element.

        NOTE(review): unpacks three 8-byte words even though
        self.addr_length may be 4 -- presumably only 64-bit dumps reach
        this path; confirm for 32-bit targets.
        """
        if offset+24 > len(vma_data):
            return False
        nodes = struct.unpack('<QQQ', vma_data[offset:offset+24])
        tail_node_addr = nodes[0]
        head_node_addr = nodes[1]
        list_count = nodes[2]
        if tail_node_addr ==0 or head_node_addr ==0:
            return False
        next_node_addr = head_node_addr
        prev_node = self.read_bytes(head_node_addr, self.addr_length) # prev_node = next_node_addr->prev
        if prev_node == 0:
            return False
        if list_count == 0 and head_node_addr == tail_node_addr: # empty list
            return False
        index = 0
        while next_node_addr != 0 and index < list_count:
            # every node's first word must point back at the previous node
            next_prev_node = self.read_bytes(next_node_addr, self.addr_length)
            if not next_prev_node or next_prev_node != prev_node:
                return False
            current_node_addr = next_node_addr + self.addr_length *2
            current_node = self.read_bytes(current_node_addr, self.addr_length)
            is_chunk = self.is_log_elem(current_node)
            if not is_chunk:
                return False
            if next_node_addr == tail_node_addr: # loop complete
                return True
            prev_node = next_node_addr
            next_node_addr = self.read_bytes(next_node_addr + self.addr_length, self.addr_length)
            index = index +1
        return False
    def is_log_elem(self, addr):
        """Sanity-check that addr looks like a LogBufferElement.

        Field offsets (0x8 log_id, 0xc uid, 0x10 pid, 0x18 msg, 0x20 len)
        are hard-coded for the Android M object layout -- TODO confirm
        against the matching AOSP revision.
        """
        log_id = self.read_bytes(addr + 0x8, 4)
        uid = self.read_bytes(addr + 0xc, 4)
        pid = self.read_bytes(addr + 0x10, 4)
        msg_addr = self.read_bytes(addr + 0x18, 8)
        msg_len = self.read_bytes(addr + 0x20, 2)
        dropped = msg_len
        if log_id < 0 or log_id > self.LOG_ID_MAX:
            return False
        if msg_len < 1 or msg_len > 4068: # max_payload
            return False
        if (pid < 0 or pid >= 65536) or (uid < 0 or uid >= 65536):
            return False
        if (log_id == 2) or (dropped == 1):
            # events buffer (binary payload) or dropped marker: no priority byte
            return True
        else:
            priority = self.read_bytes(msg_addr, 1)
            if priority > self.ANDROID_LOG_SILENT or priority < self.ANDROID_LOG_DEFAULT:
                return False
            return True
    def process_logbuf_and_save(self, logbuf_addr):
        """Walk the element list, collect entries, sort by timestamp and
        write one Logcat_<name>.txt per log buffer."""
        head_node_addr = self.read_bytes(logbuf_addr + self.addr_length, self.addr_length)
        list_count = self.read_bytes(logbuf_addr + self.addr_length*2, self.addr_length)
        next_node_addr = head_node_addr
        index = 0
        log_file = [0] * (self.LOG_ID_MAX + 1)
        while index <= self.LOG_ID_MAX:
            log_file[index] = self.ramdump.open_file(self.get_output_filename(index))
            index = index + 1
        index = 0
        log_list = []
        while next_node_addr != 0 and index < list_count:
            current_node = self.read_bytes(next_node_addr + self.addr_length *2, self.addr_length)
            self.save_log_elem(current_node, log_list)
            next_node_addr = self.read_bytes(next_node_addr + self.addr_length, self.addr_length)
            index = index +1
        # emit in chronological order across the whole list
        log_list.sort(key=lambda x: float(x['timestamp']))
        for item in log_list:
            log_file[item["logid"]].write(item["msg"])
        index = 0
        while index <= self.LOG_ID_MAX:
            log_file[index].close()
            index = index + 1
        return
    def save_log_elem(self, addr, log_list):
        """Decode one LogBufferElement and append a formatted record to
        log_list; events-buffer and dropped entries are skipped."""
        log_id = self.read_bytes(addr + 0x8, 4)
        uid = self.read_bytes(addr + 0xc, 4)
        pid = self.read_bytes(addr + 0x10, 4)
        tid = self.read_bytes(addr + 0x14, 4)
        msg_addr = self.read_bytes(addr + 0x18, 8)
        msg_len = self.read_bytes(addr + 0x20, 2)
        dropped = msg_len
        tv_sec = self.read_bytes(addr + 0x30, 4)
        tv_nsec = self.read_bytes(addr + 0x34, 4)
        if (log_id == 2) or (dropped == 1):
            return
        if msg_len == 0:
            return
        # payload layout: <priority byte><tag>\0<message>\0
        priority = self.read_bytes(msg_addr, 1)
        msg = self.read_binary(msg_addr + 1, msg_len - 1)
        if not msg:
            return
        msgList = msg.decode('ascii', 'ignore').split('\0')
        timestamp = self.format_time(tv_sec, tv_nsec)
        if len(msgList) >= 2:
            time_f = float("{}.{}".format(tv_sec,tv_nsec))
            msg = "%s %5d %5d %5d %c %-8.*s: %s\n" % (timestamp, uid, pid, tid,
                            self.filter_pri_to_char(priority), len(msgList[0]),
                            cleanupString(msgList[0].strip()),
                            cleanupString(msgList[1].strip()))
            log = {}
            log['timestamp'] = time_f
            log['msg'] = msg
            log["logid"] = log_id
            log_list.append(log)
    def filter_pri_to_char(self, pri) :
        """Map an android log priority to its single-letter tag ('?' if unknown)."""
        if pri == self.ANDROID_LOG_VERBOSE:
            return 'V'
        elif pri == self.ANDROID_LOG_DEBUG:
            return 'D'
        elif pri == self.ANDROID_LOG_INFO:
            return 'I'
        elif pri == self.ANDROID_LOG_WARN:
            return 'W'
        elif pri == self.ANDROID_LOG_ERROR:
            return 'E'
        elif pri == self.ANDROID_LOG_FATAL:
            return 'F'
        elif pri == self.ANDROID_LOG_SILENT:
            return 'S'
        else:
            return '?'
    def get_vmas_with_rw(self):
        '''
        Cache every VMA of the logd task that has read+write permissions,
        including its raw contents, into self.vmas.
        '''
        for vma in self.logd_task.vmalist:
            if vma.flags & 0b11 != 0b11:
                continue
            item = {}
            item["vmstart"] = vma.vm_start
            item["size"] = vma.vm_end - vma.vm_start
            item["data"] = self.read_binary(item["vmstart"], item["size"])
            self.vmas.append(item)
    def read_bytes(self, addr, len):
        """Read a little-endian integer of `len` bytes from logd user
        space; returns 0 on a failed read.  (NOTE: `len` shadows the
        builtin.)"""
        val = UTaskLib.read_bytes(self.ramdump, self.logd_task.mmu, addr, len)
        if not val:
            val = 0
        return val
    def read_binary(self, addr, len):
        """Read `len` raw bytes from logd user space (None on failure)."""
        return UTaskLib.read_binary(self.ramdump, self.logd_task.mmu, addr, len)
    def get_output_filename(self, log_id):
        """Return 'Logcat_<buffer>.txt' for a valid log id, else None."""
        if log_id >= self.LOG_ID_MIN and log_id <= self.LOG_ID_MAX:
            return "Logcat_{}.txt".format(self.LOG_NAME[log_id])
        else:
            return None
    def format_time(self, tv_sec, tv_nsec):
        """Render 'MM-DD HH:MM:SS.mmm' (UTC) from a sec/nsec pair."""
        tv_nsec = str(tv_nsec // 1000000)
        tv_nsec = str(tv_nsec).zfill(3)
        date = datetime.datetime.utcfromtimestamp(tv_sec)
        timestamp = date.strftime("%m-%d %H:%M:%S") + '.' + tv_nsec
        return timestamp

View File

@@ -0,0 +1,261 @@
# Copyright (C) 2013 Felix Fietkau <nbd@openwrt.org>
# Copyright (C) 2013 John Crispin <blogic@openwrt.org>
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import RamParser
from print_out import print_out_str
from utasklib import UTaskLib
import datetime
import logging
import os
class Logcat_openwrt(RamParser):
    '''
    Logcat parser to support extracting the syslog ring buffer from an
    Openwrt platform ramdump (ubox logd layout).
    '''
    # syslog priorities (LOG_* from <syslog.h>)
    LOG_EMERG = 0
    LOG_ALERT = 1
    LOG_CRIT = 2
    LOG_ERR = 3
    LOG_WARNING = 4
    LOG_NOTICE = 5
    LOG_INFO = 6
    LOG_DEBUG = 7
    LOG_PRIMASK = 7
    INTERNAL_NOPRI = 0x10
    # syslog facilities (value = facility << 3)
    LOG_KERN = (0 << 3)
    LOG_USER = (1 << 3)
    LOG_MAIL = (2 << 3)
    LOG_DAEMON = (3 << 3)
    LOG_AUTH = (4 << 3)
    LOG_SYSLOG = (5 << 3)
    LOG_LPR = (6 << 3)
    LOG_NEWS = (7 << 3)
    LOG_UUCP = (8 << 3)
    LOG_CRON = (9 << 3)
    LOG_AUTHPRIV = (10 << 3)
    LOG_FTP = (11 << 3)
    LOG_LOCAL0 = (16 << 3)
    LOG_LOCAL1 = (17 << 3)
    LOG_LOCAL2 = (18 << 3)
    LOG_LOCAL3 = (19 << 3)
    LOG_LOCAL4 = (20 << 3)
    LOG_LOCAL5 = (21 << 3)
    LOG_LOCAL6 = (22 << 3)
    LOG_LOCAL7 = (23 << 3)
    LOG_NFACILITIES = 24
    INTERNAL_MARK = (LOG_NFACILITIES << 3)
    # openlog() option flags (unused here, kept for completeness)
    LOG_PID = 0x01
    LOG_CONS = 0x02
    LOG_ODELAY = 0x04
    LOG_NDELAY = 0x08
    LOG_NOWAIT = 0x10
    LOG_PERROR = 0x20
    LOG_FACMASK = 0x3f8
    # name -> priority lookup (reverse-searched by getcodetext)
    prioritynames = {
        "alert": LOG_ALERT,
        "crit": LOG_CRIT,
        "debug": LOG_DEBUG,
        "emerg": LOG_EMERG,
        "err": LOG_ERR,
        "error": LOG_ERR,
        "info": LOG_INFO,
        "none": INTERNAL_NOPRI,
        "notice": LOG_NOTICE,
        "panic": LOG_EMERG,
        "warn": LOG_WARNING,
        "warning": LOG_WARNING,
        "0": -1
    }
    # name -> facility lookup (reverse-searched by getcodetext)
    facilitynames = {
        "auth": LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron": LOG_CRON,
        "daemon": LOG_DAEMON,
        "ftp": LOG_FTP,
        "kern": LOG_KERN,
        "lpr": LOG_LPR,
        "mail": LOG_MAIL,
        "mark": INTERNAL_MARK,
        "news": LOG_NEWS,
        "security": LOG_AUTH,
        "sysLOG": LOG_SYSLOG,
        "user": LOG_USER,
        "uucp": LOG_UUCP,
        "local0": LOG_LOCAL0,
        "local1": LOG_LOCAL1,
        "local2": LOG_LOCAL2,
        "local3": LOG_LOCAL3,
        "local4": LOG_LOCAL4,
        "local5": LOG_LOCAL5,
        "local6": LOG_LOCAL6,
        "local7": LOG_LOCAL7,
        "0": -1
    }
    def __init__(self, ramdump, logd_task):
        """Remember the logd task, pointer width, and set up debug logging."""
        super().__init__(ramdump)
        self.logd_task = logd_task
        if self.ramdump.arm64:
            self.addr_length = 8
        else:
            self.addr_length = 4
        # sizeof(struct log_head) in ubox logd
        self.SIZEOF_LOG_HEAD = 32
        # logger init
        self.logger = logging.getLogger(__name__)
        path = os.path.join(self.ramdump.outdir, 'logcat_debug_log.txt')
        self.logger.addHandler(logging.FileHandler(path, mode='w'))
        self.logger.setLevel(logging.INFO)
    def LOG_PRI(self, p):
        """Extract the priority bits from a combined pri/facility value."""
        return (p) & self.LOG_PRIMASK
    def LOG_FAC(self, p):
        """Extract the facility number from a combined pri/facility value."""
        return ((p) & self.LOG_FACMASK) >> 3
    def getcodetext(self, value, codetable):
        """Reverse-lookup a name for `value` in a name->code table."""
        for name, val in codetable.items():
            if val == value:
                return name
        return "<unknown>"
    def PAD(self, x):
        """Round x up to the next 4-byte boundary (no-op if aligned)."""
        return (((x) - (x % 4)) + 4) if (x % 4) else (x)
    def log_next(self, log_head_addr, size):
        """Advance past one record, wrapping to the start of the ring
        buffer when the end is passed."""
        n = log_head_addr + self.SIZEOF_LOG_HEAD + self.PAD(size)
        return self.log_start_addr if n > self.log_end_addr else n
    def log_list(self):
        """Yield the address of each log_head from oldest to newest."""
        h = self.log_oldest_addr
        while h != self.log_newest_addr and h <= self.log_end_addr and h >= self.log_start_addr:
            size = self.read_bytes(h, 4)
            if size > 1024:
                # corrupted record; stop rather than walk garbage
                self.logger.warning("invalid size(%x), expected <= 1024" % size)
                break
            yield h
            # read next log element
            h = self.log_next(h, size)
            size = self.read_bytes(h, 4)
            if size == 0 and h > self.log_newest_addr: # reach to end of buffer
                h = self.log_start_addr
    def process_logbuf_and_save(self):
        """Format every record in the ring buffer into Logcat_openwrt.txt."""
        log_file = self.ramdump.open_file("Logcat_openwrt.txt")
        for log_head_addr in self.log_list():
            # struct log_head: {size, id, priority, source, ts_sec, ts_nsec}
            size = self.read_bytes(log_head_addr, 4)
            id = self.read_bytes(log_head_addr + 4, 4)
            priority = self.read_bytes(log_head_addr + 8, 4)
            source = self.read_bytes(log_head_addr + 12, 4)
            tv_sec = self.read_bytes(log_head_addr + 16, 8)
            tv_nsec = self.read_bytes(log_head_addr + 24, 8)
            timestamp = self.format_time(tv_sec, tv_nsec)
            msg = self.read_binary(log_head_addr + self.SIZEOF_LOG_HEAD, size)
            msg = msg.decode('ascii', 'ignore').strip('\0')
            fmt_msg = "%s %s.%s %s %s\n" % (timestamp,
                    self.getcodetext(self.LOG_FAC(priority) << 3, self.facilitynames),
                    self.getcodetext(self.LOG_PRI(priority), self.prioritynames),
                    "" if source else (" kernel:"), msg)
            log_file.write(fmt_msg)
    def read_bytes(self, addr, len):
        """Read a little-endian integer of `len` bytes from logd user
        space; returns 0 on a failed read.  (NOTE: `len` shadows the
        builtin.)"""
        val = UTaskLib.read_bytes(self.ramdump, self.logd_task.mmu, addr, len)
        if not val:
            val = 0
        return val
    def read_binary(self, addr, len):
        """Read `len` raw bytes from logd user space (None on failure)."""
        return UTaskLib.read_binary(self.ramdump, self.logd_task.mmu, addr, len)
    def format_time(self, tv_sec, tv_nsec):
        """Render 'MM-DD HH:MM:SS.mmm' (UTC) from a sec/nsec pair."""
        tv_sec = tv_sec & 0xffffffff
        tv_nsec = str(tv_nsec // 1000000)
        tv_nsec = str(tv_nsec).zfill(3)
        date = datetime.datetime.utcfromtimestamp(tv_sec)
        timestamp = date.strftime("%m-%d %H:%M:%S") + '.' + tv_nsec
        return timestamp
    def is_valid_logd_addr(self, addr):
        """Treat addr as the logd state block and sanity-check the
        log/log_end/oldest/newest pointers read from it.

        On success the four self.log_*_addr attributes are left set and
        True is returned; the buffer must also be <= 1 MiB.
        """
        try:
            self.log_start_addr = self.read_bytes(addr + self.addr_length, self.addr_length)
            self.log_end_addr = self.read_bytes(addr, self.addr_length)
            self.log_oldest_addr = self.read_bytes(addr + 11 * self.addr_length, self.addr_length)
            self.log_newest_addr = self.read_bytes(addr + 10 * self.addr_length, self.addr_length)
            if not self.log_start_addr or not self.log_end_addr \
                    or not self.log_oldest_addr or not self.log_newest_addr:
                self.logger.debug("Can't find log/log_end/oldest/newest address, LogBuffer address was not found on addr=0x%x" % addr)
                return False
            if self.log_start_addr < self.log_end_addr and \
                    self.log_start_addr <= self.log_oldest_addr <= self.log_end_addr and \
                    self.log_start_addr <= self.log_newest_addr <= self.log_end_addr:
                self.logger.debug("log_start 0x%x log_end 0x%x log_oldest 0x%x log_newest 0x%x" % (
                    self.log_start_addr, self.log_end_addr, self.log_oldest_addr, self.log_newest_addr))
                ## check buffer size is valid
                buffer_size = int((self.log_end_addr - self.log_start_addr)/1024)
                if buffer_size > 1024:
                    return False
                print_out_str("log buffer size %d k" % buffer_size)
                return True
            else:
                self.logger.debug("invalid address--> log_start 0x%x log_end 0x%x log_oldest 0x%x log_newest 0x%x" % (
                    self.log_start_addr, self.log_end_addr, self.log_oldest_addr, self.log_newest_addr))
                return False
        except:
            # any unreadable memory means this candidate address is wrong
            return False
    def parse(self):
        """Entry point: find logd's data VMA, locate the log state block
        (known offset first, then brute-force scan) and dump the logs."""
        startTime = datetime.datetime.now()
        data_start = 0
        for vma in self.logd_task.vmalist:
            if vma.file_name == "logd" and vma.flags & 0b11 == 0b11:
                data_start = vma.vm_start
                break
        if not data_start:
            print_out_str("logd process didn't have data section, LogBuffer address was not found")
            return
        # offsets observed for the known ubox logd builds -- TODO confirm
        # per build; the fallback scan below covers mismatches
        if self.ramdump.arm64:
            log_end_offset = 0x328
            addr_length = 8
        else:
            log_end_offset = 0x1e0
            addr_length = 4
        valid = self.is_valid_logd_addr(vma.vm_start + log_end_offset)
        if not valid:
            ## continue to search
            print_out_str("log address was not found with offset 0x%x, go through whole vma" % log_end_offset)
            addr = vma.vm_start
            while addr < (vma.vm_end):
                valid = self.is_valid_logd_addr(addr)
                if valid:
                    print_out_str("found logd addr offset is 0x%x" % (addr - vma.vm_start))
                    break
                addr += addr_length
        if valid:
            self.process_logbuf_and_save()
        print_out_str("Logcat parse cost " + str((datetime.datetime.now() - startTime).total_seconds()) + " s")

View File

@@ -0,0 +1,322 @@
# Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser, cleanupString
from mmu import Armv8MMU, Armv7MMU
from print_out import print_out_str
import struct
import linecache
import datetime
import os
import subprocess
def read_bytes(mmu, ramdump, addr, len):
    """Read and unpack a little-endian integer of 1/2/4/8 bytes from a
    user-space virtual address.

    :param mmu: MMU object providing virt_to_phys()
    :param ramdump: ramdump providing read_physical()
    :param addr: user-space virtual address
    :param len: width in bytes (1, 2, 4 or 8); NOTE: shadows the builtin,
                kept for caller keyword compatibility
    :return: the unpacked integer, or None if the address is unreadable;
             exits the process on an unsupported width
    """
    addr = mmu.virt_to_phys(addr)
    s = ramdump.read_physical(addr, len)
    # bugfix: read_physical returns bytes, so the old (s == '') test could
    # never match in Python 3; treat None and empty bytes uniformly as a
    # failed read instead of crashing in struct.unpack.
    if not s:
        return None
    if len == 8:
        s = struct.unpack('<Q', s)
    elif len == 4:
        s = struct.unpack('<I', s)
    elif len == 2:
        s = struct.unpack('<H', s)
    elif len == 1:
        s = struct.unpack('<B', s)
    else:
        print_out_str("This api used to unpack 1/2/4/8 bytes data, check the len\n")
        exit()
    return s[0]
def parse_logBuf_offset(ramdump):
    """Compute the offset of logd's static logBuf pointer inside .bss.

    Locates a logd symbol file (outdir/symbols/logd, then several
    product-tree layouts for Android P/Q/R), then runs objdump -h to get
    the .bss start address and objdump -t to find the mangled logBuf
    symbol, returning logBuf_addr - bss_sec_start.

    NOTE(review): when no symbol file is found, falls through and
    returns 0 - 0 = 0; callers get a zero offset rather than an error.
    The '\\\\' path splitting assumes Windows-style build paths -- confirm
    on other hosts.
    """
    space = " "
    bss_sec_start = 0
    logBuf_addr = 0
    file_name = "symbols/logd"
    objdump_path = ramdump.objdump_path
    option_header = "-h"
    option_symbols = "-t"
    logd_file_path = os.path.join(ramdump.outdir, file_name)
    if not os.path.exists(logd_file_path):
        # derive the symbols path from the vmlinux location
        logd_tmp = ramdump.vmlinux.split("product")
        product_tmp = logd_tmp[1].split("\\")
        logd_file_path = logd_tmp[0] + "product/" + product_tmp[1] + "\\" + "symbols/system/bin/logd"
        if not os.path.exists(logd_file_path):
            logd_tmp = ramdump.vmlinux.split("product")
            ## android Q
            logd_file_path = logd_tmp[0] + "product/" + product_tmp[0] + "\\" + "symbols/system/bin/logd"
            if not os.path.exists(logd_file_path):
                ##android R
                logd_file_path = logd_tmp[0] + "product/" + "qssi/symbols/system/bin/logd"
    if os.path.exists(logd_file_path):
        print_out_str("logd_file_path = %s\n" %(logd_file_path))
        # objdump -h: section headers -> .bss start address
        cmd = objdump_path + space + option_header + space + logd_file_path
        objdump = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
        out, err = objdump.communicate()
        ## merge the spaces
        out = ' '.join(out.split())
        ## split by space
        data = out.split(space)
        i = 0
        while True:
            if i >=len(data):
                break
            if ".bss" in data[i]:
                # objdump -h columns: Idx Name Size VMA ... -> VMA is 2 after name
                bss_sec_start = int(data[i+2], 16)
                break
            i = i + 1
        if bss_sec_start == 0:
            print_out_str("bss section not found in the logd")
            exit()
        # objdump -t: symbol table -> mangled static logBuf ("ZL" = local)
        cmd = objdump_path + space + option_symbols + space + logd_file_path
        objdump = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
        out, err = objdump.communicate()
        ## merge the spaces
        out = ' '.join(out.split())
        ## split by space
        data = out.split(space)
        i = 0
        while True:
            if i >=len(data):
                break
            if ("logBuf" in data[i]) and ("ZL" in data[i]):
                # symbol value is 5 tokens before the name in objdump -t output
                logBuf_addr = int(data[i-5], 16)
                break
            i = i + 1
        if logBuf_addr == 0:
            print_out_str("logBuf not found in logd\n")
            exit()
    else:
        print_out_str("%s is not found. you can copy the logd symbols file to here.\n" %(logd_file_path));
    return logBuf_addr - bss_sec_start;
def parse_logcat_v2(ramdump):
    """Extract logcat from a ramdump using the Android P/Q/R logd layout.

    Steps: find the logd task and derive its .bss start from the data
    VMAs; resolve the static logBuf pointer via parse_logBuf_offset();
    then walk the circular LogBufferElement list through the logd MMU
    and write logcat_v2_all.txt plus one file per log buffer.
    """
    timestamp="timestamp"
    MSG="msg"
    uid="uid"
    pid="pid"
    tid="tid"
    log_type="log_type"
    priority="level"
    logcat_file_list = ["logcat_v2_all.txt", "logcat_v2_main.txt", "logcat_v2_radio.txt", "logcat_v2_event.txt", "logcat_v2_system.txt",
                        "logcat_v2_crash.txt", "logcat_v2_stats.txt", "logcat_v2_security.txt", "logcat_v2_kernel.txt"]
    log_type_list = ["0:main", "1:radio", "2:event", "3:system", "4:crash", "5:stats", "6:security", "7:kernel", "unknown"]
    priority_list = ["0:UNKNOWN", "1:DEFAULT", "2:VERBOSE", "3:DEBUG", "4:INFO", "5:WARN", "6:ERR", "7:FATAL", "8:SILENT", "wrong priority"]
    logcat_file_id = 0
    log_id_max = 7
    priority_max = 8
    offset_comm = ramdump.field_offset('struct task_struct', 'comm')
    mm_offset = ramdump.field_offset('struct task_struct', 'mm')
    f_path_offset = ramdump.field_offset('struct file', 'f_path')
    dentry_offset = ramdump.field_offset('struct path', 'dentry')
    d_iname_offset = ramdump.field_offset('struct dentry', 'd_iname')
    if ramdump.arm64:
        addr_length = 8
    else:
        addr_length = 4
    # offsets inside std::list node / LogBufferElement -- hard-coded for
    # the matching Android releases, TODO confirm per branch
    first_node_offset = addr_length * 2
    next_node_offset = addr_length
    element_obj_offset = addr_length * 2
    msg_addr_offset = 0x14
    msg_size_offset = msg_addr_offset + addr_length
    logdaddr = 0
    bss_start = 0
    for task in ramdump.for_each_process():
        task_name = task + offset_comm
        task_name = cleanupString(ramdump.read_cstring(task_name, 16))
        if task_name == 'logd':
            mm_addr = ramdump.read_word(task + mm_offset)
            mmap = ramdump.read_structure_field(mm_addr, 'struct mm_struct', 'mmap')
            pgd = ramdump.read_structure_field(mm_addr, 'struct mm_struct', 'pgd')
            pgdp = ramdump.virt_to_phys(pgd)
            start_data = ramdump.read_structure_field(mm_addr, 'struct mm_struct', 'start_data')
            end_data = ramdump.read_structure_field(mm_addr, 'struct mm_struct', 'end_data')
            logdmap = mmap
            # bss section is after data section
            logd_count = 0
            while logdmap != 0:
                tmpstartVm = ramdump.read_structure_field(logdmap, 'struct vm_area_struct', 'vm_start')
                tmpendVm = ramdump.read_structure_field(logdmap, 'struct vm_area_struct', 'vm_end')
                logd_count = logd_count + 1
                if (end_data > tmpstartVm) and (end_data < tmpendVm):
                    # android P and older : 3 logd vma, bss section is just after end_data
                    if logd_count < 3:
                        # data section is 4bytes align while bss section is 8 bytes align
                        bss_start = (end_data + 7) & 0x000000fffffffff8
                    else:
                        # android Q: 2 code vma + 2 data vma + 1bss, bss section is individual vma after end_data
                        if (start_data < tmpstartVm):
                            logdmap = ramdump.read_structure_field(logdmap, 'struct vm_area_struct', 'vm_next')
                            bss_start = ramdump.read_structure_field(logdmap, 'struct vm_area_struct', 'vm_start')
                        else:
                            # android R: 3 code vma and 1 data+bss vma, bss section is just after end_data, data section is addr_length align and bss section is 8 bytes align
                            if ramdump.arm64:
                                bss_start = end_data
                            else:
                                bss_start = (end_data + 7) & 0xfffffff8
                            first_node_offset = addr_length
                    print_out_str("bss_start: 0x%x\n" %(bss_start))
                    break
                logdmap = ramdump.read_structure_field(logdmap, 'struct vm_area_struct', 'vm_next')
            break
    if ramdump.arm64:
        mmu = Armv8MMU(ramdump, pgdp)
    else:
        mmu = Armv7MMU(ramdump, pgdp)
    logbuf_offset = parse_logBuf_offset(ramdump);
    print_out_str("logbuf_offset = 0x%x" %(logbuf_offset))
    logbuf_addr = read_bytes(mmu, ramdump, bss_start + logbuf_offset, addr_length)
    print_out_str("logbuf_addr = 0x%x" %(logbuf_addr))
    first_node_addr = read_bytes(mmu, ramdump, logbuf_addr + first_node_offset, addr_length)
    next_node_addr = first_node_addr
    if next_node_addr is not None:
        print_out_str("first_node_addr = 0x%x\n" %(next_node_addr))
    # open all 9 output files and write the column header into each
    index = 0
    log_file = [0] * 9
    while index < 9:
        log_file[index] = ramdump.open_file(logcat_file_list[index])
        log_file[index].write("\n\n========== PARSE START ==========\n\n\n")
        log_file[index].write(timestamp.ljust(32))
        log_file[index].write(uid.rjust(8))
        log_file[index].write(pid.rjust(8))
        log_file[index].write(tid.rjust(8))
        log_file[index].write("\t")
        log_file[index].write(log_type.ljust(16))
        log_file[index].write(priority.center(16))
        log_file[index].write(MSG.ljust(8))
        log_file[index].write("\n")
        index = index + 1
    # walk the circular list until it wraps back to the first node
    while next_node_addr != 0:
        logbuffer_ele_obj = read_bytes(mmu, ramdump, next_node_addr + element_obj_offset, addr_length)
        next_node_addr = read_bytes(mmu, ramdump, next_node_addr + next_node_offset, addr_length)
        if next_node_addr == first_node_addr:
            break
        logbuffer_ele_obj_phys = mmu.virt_to_phys(logbuffer_ele_obj)
        ## uid pid tid and etc parsed from LogBufferElement
        uid = read_bytes(mmu, ramdump, logbuffer_ele_obj, 4)
        pid = read_bytes(mmu, ramdump, logbuffer_ele_obj + 0x4, 4)
        tid = read_bytes(mmu, ramdump, logbuffer_ele_obj + 0x8, 4)
        tv_second = read_bytes(mmu, ramdump, logbuffer_ele_obj + 0xC, 4)
        tv_second_nano = read_bytes(mmu, ramdump, logbuffer_ele_obj + 0x10, 4)
        msg_addr = read_bytes(mmu, ramdump, logbuffer_ele_obj + msg_addr_offset, addr_length)
        msg_size = read_bytes(mmu, ramdump, logbuffer_ele_obj + msg_size_offset, 2)
        log_id = read_bytes(mmu, ramdump, logbuffer_ele_obj + msg_size_offset + 0x2, 1)
        dropped = read_bytes(mmu, ramdump, logbuffer_ele_obj + msg_size_offset + 0x3, 1)
        # skip the binary events buffer and dropped-count placeholders
        if (log_id == 2) or (dropped == 1) :
            continue
        if logbuffer_ele_obj_phys is None or msg_size == 0:
            break
        uid = str(uid)
        pid = str(pid)
        tid = str(tid)
        tv_second_nano = str(tv_second_nano // 1000)
        tv_second_nano = str(tv_second_nano).zfill(6)
        date = datetime.datetime.utcfromtimestamp(tv_second)
        timestamp = date.strftime("%Y-%m-%d %H:%M:%S") + '.' + tv_second_nano
        # read the message page by page (it may straddle page boundaries)
        min = 0
        x = 0
        level = -1
        MSG = ""
        while msg_size != 0:
            i = 0
            msg_addr = msg_addr + min
            # msg located in the same page
            if msg_size < (0x1000 - msg_addr % 0x1000):
                min = msg_size
            # msg separated in two pages
            else:
                min = 0x1000 - msg_addr % 0x1000
            msg_size = msg_size - min
            msg_addr_phys = mmu.virt_to_phys(msg_addr)
            if msg_addr_phys is None:
                break
            if x == 0:
                ## level is at the first Byte of MSG
                level = read_bytes(mmu, ramdump, msg_addr, 1)
                MSG_tmp = ramdump.read_physical(msg_addr_phys + 1, min - 1)
            else:
                if min == 0:
                    break
                else:
                    MSG_tmp = ramdump.read_physical(msg_addr_phys, min)
            # strip the high bit and flatten NUL/CR/LF into spaces
            MSG_ascii = list(MSG_tmp)
            while i < len(MSG_ascii):
                MSG_ascii[i] = chr(MSG_tmp[i] & 0x7F)
                i = i + 1
            MSG = MSG + ''.join(MSG_ascii)
            MSG = MSG.replace('\000', '\040')
            MSG = MSG.replace('\015', '\040')
            MSG = MSG.replace('\012', '\040')
            x = x + 1
        if (log_id > log_id_max) or (log_id < 0):
            continue
        else:
            logcat_file_id = log_id + 1
        if (level > priority_max) or (level < 0):
            level = priority_max + 1
        log_type = log_type_list[log_id]
        priority = priority_list[level]
        # write the record to the combined file and to the per-buffer file
        log_file[0].write(timestamp.ljust(32))
        log_file[0].write(uid.rjust(8))
        log_file[0].write(pid.rjust(8))
        log_file[0].write(tid.rjust(8))
        log_file[0].write("\t")
        log_file[0].write(log_type.ljust(16))
        log_file[0].write(priority.center(16))
        log_file[0].write(MSG)
        log_file[0].write("\n")
        log_file[logcat_file_id].write(timestamp.ljust(32))
        log_file[logcat_file_id].write(uid.rjust(8))
        log_file[logcat_file_id].write(pid.rjust(8))
        log_file[logcat_file_id].write(tid.rjust(8))
        log_file[logcat_file_id].write("\t")
        log_file[logcat_file_id].write(log_type.ljust(16))
        log_file[logcat_file_id].write(priority.center(16))
        log_file[logcat_file_id].write(MSG)
        log_file[logcat_file_id].write("\n")
        if next_node_addr == first_node_addr:
            break
    index = 0
    while index < 9:
        log_file[index].close()
        index = index + 1
    return
@register_parser('--logcat-v2', 'Extract logcat logs from ramdump ')
class Logcat_v2(RamParser):
    """Ramparser front-end for the v2 logcat extractor."""

    def parse(self):
        # delegate the whole extraction to the module-level worker
        return parse_logcat_v2(self.ramdump)

View File

@@ -0,0 +1,936 @@
# Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import RamParser, cleanupString
from print_out import print_out_str
import struct
import datetime
import dmesglib
import print_out
from parsers.zram import Zram
from utasklib import UTaskLib
from concurrent import futures
import traceback
class Constants:
    """Shared logd constants: buffer names/ids, android priorities,
    serialized entry sizes and event payload element types."""

    LOG_NAME = [ "main", "radio", "events", "system", "crash", "stats", "security", "kernel"]

    # log id
    LOG_ID_MIN = 0
    LOG_ID_MAIN = 0
    LOG_ID_RADIO = 1
    LOG_ID_EVENTS = 2
    LOG_ID_SYSTEM = 3
    LOG_ID_CRASH = 4
    LOG_ID_STATS = 5
    LOG_ID_SECURITY = 6
    LOG_ID_KERNEL = 7
    LOG_ID_MAX = 7

    # log level (android_LogPriority values)
    ANDROID_LOG_DEFAULT = 1
    ANDROID_LOG_VERBOSE = 2
    ANDROID_LOG_DEBUG = 3
    ANDROID_LOG_INFO = 4
    ANDROID_LOG_WARN = 5
    ANDROID_LOG_ERROR = 6
    ANDROID_LOG_FATAL = 7
    ANDROID_LOG_SILENT = 8

    # serialized structure sizes
    SIZEOF_LOG_ENTRY = 30
    SIZEOF_HEADER_T = 4
    SIZEOF_EVT_LIST_T = 2
    SIZEOF_EVT_INT_T = 5
    SIZEOF_EVT_LONG_T = 9
    SIZEOF_EVT_FLOAT_T = 5
    SIZEOF_EVT_STRING_T = 5

    # event payload element types
    EVENT_TYPE_INT = 0      # int32_t
    EVENT_TYPE_LONG = 1     # int64_t
    EVENT_TYPE_STRING = 2
    EVENT_TYPE_LIST = 3
    EVENT_TYPE_FLOAT = 4

    NS_PER_SEC = 1000000000

    def filter_pri_to_char(self, pri):
        """Map an android log priority to its single-letter tag ('?' if unknown)."""
        letters = {
            self.ANDROID_LOG_VERBOSE: 'V',
            self.ANDROID_LOG_DEBUG: 'D',
            self.ANDROID_LOG_INFO: 'I',
            self.ANDROID_LOG_WARN: 'W',
            self.ANDROID_LOG_ERROR: 'E',
            self.ANDROID_LOG_FATAL: 'F',
            self.ANDROID_LOG_SILENT: 'S',
        }
        return letters.get(pri, '?')
class LogEntry(Constants):
    """One decoded logd record plus its text formatting.

    Holds the timestamp, process ids, priority, tag and message, and
    renders the standard 'time uid pid tid P tag: msg' logcat line.
    """
    def __init__(self):
        # wall-clock timestamp (seconds / nanoseconds)
        self.tv_sec = 0
        self.tv_nsec = 0
        # originating process/user/thread ids
        self.pid = 0
        self.uid = 0
        self.tid = 0
        self.prior = self.ANDROID_LOG_INFO
        self.tag = ""
        self.msg = ""
        # entry came from a binary (events) buffer
        self.is_binary = False
        # entry was synthesized from the kernel dmesg
        self.is_dmesg = False
        # timezone correction applied when formatting, in minutes west of UTC
        self.tz_minuteswest = 0
    def wallTime(self):
        """Wall-clock time of this entry in nanoseconds."""
        return self.tv_sec * self.NS_PER_SEC + self.tv_nsec
    def format_time(self):
        """Render 'MM-DD HH:MM:SS.mmm' adjusted by tz_minuteswest."""
        sec = self.tv_sec
        nsec = self.tv_nsec
        sec -= 60 * self.tz_minuteswest
        nsec = str(nsec // 1000000)
        nsec = str(nsec).zfill(3)
        date = datetime.datetime.utcfromtimestamp(sec)
        rtc_timestamp = date.strftime("%m-%d %H:%M:%S") + '.' + nsec
        return rtc_timestamp
    def set_msg(self, msg):
        """Attach the message body (subclasses may post-process it)."""
        self.msg = msg
    def content(self):
        """Format the record body (everything after the timestamp).

        Returns a single string, or a list of per-line strings when a
        plain-text message spans multiple lines.
        """
        if self.is_dmesg and self.tag == "":
            # kernel line without a tag: message doubles as the tag column
            return "%5d %5d %5d %c %-8.*s\n" % (
                        self.uid, self.pid, self.tid, \
                        self.filter_pri_to_char(self.prior), \
                        len(self.msg), self.msg)
        elif self.is_binary:
            return "%5d %5d %5d %c %s: %s\n" % (
                        self.uid, self.pid, self.tid, \
                        self.filter_pri_to_char(self.prior), \
                        cleanupString(self.tag), \
                        cleanupString(self.msg))
        else:
            preifx_line = "%5d %5d %5d %c %-8.*s: " % (
                        self.uid, self.pid, self.tid, \
                        self.filter_pri_to_char(self.prior), \
                        len(self.tag), cleanupString(self.tag))
            multilines = self.msg.split("\n")
            if len(multilines) > 1:
                # repeat the prefix on every non-empty line
                ret = []
                for line in multilines:
                    if(len(cleanupString(line.strip())) > 0):
                        ret.append(preifx_line + "%s\n" % cleanupString(line))
                return ret
            else:
                return preifx_line + "%s\n" % cleanupString(self.msg.strip())
    def __str__(self):
        """Full logcat line(s): timestamp prepended to content()."""
        content = self.content()
        if type(content) is str:
            return "%s %s" % (self.format_time(), content)
        else:
            ret = ""
            for item in content:
                ret += "%s %s" % (self.format_time(), item)
            return ret
class LogEntry_Dmesg(LogEntry):
    """A kernel log record that tracks both monotonic and wall-clock time.

    The dual timestamps plus a whitespace-insensitive message copy allow
    logd's kernel buffer to be de-duplicated and merged with raw dmesg.
    """
    def __init__(self):
        super().__init__()
        # when True, __str__ also prints the monotonic "[sec.usec]" stamp
        self.mono_format = False
        self.mono_tv_sec = 0
        self.mono_tv_nsec = 0
        # message with all spaces removed, for fuzzy duplicate matching
        self.msg_without_space = ""
        self.is_dmesg = True
    def set_msg(self, msg):
        """Set the message and cache its space-stripped form."""
        self.msg = msg
        self.msg_without_space = msg.replace(" ", "")
    def mono_time(self):
        """Return the monotonic timestamp in nanoseconds."""
        return self.mono_tv_sec * self.NS_PER_SEC + self.mono_tv_nsec
    def is_same_content(self, other):
        # suffix match in either direction tolerates truncated copies
        return self.msg_without_space.endswith(other.msg_without_space) or \
            other.msg_without_space.endswith(self.msg_without_space)
    def is_same_time(self, other):
        # same monotonic second and within 1ms counts as the same instant
        return self.mono_tv_sec == other.mono_tv_sec and \
            abs(self.mono_tv_nsec - other.mono_tv_nsec) <= 1000000
    # self > other
    def __cmp__(self,other):
        """Three-way compare by monotonic time; 0 means fuzzy duplicate."""
        if self.is_same_time(other) and \
            self.is_same_content(other):
            return 0
        else:
            return self.mono_time() - other.mono_time()
    def __eq__(self, other):
        return self.is_same_time(other) and \
            self.is_same_content(other)
    def __hash__(self):
        # hash millisecond wall time + spaceless message so fuzzy
        # duplicates land in the same bucket
        walltime_nsec = self.wallTime()
        walltime_msec = (int)(walltime_nsec / 1000000)
        return hash(walltime_msec)*31 + hash(self.msg_without_space)
    def set_mono_time(self, mono_nsec, wall_to_monotonic_tv_sec, wall_to_monotonic_tv_nsec):
        """Set monotonic time; derive wall time via the wall-to-mono offset."""
        self.mono_tv_sec = (int)(mono_nsec / self.NS_PER_SEC)
        self.mono_tv_nsec = mono_nsec % self.NS_PER_SEC
        self.tv_sec = self.mono_tv_sec + wall_to_monotonic_tv_sec
        self.tv_nsec = self.mono_tv_nsec + wall_to_monotonic_tv_nsec
        # carry nanosecond overflow into the seconds field
        if self.tv_nsec >= self.NS_PER_SEC:
            self.tv_sec += 1
            self.tv_nsec -= self.NS_PER_SEC
    def set_rtc_time(self, tv_sec, tv_nsec, wall_to_monotonic_tv_sec, wall_to_monotonic_tv_nsec):
        """Set wall time; derive monotonic time, clamped at zero."""
        self.tv_sec = tv_sec
        self.tv_nsec = tv_nsec
        sec = tv_sec
        nsec = tv_nsec
        # borrow a second when the nanosecond subtraction underflows
        if nsec <= wall_to_monotonic_tv_nsec:
            sec -=1
            nsec = self.NS_PER_SEC + nsec - wall_to_monotonic_tv_nsec
        else:
            nsec = nsec - wall_to_monotonic_tv_nsec
        if sec < wall_to_monotonic_tv_sec:
            # wall time predates the boot offset: clamp monotonic to zero
            sec = 0
            nsec = 0
        else:
            sec = sec - wall_to_monotonic_tv_sec
        self.mono_tv_sec = sec
        self.mono_tv_nsec = nsec
    def format_full_time(self):
        """Wall time plus a dmesg-style "[sec.usec]" monotonic suffix."""
        rtc_timestamp = self.format_time()
        # microseconds, zero-padded to 6 digits
        nsec = str(self.mono_tv_nsec // 1000)
        nsec = str(nsec).zfill(6)
        return rtc_timestamp + "[" + str(self.mono_tv_sec)+"."+str(nsec) + "]"
    def __str__(self):
        if self.mono_format:
            content = self.content()
            if type(content) is str:
                return "%s %s" % (self.format_full_time(), content)
            else:
                ret = ""
                for item in content:
                    ret += "%s %s" % (self.format_time(), item)
                return ret
        else:
            return super().__str__()
class Logcat_base(RamParser, Constants):
    """Base logcat parser: extracts logd's serialized log-buffer chunks
    from a ramdump of the logd process and writes logcat-style files."""
    def __init__(self, ramdump, taskinfo):
        super().__init__(ramdump)
        self.taskinfo = taskinfo
        self.mmu = taskinfo.mmu
        self.logd_task = taskinfo.task_addr
        # per-log-id bytes used / configured max size, for the header line
        self.sizeUsed = {}
        self.maxSize = {}
        self.is_success = False
        # optional zstandard module; compressed chunks are skipped without it
        self.zstd = None
        try:
            self.zstd = __import__('zstandard')
        except ImportError as result:
            print_out_str(str(result)+", try to use command 'py -3 -m pip install zstandard' to install")
            print("\033[1;31m" + str(result) +
                ", try to use command 'py -3 -m pip install zstandard' to install \033[0m")
        # pointer width of the target, not of the host
        if self.ramdump.arm64:
            self.addr_length = 8
        else:
            self.addr_length = 4
        # extra per-entry padding discovered by has_valid_log() retries
        self.extra_offset = 0
        # timezone (minutes west of UTC) from the kernel's sys_tz
        sys_tz_addr = self.ramdump.address_of('sys_tz')
        tz_minuteswest_offset = self.ramdump.field_offset(
            'struct timezone ', 'tz_minuteswest')
        self.tz_minuteswest = self.ramdump.read_s32(sys_tz_addr + tz_minuteswest_offset)
        print_out_str("struct timezone --> tz_minuteswest= "+str(self.tz_minuteswest)+"min")
        print("struct timezone --> tz_minuteswest= "+str(self.tz_minuteswest)+"min")
        # wall-to-monotonic correction, filled in by findCorrection()
        self.wall_to_mono_found = False
        self.wall_to_monotonic_tv_sec = 0
        self.wall_to_monotonic_tv_nsec = 0
        # raw kernel log, loaded by read_dmesg() for merging
        self.dmesg_list={}
        self.zram_parser = Zram(ramdump)
def find_bss_addrs(self):
    """Find candidate .bss VMAs of the logd process.

    Returns a list of [start, end] pairs: every read-write mapping of
    the "logd" binary plus the read-write anonymous mapping immediately
    after it (the bss section follows the data section).
    """
    bss_vms_list = []
    vmalist = self.taskinfo.vmalist
    for index, vma in enumerate(vmalist):
        # rw flags: low two bits of vm_flags must both be set
        if vma.file_name == "logd" and vma.flags & 0b11 == 0b11:
            bss_vms_list.append([vma.vm_start, vma.vm_end])
            # The anonymous rw page right after the logd mapping may hold
            # the tail of .bss. Guard against the logd mapping being the
            # last entry — the original indexed vmalist[index + 1]
            # unconditionally and could raise IndexError.
            if index + 1 < len(vmalist):
                vma_next = vmalist[index + 1]
                if not vma_next.file_name and vma_next.flags & 0b11 == 0b11:
                    bss_vms_list.append([vma_next.vm_start, vma_next.vm_end])
    return bss_vms_list
#return offset of RTC to Mono
def findCorrection(self):
    """Scan logd's .bss for LogBuffer::Correction, the RTC-to-monotonic
    offset logd applies to kernel records.

    Heuristic: looks for 8-byte values 3, 7, 5, 4 at offsets 0, 16, 32
    and 48 from a scan point; the Correction timespec sits 40 bytes
    before that pattern. NOTE(review): the pattern is empirical —
    verify it against the matching logd build.

    Returns (found, sec, nsec).
    """
    sec = 0
    nsec = 0
    found = False
    correction_addr = 0
    bss_addrs = self.find_bss_addrs()
    for bss_start, bss_end in bss_addrs:
        idx = 0
        bss_size = bss_end - bss_start
        while idx < bss_size:
            if self.is_equal(bss_start + idx, 8, 3) and \
                self.is_equal(bss_start + idx + 8*2, 8, 7) and \
                self.is_equal(bss_start + idx + 8*4, 8, 5) and \
                self.is_equal(bss_start + idx + 8*6, 8, 4):
                # Correction timespec lives 40 bytes before the pattern
                correction_addr = bss_start + idx - 40
                sec = self.read_bytes(correction_addr, 4)
                nsec = self.read_bytes(correction_addr + 4, 4)
                found = True
                break
            idx += 8
        if found:
            break
    if found:
        print_out_str(("Found &LogBuffer::Correction=0x%x LogBuffer::Correction=%ld.%ld")
            % (correction_addr, sec, nsec))
    else:
        print_out_str("&LogBuffer::Correction not found")
    return found, sec, nsec
def is_equal(self, addr, lengh, value):
    """Return True when the lengh-byte integer at addr equals value."""
    val = self.read_bytes(addr, lengh)
    return val == value
def read_bytes(self, addr, len):
    """Read a `len`-byte integer from the task's user address space."""
    return UTaskLib.read_bytes(self.ramdump, self.mmu, addr, len, self.zram_parser)
def read_binary(self, addr, len):
    """Read `len` raw bytes from the task's user address space."""
    return UTaskLib.read_binary(self.ramdump, self.mmu, addr, len, self.zram_parser)
def get_output_filename(self, log_id):
    """Return the output file name for a log buffer id, or None when the
    id falls outside [LOG_ID_MIN, LOG_ID_MAX]."""
    if log_id < self.LOG_ID_MIN or log_id > self.LOG_ID_MAX:
        return None
    return "{}_{}.txt".format(self.__class__.__name__, self.LOG_NAME[log_id])
def get_evt_data(self, data_array, pos):
    """Decode one binary-event payload element at data_array[pos].

    The first byte is the event type tag, followed by the value.
    Returns (evt_type, msg, length): msg is the value rendered as a
    string and length is the number of bytes consumed. Returns
    (-1, -1, -1) when the buffer is too short.
    """
    if (pos + 1) > len(data_array):
        return -1, -1, -1
    evt_type = struct.unpack('<B', data_array[pos : pos + 1])[0]
    length = 0
    msg=""
    if evt_type == self.EVENT_TYPE_INT :
        if (pos + self.SIZEOF_EVT_INT_T) > len(data_array):
            return -1, -1, -1
        # NOTE(review): decoded as unsigned ('<I') although the event is
        # declared int32_t; negative values print as large integers
        msg = str(struct.unpack('<I', data_array[pos+1 : pos + self.SIZEOF_EVT_INT_T])[0])
        length = self.SIZEOF_EVT_INT_T
    elif evt_type == self.EVENT_TYPE_LONG:
        if (pos + self.SIZEOF_EVT_LONG_T) > len(data_array):
            return -1, -1, -1
        # NOTE(review): same caveat — '<Q' is unsigned for an int64_t
        msg = str(struct.unpack('<Q', data_array[pos+1 : pos + self.SIZEOF_EVT_LONG_T])[0])
        length = self.SIZEOF_EVT_LONG_T
    elif evt_type == self.EVENT_TYPE_FLOAT:
        if (pos + self.SIZEOF_EVT_FLOAT_T) > len(data_array):
            return -1, -1, -1
        msg = str(struct.unpack('<f', data_array[pos+1 : pos + self.SIZEOF_EVT_FLOAT_T])[0])
        length = self.SIZEOF_EVT_FLOAT_T
    elif evt_type == self.EVENT_TYPE_STRING:
        if (pos + self.SIZEOF_EVT_STRING_T) > len(data_array):
            return -1, -1, -1
        #for event log, msg_len may be 0 like "I 1397638484: [121035042,4294967295,]"
        msg_len = struct.unpack('I', data_array[pos+1 : pos + self.SIZEOF_EVT_STRING_T])[0]
        # the string body follows the 5-byte (type + length) header
        tmpmsg = data_array[pos + self.SIZEOF_EVT_STRING_T : pos+self.SIZEOF_EVT_STRING_T+msg_len]
        length = self.SIZEOF_EVT_STRING_T + msg_len
        msg = tmpmsg.decode('ascii', 'ignore').strip()
    return evt_type, msg, length
def process_log_and_save(self, _data, log_id):
    """Parse a decompressed chunk of text log records into LogEntry objects.

    Record layout: a 30-byte header ('<IIIQIIH': uid, pid, tid,
    sequence, tv_sec, tv_nsec, msg_len) plus a 1-byte priority, then
    msg_len-1 payload bytes holding "tag\\0message". Records of the
    kernel log id become LogEntry_Dmesg so they can be merged with
    raw dmesg later. Returns the list of parsed entries.
    """
    ret=[]
    pos = 0
    while pos < len(_data):
        if pos +self.SIZEOF_LOG_ENTRY > len(_data):
            break
        # header plus the priority byte: 31 bytes in total
        logEntry = struct.unpack('<IIIQIIHB', _data[pos:pos+self.SIZEOF_LOG_ENTRY+1])
        # skip header+priority plus any per-entry padding learned earlier
        pos = pos+self.SIZEOF_LOG_ENTRY + 1 + self.extra_offset
        uid = logEntry[0]
        pid = logEntry[1]
        tid = logEntry[2]
        sequence = logEntry[3]
        tv_sec = logEntry[4]
        tv_nsec = logEntry[5]
        msg_len = logEntry[6]
        priority = logEntry[7]
        if msg_len is None or msg_len < 1:
            break;
        # payload is "tag\0message"; the priority byte was already consumed
        msg = _data[pos:pos+msg_len-1] # last msg_len-1 bytes
        msgList = msg.decode('ascii', 'ignore').split('\0')
        pos = pos + msg_len-1
        if len(msgList) <2:
            continue
        try:
            if log_id == self.LOG_ID_KERNEL:
                entry = LogEntry_Dmesg()
                entry.mono_format = self.wall_to_mono_found
                entry.set_rtc_time(tv_sec, tv_nsec, self.wall_to_monotonic_tv_sec, self.wall_to_monotonic_tv_nsec)
            else:
                entry = LogEntry()
                entry.tv_sec = tv_sec
                entry.tv_nsec = tv_nsec
            entry.pid = pid
            entry.uid = uid
            entry.tid = tid
            entry.prior = priority
            entry.tag = cleanupString(msgList[0].strip())
            entry.set_msg(msgList[1])
            entry.tz_minuteswest = self.tz_minuteswest
            ret.append(entry)
        except Exception as result:
            print_out_str(str(result))
            traceback.print_exc()
    return ret
def process_binary_log_and_save(self, _data):
    """Parse a decompressed chunk of binary event records (events/stats/
    security buffers) into LogEntry objects with is_binary set.

    Each record: 30-byte header ('<IIIQIIH'), a 4-byte tag index, then
    one event element. EVENT_TYPE_LIST is a (type, count) pair followed
    by `count` elements, rendered as "[a,b,...]". Returns the list of
    parsed entries.
    """
    ret=[]
    pos = 0
    while pos < len(_data):
        if pos +self.SIZEOF_LOG_ENTRY > len(_data):
            break
        # 30-byte header; binary records carry no priority byte
        logEntry = struct.unpack('<IIIQIIH', _data[pos : pos + self.SIZEOF_LOG_ENTRY])
        pos = pos + self.SIZEOF_LOG_ENTRY + self.extra_offset
        uid = logEntry[0]
        pid = logEntry[1]
        tid = logEntry[2]
        sequence = logEntry[3]
        tv_sec = logEntry[4]
        tv_nsec = logEntry[5]
        msg_len = logEntry[6]
        # no stored priority in binary events; report them as INFO
        priority = self.ANDROID_LOG_INFO
        if pos + self.SIZEOF_HEADER_T > len(_data):
            break
        tagidx = struct.unpack('<I', _data[pos : pos + self.SIZEOF_HEADER_T])[0] #4 bytes
        pos = pos + self.SIZEOF_HEADER_T
        evt_type, tmpmsg, length = self.get_evt_data(_data,pos)
        pos = pos + length
        if evt_type == -1:
            break
        if evt_type != self.EVENT_TYPE_LIST:
            # single scalar/string event
            entry = LogEntry()
            entry.is_binary = True
            entry.tv_sec = tv_sec
            entry.tv_nsec = tv_nsec
            entry.pid = pid
            entry.uid = uid
            entry.tid = tid
            entry.prior = priority
            entry.tag = str(tagidx)
            entry.set_msg(tmpmsg)
            entry.tz_minuteswest = self.tz_minuteswest
            ret.append(entry)
            continue #--> read next log entry
        if pos + self.SIZEOF_EVT_LIST_T > len(_data):
            break
        # list event: (element type, element count) header
        list_t = struct.unpack('<BB', _data[pos : pos + self.SIZEOF_EVT_LIST_T])
        pos = pos + self.SIZEOF_EVT_LIST_T
        evt_type = list_t[0]
        evt_cnt = list_t[1]
        i = 0
        msg = ""
        # join the list elements with commas
        while i < evt_cnt:
            evt_type, tmpmsg, length = self.get_evt_data(_data,pos)
            if evt_type == -1:
                break
            pos = pos + length
            msg = msg + tmpmsg
            if i < evt_cnt -1:
                msg = msg + ","
            i = i+1
        entry = LogEntry()
        entry.is_binary = True
        entry.tv_sec = tv_sec
        entry.tv_nsec = tv_nsec
        entry.pid = pid
        entry.uid = uid
        entry.tid = tid
        entry.prior = priority
        entry.tag = str(tagidx)
        entry.set_msg("[" + msg + "]")
        entry.tz_minuteswest = self.tz_minuteswest
        ret.append(entry)
    return ret
def process_work_chunk(self, _data, log_id, section, is_binary, write_active):
    """Decompress (if needed) and parse one chunk of log data.

    Runs on a worker thread. write_active == 0 means the chunk is
    zstd-compressed; without the zstandard module such chunks are
    skipped. Returns (log_id, section, entries) where entries is a
    list of LogEntry objects or None on failure.
    """
    if write_active == 0:  ##parse zipped buffer
        if not self.zstd: # no zstd library
            return log_id, section, None
        try:
            _data = self.zstd.ZstdDecompressor().decompress(_data)
        except Exception:
            print_out_str("decompress caused error on logid:section(%d:%d), size(%d)" %(log_id, section, len(_data)))
            # Bug fix: the original called traceback.format_exc() and
            # discarded the result — actually print the traceback.
            traceback.print_exc()
            _data = None
    ret = None
    if _data:
        try:
            if is_binary:
                ret = self.process_binary_log_and_save(_data)
            else:
                ret = self.process_log_and_save(_data, log_id)
        except Exception:
            traceback.print_exc()
    return log_id, section, ret
# loglist is a dict type: {log_id: {section: [LogEntry, ...]}}
def save_log_to_file(self, loglist):
    """Write the parsed buffers to one file per log id.

    The kernel buffer is skipped in the main loop when a wall-to-mono
    correction was found; in that case its sections are collected and
    merged with raw dmesg (combine_dmesg) at the end so duplicate
    lines can be dropped.
    """
    if not loglist or len(loglist) == 0:
        return
    if not self.is_success: # parse success and save to file
        self.is_success = True
    for log_id in loglist.keys():
        if log_id == self.LOG_ID_KERNEL and self.wall_to_mono_found:
            continue
        sections = loglist[log_id]
        if not sections:
            continue
        filename = self.get_output_filename(log_id)
        if filename is None:
            return
        log_file = self.ramdump.open_file(filename)
        for section in sorted(sections.keys()):
            if sections[section] and len(sections[section]) >= 0:
                # write the usage header before the first section
                if section ==0 or len(sections[section]) == 1:
                    head = "{} log buffer used: {}k Max size:{}k\n".format(
                        self.LOG_NAME[log_id],
                        round(self.sizeUsed[log_id]/1024,1),
                        round(self.maxSize[log_id]/1024,1))
                    log_file.write(head)
                head="--------- beginning of {} section: {}\n".format(
                    self.LOG_NAME[log_id], str(section))
                log_file.write(head)
                for item in sections[section]:
                    log_file.write(str(item))
    if not self.wall_to_mono_found:
        return
    # start to combine dmesg with the kernel-buffer records
    dmesgDict = []
    if self.LOG_ID_KERNEL in loglist.keys():
        sections = loglist[self.LOG_ID_KERNEL]
        if sections:
            for section in sorted(sections.keys()):
                dmesgDict.extend(sections[section])
    filename = self.get_output_filename(self.LOG_ID_KERNEL)
    log_file = self.ramdump.open_file(filename)
    if len(dmesgDict) > 0:
        head = "{} log buffer used: {}k Max size:{}k\n".format(
            self.LOG_NAME[self.LOG_ID_KERNEL],
            round(self.sizeUsed[self.LOG_ID_KERNEL]/1024,1),
            round(self.maxSize[self.LOG_ID_KERNEL]/1024,1))
        log_file.write(head)
    same_log_count = 0
    log_added_count = 0
    if len(self.dmesg_list) <= 0:
        # no raw dmesg available; dump logd's kernel records as-is
        for item in dmesgDict:
            log_file.write(str(item))
    else:
        self.combine_dmesg(dmesgDict, log_file)
def combine_dmesg(self, dmesgDict, log_file):
    """Merge raw dmesg lines with logd's kernel-buffer records.

    self.dmesg_list maps monotonic-ns -> (pid, line). Records are
    written in monotonic order; raw lines that fuzzy-match a logd
    record (same time within 1ms, suffix-matching content) are
    counted as duplicates and dropped.
    """
    # compare dmesg with kernel log from logd
    same_log_count = 0
    log_added_count = 0
    keys = sorted(self.dmesg_list)
    dmesg_time_start = keys[0]
    index = 0
    for item in dmesgDict:
        if item.mono_time() < dmesg_time_start:
            # logd record predates the oldest raw dmesg line
            log_file.write(str(item))
        else:
            should_delete = []
            while index < len(keys):
                mono_time = keys[index]
                s_pid = self.dmesg_list[mono_time][0]
                s_line = self.dmesg_list[mono_time][1]
                entry = LogEntry_Dmesg()
                entry.mono_format = self.wall_to_mono_found
                entry.set_mono_time(mono_time, self.wall_to_monotonic_tv_sec, self.wall_to_monotonic_tv_nsec)
                entry.pid = s_pid
                entry.uid = 0
                entry.tid = s_pid
                entry.set_msg(cleanupString(s_line))
                entry.tz_minuteswest = self.tz_minuteswest
                cmpval = item.__cmp__(entry)
                if cmpval > 0: # time before
                    # raw dmesg line is older: emit it and advance
                    log_added_count += 1
                    index += 1
                    log_file.write(str(entry))
                    should_delete.append(mono_time)
                    continue
                elif cmpval == 0:
                    # fuzzy duplicate of the logd record; drop the raw line
                    index += 1
                    same_log_count += 1
                    should_delete.append(mono_time)
                    continue
                else:
                    # logd record is older: emit it, keep the raw line
                    log_file.write(str(item))
                    break
            for time in should_delete:
                del self.dmesg_list[time]
    # flush raw dmesg lines newer than every logd record
    for mono_time in self.dmesg_list:
        s_pid = self.dmesg_list[mono_time][0]
        s_line = self.dmesg_list[mono_time][1]
        entry = LogEntry_Dmesg()
        entry.mono_format = self.wall_to_mono_found
        entry.set_mono_time(mono_time, self.wall_to_monotonic_tv_sec, self.wall_to_monotonic_tv_nsec)
        entry.pid = s_pid
        entry.uid = 0
        entry.tid = s_pid
        entry.set_msg(cleanupString(s_line))
        entry.tz_minuteswest = self.tz_minuteswest
        log_added_count += 1
        log_file.write(str(entry))
    print_out_str("Total dmesg log count %d, same count %d, added count %d" % \
        (len(self.dmesg_list), same_log_count, log_added_count))
def read_dmesg(self):
    """Load the raw kernel log for later merging.

    Presumably keyed by monotonic-ns with (pid, line) values — that is
    how combine_dmesg() indexes it; verify against DmesgLib.
    """
    self.dmesg_list = dmesglib.DmesgLib(self.ramdump, print_out.out_file).get_dmesg_as_dict()
def process_chunklist_and_save(self, logchunk_list_addr):
    """Walk the per-log-id chunk-list array at logchunk_list_addr, read
    every chunk (live or zstd-compressed), parse chunks on a thread
    pool, then write the results out.

    Layout assumptions (offsets within a chunk): +0x00 buffer pointer,
    +addr_length buffer size, +0x10 write_offset_, +0x18 write_active_,
    +0x28 compressed buffer {pointer, size}; each list header occupies
    0x18 bytes. NOTE(review): offsets are build-specific — verify
    against the matching logd binary.
    """
    log_id = 0
    threads = []
    with futures.ThreadPoolExecutor(8) as executor:
        while log_id <= self.LOG_ID_MAX:
            # events/stats/security buffers carry binary event payloads
            is_binary = (log_id == self.LOG_ID_EVENTS) or (
                log_id == self.LOG_ID_STATS) or (log_id == self.LOG_ID_SECURITY)
            #--> address of std::list<SerializedLogChunk>
            _addr = logchunk_list_addr + log_id * 0x18
            #the first element of std::list<SerializedLogChunk>
            first_node_addr = self.read_bytes(_addr + self.addr_length, self.addr_length)
            list_count = self.read_bytes(_addr + self.addr_length *2, self.addr_length )
            section = 0;
            next_node_addr = first_node_addr
            self.sizeUsed[log_id] = 0
            self.maxSize[log_id] = 0
            while (section < list_count):
                current_node = next_node_addr + self.addr_length * 2 #-->SerializedLogChunk
                write_offset = self.read_bytes(current_node + 0x10, 4) #--write_offset_
                write_active = self.read_bytes(current_node + 0x18, 1) #--write_active_
                _data = None
                if write_active == 0: ##parse zipped buffer
                    if self.zstd:
                        compressed_log_addr = current_node + 0x28
                        _data_addr = self.read_bytes(compressed_log_addr, self.addr_length)
                        _data_size = self.read_bytes(
                            compressed_log_addr + self.addr_length, self.addr_length)
                        _data = self.read_binary(_data_addr, _data_size)
                        self.sizeUsed[log_id] = self.sizeUsed[log_id] + _data_size
                else:
                    _data_addr = self.read_bytes(current_node, self.addr_length)
                    _data_size = self.read_bytes(current_node + self.addr_length, self.addr_length)
                    self.sizeUsed[log_id] = self.sizeUsed[log_id] + write_offset
                    self.maxSize[log_id] = _data_size * 4
                    # write_offset is data size for uncompressed secion
                    _data = self.read_binary(_data_addr, write_offset)
                if _data:
                    future = executor.submit(self.process_work_chunk, _data, log_id, section, is_binary, write_active)
                    threads.append(future)
                section = section + 1 # next loop
                next_node_addr = self.read_bytes(next_node_addr + self.addr_length, self.addr_length)
            log_id = log_id + 1
    # gather worker results into {log_id: {section: entries}}
    loglist = {}
    for future in futures.as_completed(threads):
        log_id, section, ret = future.result()
        if not ret:
            continue
        if log_id in loglist:
            sections = loglist[log_id]
        else:
            sections = {}
            loglist[log_id] = sections
        sections[section] = ret
    self.save_log_to_file(loglist)
class Logcat_v3(Logcat_base):
    """Logcat parser that locates the log buffer through callee-saved
    registers (x21/x22/x23) spilled in logd's kernel-mode pt_regs."""
    def __init__(self, ramdump, taskinfo):
        super().__init__(ramdump, taskinfo)
    def get_logbuffer_addr(self):
        """Return candidate LogBuffer addresses.

        Each candidate register value is treated as an object pointer
        whose member at +0x88 points at the LogBuffer.
        """
        stack_offset = self.ramdump.field_offset('struct task_struct', 'stack')
        stack_addr = self.ramdump.read_word(self.logd_task + stack_offset)
        # user-mode pt_regs sit at the top of the kernel stack
        pt_regs_size = self.ramdump.sizeof('struct pt_regs')
        pt_regs_addr = self.ramdump.thread_size + stack_addr - pt_regs_size
        user_regs_addr = pt_regs_addr + self.ramdump.field_offset('struct pt_regs', 'user_regs')
        #find x22 register value
        x22_r_addr = self.ramdump.array_index(user_regs_addr, 'unsigned long', 22)
        x22_value = self.ramdump.read_word(x22_r_addr)
        x22_logbuf_addr = self.read_bytes(x22_value + 0x88, self.addr_length)
        logbuf_addrs = []
        if x22_logbuf_addr and x22_logbuf_addr != 0: # for logd orginal code
            logbuf_addrs.append(x22_logbuf_addr)
            print_out_str("logbuf_addr from x22 = 0x%x" %(x22_logbuf_addr))
        x21_r_addr = self.ramdump.array_index(user_regs_addr, 'unsigned long', 21)
        x21_value = self.ramdump.read_word(x21_r_addr)
        x21_logbuf_addr = self.read_bytes(x21_value + 0x88, self.addr_length)
        if x21_logbuf_addr and x21_logbuf_addr != 0:
            logbuf_addrs.append(x21_logbuf_addr)
            print_out_str("logbuf_addr from x21 = 0x%x" %(x21_logbuf_addr))
        x23_r_addr = self.ramdump.array_index(user_regs_addr, 'unsigned long', 23)
        x23_value = self.ramdump.read_word(x23_r_addr)
        x23_logbuf_addr = self.read_bytes(x23_value + 0x88, self.addr_length)
        # NOTE(review): unlike x21/x22 this accepts 0 (only a failed
        # read, i.e. None, is skipped) — presumably a last-resort
        # fallback; confirm it is not meant to mirror the x21/x22 test.
        if x23_logbuf_addr or x23_logbuf_addr == 0:
            logbuf_addrs.append(x23_logbuf_addr)
            print_out_str("logbuf_addr from x23 = 0x%x" %(x23_logbuf_addr))
        return logbuf_addrs
    def parse(self):
        """Try every candidate LogBuffer until one parses successfully.

        The chunk-list array sits at +0x60 within the LogBuffer.
        Returns True on success.
        """
        self.read_dmesg()
        self.wall_to_mono_found, self.wall_to_monotonic_tv_sec, self.wall_to_monotonic_tv_nsec = self.findCorrection()
        logbuf_addrs = self.get_logbuffer_addr()
        for __logbuf_addr in logbuf_addrs:
            logchunk_list_addr = __logbuf_addr + 0x60
            try:
                self.process_chunklist_and_save(logchunk_list_addr)
            except Exception as e:
                print(str(e))
                traceback.print_exc()
            if self.is_success:
                print_out_str("logbuf_addr = 0x%x" %(__logbuf_addr))
                break
        return self.is_success
class Logcat_vma(Logcat_base):
    """Fallback logcat parser: finds the log-buffer chunk lists by brute
    force scanning of logd's rw VMAs instead of via saved registers."""
    def __init__(self, ramdump, taskinfo):
        super().__init__(ramdump, taskinfo)
        self.HEAD_SIZE = 32
        # cached rw mappings: dicts with vmstart/size/data keys
        self.vmas = []
        # largest plausible pid/uid, used by the entry sanity check
        if int(ramdump.get_config_val("CONFIG_BASE_SMALL")) == 0:
            self.PID_MAX = 0x8000
        else:
            self.PID_MAX = 0x1000
        print_out_str("max pid = " + str(self.PID_MAX))
def read_bytes(self, addr, len):
    """Read a 1/2/4/8-byte little-endian value from the cached rw VMAs.

    Overrides the base class so lookups are served from the prefetched
    VMA data. Address tag bits above bit 47 are masked off. Returns 0
    when addr is not covered by any cached mapping.
    NOTE(review): the `len` parameter shadows the builtin, and an
    unsupported width terminates the whole process via exit().
    """
    addr = addr & 0x0000ffffffffffff
    for vma in self.vmas:
        if addr >= vma["vmstart"] and addr+len <= vma["vmstart"]+vma["size"]:
            offset = addr - vma["vmstart"]
            if len == 8 and offset+8 <=vma["size"]:
                s = struct.unpack('<Q', vma["data"][offset:offset+8])
                return s[0]
            elif len == 4 and offset+4 <=vma["size"]:
                s = struct.unpack('<I', vma["data"][offset:offset+4])
                return s[0]
            elif len == 2 and offset+2 <=vma["size"]:
                s = struct.unpack('<H', vma["data"][offset:offset+2])
                return s[0]
            elif len == 1 and offset+1 <=vma["size"]:
                s = struct.unpack('<B', vma["data"][offset:offset+1])
                return s[0]
            else:
                print_out_str("This api used to unpack 1/2/4/8 bytes data, check the len\n")
                exit()
    return 0
def read_binary(self, addr, len):
    """Read `len` raw bytes from the cached rw VMAs.

    Returns b'' when addr is not covered by any cached mapping.
    """
    addr = addr & 0x0000ffffffffffff
    for vma in self.vmas:
        if addr >= vma["vmstart"] and addr+len <= vma["vmstart"]+vma["size"]:
            offset = addr - vma["vmstart"]
            # clamp to the end of the mapping (defensive; the guard
            # above already ensures offset + len <= size)
            if offset + len >= vma["size"]:
                len = vma["size"] - offset
            return vma["data"][offset:offset+len]
    return b''
def get_vmas_with_rw(self):
    """Populate self.vmas with every read+write mapping of the task,
    caching each mapping's raw bytes for fast local lookups."""
    rw_mask = 0b11
    for mapping in self.taskinfo.vmalist:
        if mapping.flags & rw_mask != rw_mask:
            continue
        start = mapping.vm_start
        size = mapping.vm_end - mapping.vm_start
        self.vmas.append({
            "vmstart": start,
            "size": size,
            "data": super().read_binary(start, size),
        })
def has_valid_log(self, main_chunklist_addr):
    """Sanity-check a candidate main-log chunk list by decoding entries.

    Reads the list's first word as the tail node and validates up to 10
    consecutive entries from that chunk's buffer; if that fails, retries
    assuming 4 extra pad bytes per entry (some logd builds align entries
    differently) and, on success, records the pad in self.extra_offset
    for the real parse. Returns True when the entries decode.
    """
    end_node_addr = self.read_bytes(main_chunklist_addr, self.addr_length)
    current_node = end_node_addr + self.addr_length * 2 #-->SerializedLogChunk
    _data_addr = self.read_bytes(current_node, self.addr_length)
    _data_size = self.read_bytes(current_node + self.addr_length, self.addr_length)
    if _data_size <= self.SIZEOF_LOG_ENTRY:
        return False
    valid = self.has_valid_LogEntrys(_data_addr, _data_size, 10)
    if valid is False: ## retry with a 4-byte per-entry pad
        extra_log_entry_offset = 4
        valid = self.has_valid_LogEntrys(_data_addr, _data_size, 10, extra_log_entry_offset)
        if valid:
            self.extra_offset = extra_log_entry_offset
    return valid
def has_valid_LogEntrys(self, _addr, _data_size, valid_count, extra_offset=0):
    """Heuristically verify that up to valid_count consecutive records
    starting at _addr decode as plausible LogEntry headers.

    extra_offset is additional padding assumed after each entry.
    Returns False on the first implausible record, True otherwise.
    """
    offset = 0
    for _ in range(valid_count):
        ok, entry_len = self.is_valid_LogEntry(_addr + offset)
        if not ok:
            return False
        offset += entry_len + extra_offset
        if offset > _data_size:
            # Walked off the end of the chunk: everything seen so far
            # decoded fine, so accept.
            return True
    return True
def is_valid_LogEntry(self, _addr):
    """Check whether _addr plausibly starts a serialized LogEntry.

    Header layout (byte offsets): 0x00 uid, 0x04 pid, 0x08 tid (4 bytes
    each), 0x0c sequence (8), 0x14 tv_sec, 0x18 tv_nsec (4 each),
    0x1c msg_len (2); the 1-byte priority is the first payload byte.
    Only the fields actually tested are read — the original also read
    tid/sequence/tv_sec/tv_nsec and discarded them.

    Returns (True, total_entry_length) when uid/pid, priority and
    msg_len pass sanity checks, else (False, 0).
    """
    uid = self.read_bytes(_addr, 4)
    pid = self.read_bytes(_addr + 0x4, 4)
    msg_len = self.read_bytes(_addr + 0x1c, 2)
    # priority is the first byte of the message payload
    priority = self.read_bytes(_addr + self.SIZEOF_LOG_ENTRY, 1)
    if (pid <= self.PID_MAX and uid <= self.PID_MAX
            and self.ANDROID_LOG_VERBOSE <= priority <= self.ANDROID_LOG_SILENT):
        if 1 <= msg_len <= 4068:  # max_payload
            # 30-byte header + payload
            return True, 30 + msg_len
    return False, 0
def parse(self):
    """Entry point: cache rw VMAs, scan them for the chunk-list array,
    validate the candidate, then parse and save all buffers.

    Returns True when at least one buffer was parsed successfully.
    """
    self.read_dmesg()
    startTime = datetime.datetime.now()
    self.get_vmas_with_rw()
    # find address of std::list<SerializedLogChunk>
    chunklist_addr = 0
    for vma in self.vmas:
        offset = self.find_log_chunklist_addr(vma)
        if offset != 0:
            chunklist_addr = vma["vmstart"]+offset
            break
    if chunklist_addr == 0:
        print("logbuf_addr was not found")
        return False
    # make sure the candidate actually decodes as log entries
    is_valid_chunklist = self.has_valid_log(chunklist_addr + self.LOG_ID_MAIN * 0x18)
    if not is_valid_chunklist:
        # -0x60 converts back to the LogBuffer base for reporting
        print_out_str("There is no valid log in logbuf_addr = 0x%x" %(chunklist_addr-0x60))
        return False
    # start parsing
    self.process_chunklist_and_save(chunklist_addr)
    print_out_str("logbuf_addr = 0x%x" % (chunklist_addr-0x60))
    print("logcat_vma parse logcat cost "+str((datetime.datetime.now()-startTime).total_seconds())+" s")
    return self.is_success
def find_log_chunklist_addr(self, vma):
    """Scan one cached rw VMA for the per-log-id chunk-list array.

    Probes every 4-byte-aligned offset; a hit must look like a valid
    chunk list for log id 0 AND for every other log id at +0x18*id
    (the per-log-id lists are laid out contiguously, 0x18 bytes each).

    Bug fix: on a partial match (log id 0 looked valid but a later id
    did not) the original returned 0 immediately, abandoning the rest
    of the VMA; now the scan continues so a later real match is found.

    Returns the array's offset within the VMA, or 0 when not found.
    """
    vma_size = vma["size"]
    vma_data = vma["data"]
    offset = 0
    while offset < vma_size:
        if self.is_log_chunklist_addr(vma_data, offset):
            for log_id in range(1, self.LOG_ID_MAX + 1):
                if not self.is_log_chunklist_addr(vma_data, offset + 0x18 * log_id):
                    # false positive: keep scanning from the next offset
                    break
            else:
                return offset
        offset = offset + 4
    return 0
def is_log_chunklist_addr(self, vma_data, offset):
    """Return True when vma_data[offset:offset+24] plausibly holds a
    std::list<SerializedLogChunk> header (node ptr, node ptr, count).

    Walks the nodes forward, checking each node's back-link and that
    each node payload looks like a SerializedLogChunk, until the tail
    node is reached. NOTE(review): the assumed field order matches the
    list layout of this logd build — confirm for other toolchains.
    """
    if offset+24 > len(vma_data):
        return False
    nodes = struct.unpack('<QQQ', vma_data[offset:offset+24])
    tail_node_addr = nodes[0]
    head_node_addr = nodes[1]
    list_count = nodes[2]
    # null pointers or an absurd element count: not a list header
    if tail_node_addr ==0 or head_node_addr ==0 or list_count > 1000:
        return False
    next_node_addr = head_node_addr
    prev_node = self.read_bytes(head_node_addr, self.addr_length) # prev_node = next_node_addr->prev
    if prev_node == 0:
        return False
    if list_count == 0 and head_node_addr == tail_node_addr: # empty list
        return True
    index = 0
    # follow next pointers, validating back-links and chunk payloads
    while next_node_addr != 0 and index < list_count:
        next_prev_node = self.read_bytes(next_node_addr, self.addr_length)
        if not next_prev_node or next_prev_node != prev_node:
            return False
        current_node = next_node_addr + self.addr_length *2
        is_chunk = self.is_log_chunk_addr(current_node)
        if not is_chunk:
            return False
        if next_node_addr == tail_node_addr: # loop complete
            return True
        prev_node = next_node_addr
        next_node_addr = self.read_bytes(next_node_addr + self.addr_length, self.addr_length)
        index = index +1
    return False
def is_log_chunk_addr(self, addr):
    """Return True when addr plausibly points at a SerializedLogChunk."""
    data_addr = self.read_bytes(addr, self.addr_length)
    data_size = self.read_bytes(addr + self.addr_length, self.addr_length)
    write_offset = self.read_bytes(addr + 0x10, 4)  #--write_offset_
    write_active = self.read_bytes(addr + 0x18, 1)  #--write_active_
    compress_data_addr = self.read_bytes(addr + 0x28, self.addr_length)
    compress_data_size = self.read_bytes(addr + 0x28 + self.addr_length, self.addr_length)
    # both variants require a non-zero write offset
    if write_offset == 0:
        return False
    if write_active == 1:
        # live chunk: uncompressed buffer must be sane
        return data_addr != 0 and data_size != 0 and write_offset < data_size
    if write_active == 0:
        # finished chunk: compressed buffer must be sane
        return compress_data_addr != 0 and compress_data_size != 0 \
            and compress_data_size < write_offset
    return False

View File

@@ -0,0 +1,598 @@
# Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import linux_list
from parser_util import register_parser, RamParser
from operator import itemgetter
from collections import OrderedDict
@register_parser('--lpm', 'Parse LPM Driver info')
class lpm(RamParser):
def __init__(self, *args):
    """Initialise empty parser state before --lpm parsing runs."""
    super().__init__(*args)
    # accumulated report lines
    self.output = []
    # cluster-list traversal state
    self.head = ''
    self.clusters = []
    self.lpm_debug = []
    # cpu masks read from the dump
    self.cpu_possible_bits = None
    self.cpu_online_bits = 0
    self.related_cpus_bits = None
def get_bits(self):
    """Append the available and online CPU lists to the report.

    Possible CPUs come from cpu_possible_bits/__cpu_possible_mask; on
    kernels >= 4.9 with SMP the online mask is rebuilt from each
    runqueue's 'online' flag, otherwise it is read from
    cpu_online_bits/__cpu_online_mask.
    NOTE(review): read_int covers only the first word of a large
    cpumask, and the printing loop consumes self.cpu_online_bits
    (it is left as 0 afterwards) — confirm both are acceptable.
    """
    bits_addr = self.ramdump.address_of('cpu_possible_bits')
    if bits_addr is None:
        # newer kernels renamed the symbol
        bits_addr = self.ramdump.address_of('__cpu_possible_mask')
        if bits_addr is None:
            self.output.append("NOTE: 'cpu_possible_bits' not found")
            return
    self.cpu_possible_bits = self.ramdump.read_int(bits_addr)
    cpus = bin(self.cpu_possible_bits).count('1')
    self.output.append("{}\n".format('Available CPUs'))
    for i in self.ramdump.iter_cpus():
        self.output.append("{:10}{}:{}\n".format("", "CPU", i))
    self.output.append("\n")
    if (self.ramdump.kernel_version >= (4, 9, 0)):
        if self.ramdump.is_config_defined('CONFIG_SMP'):
            # rebuild the online mask from the per-cpu runqueues
            runqueues_addr = self.ramdump.address_of('runqueues')
            online_offset = self.ramdump.field_offset('struct rq', 'online')
            for i in self.ramdump.iter_cpus():
                online = self.ramdump.read_int(runqueues_addr + online_offset, cpu=i)
                self.cpu_online_bits |= (online << i)
    else:
        bits_addr = self.ramdump.address_of('cpu_online_bits')
        if bits_addr is None:
            bits_addr = self.ramdump.address_of('__cpu_online_mask')
            if bits_addr is None:
                self.output.append("NOTE: 'cpu_online_bits' not found")
                return
        self.cpu_online_bits = self.ramdump.read_int(bits_addr)
    self.output.append("{}\n".format('Online CPUs'))
    index = 0
    # shift through the mask, printing each set bit's cpu index
    while self.cpu_online_bits:
        if self.cpu_online_bits & 1:
            self.output.append("{:10}{}:{}\n".format("", "CPU", index))
        self.cpu_online_bits = self.cpu_online_bits >> 1
        index += 1
    self.output.append("{}{}{}".format("\n", "-" * 120, "\n"))
def get_cluster_level_info(self, lpm_cluster):
    """Dump per-level details for one lpm_cluster: level counts plus,
    for each lpm_cluster_level, its (pre-4.9) mode, name, minimum
    child level, cpu vote mask and idle/suspend availability."""
    offset = self.ramdump.field_offset('struct lpm_cluster', 'nlevels')
    nlevels = self.ramdump.read_int(lpm_cluster + offset)
    self.output.append("{:20}:{}\n".format("number of levels", nlevels))
    offset = self.ramdump.field_offset('struct lpm_cluster', 'min_child_level')
    node = self.ramdump.read_int(lpm_cluster + offset)
    self.output.append("{:20}:{}\n".format("min child level", node))
    offset = self.ramdump.field_offset('struct lpm_cluster', 'default_level')
    node = self.ramdump.read_int(lpm_cluster + offset)
    self.output.append("{:20}:{}\n".format("default level", node))
    offset = self.ramdump.field_offset('struct lpm_cluster', 'last_level')
    node = self.ramdump.read_int(lpm_cluster + offset)
    self.output.append("{:20}:{}\n".format("last_level", node))
    offset = self.ramdump.field_offset('struct lpm_cluster', 'levels')
    levels = lpm_cluster + offset
    self.output.append("\n")
    cluster_level_size = self.ramdump.sizeof('struct lpm_cluster_level')
    for i in range(nlevels):
        # ToDo: Need a better way to arrive at the next level info.
        level = levels + (i * cluster_level_size)
        # Case to handle 'mode'.'mode' removed in kernel version ( >= 4.9) for core power lpm drivers.
        if (self.ramdump.kernel_version < (4,9,0) ):
            offset = self.ramdump.field_offset('struct lpm_cluster_level', 'mode')
            addr = self.ramdump.read_word(level + offset, True)
            node = self.ramdump.read_int(addr)
            self.output.append("{:20}:{}\n".format("level mode", node))
        offset = self.ramdump.field_offset('struct lpm_cluster_level', 'level_name')
        addr = self.ramdump.read_word(level + offset, True)
        name = self.ramdump.read_cstring(addr, 48)
        self.output.append("{:20}:{}\n".format("level name", name))
        offset = self.ramdump.field_offset('struct lpm_cluster_level', 'min_child_level')
        addr = level + offset
        node = self.ramdump.read_int(addr)
        self.output.append("{:20}:{}\n".format("min child level", node))
        offset = self.ramdump.field_offset('struct lpm_cluster_level', 'num_cpu_votes')
        addr = level + offset
        node = self.ramdump.read_int(addr)
        self.output.append("{:20}:{}({})\n".format("num cpu votes", hex(node).rstrip("L"), bin(node).lstrip("0b")))
        offset = self.ramdump.field_offset('struct lpm_cluster_level', 'available')
        addr = level + offset
        offset = self.ramdump.field_offset('struct lpm_level_avail', 'idle_enabled')
        node = self.ramdump.read_bool(addr + offset)
        self.output.append("{:20}:{}\n".format("idle_enabled", node))
        offset = self.ramdump.field_offset('struct lpm_level_avail', 'suspend_enabled')
        node = self.ramdump.read_bool(addr + offset)
        self.output.append("{:20}:{}\n".format("suspend_enabled", node))
        self.output.append("\n")
def get_cluster_info(self, lpm_cluster):
    """Dump one lpm_cluster's name, child cpu mask and in-sync count."""
    offset = self.ramdump.field_offset('struct lpm_cluster', 'cluster_name')
    addr = self.ramdump.read_word(lpm_cluster + offset, True)
    node = self.ramdump.read_cstring(addr, 48)
    self.output.append("{:20}:{}\n".format("Cluster Name", node))
    offset = self.ramdump.field_offset('struct lpm_cluster', 'child_cpus')
    node = self.ramdump.read_int(lpm_cluster + offset)
    self.output.append("{:20}:{}({})\n".format("child_cpus", hex(node).rstrip("L"), bin(node).lstrip("0b")))
    # the field was renamed num_childs_in_sync -> num_children_in_sync
    # outside the 3.14..3.18 kernel range
    if (self.ramdump.kernel_version >= (3, 18, 0) or
            self.ramdump.kernel_version < (3, 14, 0) ):
        offset = self.ramdump.field_offset(
            'struct lpm_cluster', 'num_children_in_sync')
    else:
        offset = self.ramdump.field_offset(
            'struct lpm_cluster', 'num_childs_in_sync')
    node = self.ramdump.read_int(lpm_cluster + offset)
    self.output.append("{:20}:{}({})\n".format(
        "num_children_in_sync", hex(node).rstrip("L"),
        bin(node).lstrip("0b")))
    self.output.append("\n")
def lpm_walker(self, lpm_cluster):
    """List-walker callback: collect every cluster node except the head."""
    if lpm_cluster != self.head:
        self.clusters.append(lpm_cluster)
def get_pm_domains(self):
    """Walk the kernel's gpd_list and report every generic PM domain
    via the get_pm_domain_info callback."""
    gpd_offset = self.ramdump.field_offset('struct generic_pm_domain', 'gpd_list_node')
    head = self.ramdump.read_word(self.ramdump.address_of('gpd_list'), True)
    # remember the list head so the callback can skip it
    self.head = head
    gpd_walker = linux_list.ListWalker(self.ramdump, head, gpd_offset)
    gpd_walker.walk(head, self.get_pm_domain_info)
def get_pm_domain_info(self, node):
    """List-walker callback: for one generic_pm_domain, report each
    genpd power state's idle time, usage and rejected counts."""
    if node == self.head:
        return
    name_offset = self.ramdump.field_offset('struct generic_pm_domain', 'name')
    name = self.ramdump.read_cstring(self.ramdump.read_word(node + name_offset))
    if not name:
        return
    name += ":idle_states:S{}"
    state_count = self.ramdump.read_structure_field(node, 'struct generic_pm_domain', 'state_count')
    #sanity check for state count , CPUIDLE_STATE_MAX
    if state_count >= 10:
        return
    accounting_time = self.ramdump.read_structure_field(node, 'struct generic_pm_domain', 'accounting_time')
    gpd_power_state_size = self.ramdump.sizeof('struct genpd_power_state')
    power_state_offset = self.ramdump.field_offset('struct generic_pm_domain', 'states')
    for i in range(state_count):
        # NOTE(review): this dereferences the word at
        # node + states_offset + i*state_size rather than
        # states_pointer + i*state_size — presumably correct only if
        # 'states' is laid out inline here; verify for i > 0.
        power_state_addr = self.ramdump.read_word(node + power_state_offset + i * gpd_power_state_size)
        msec = self.ramdump.read_structure_field(power_state_addr, 'struct genpd_power_state', 'idle_time')
        # time since the domain's accounting snapshot, scaled to msec
        ktime = self.ramdump.read_s64('last_jiffies_update') - accounting_time
        ktime = ktime // 10 ** 6
        if msec:
            msec += ktime
        usage = self.ramdump.read_structure_field(power_state_addr, 'struct genpd_power_state', 'usage')
        rejected = self.ramdump.read_structure_field(power_state_addr, 'struct genpd_power_state', 'rejected')
        residency_ns = self.ramdump.read_structure_field(power_state_addr, 'struct genpd_power_state','residency_ns')
        # substitute "0" for unreadable/zero fields so the row prints
        if not msec:
            msec = "0"
        if not usage:
            usage = "0"
        if not rejected:
            rejected = "0"
        if name:
            msg = u"{name:30} {msec:30} {usage:30} {rejected:30}".format(name=name.format(i),msec=msec,usage=usage,rejected=rejected)
            self.output.append(msg)
            self.output.append("\n")
    return
def print_pm_domain_info(self):
    """Emit the PM-domain table title, header, rows, and trailing rule."""
    header = "{:30}{:30}{:30}{:30} \n".format(
        "Power domain", "Time Spent(msec)", "Usage ", "Rejected ")
    self.output.append("{}\n".format('Power Domain Info: '))
    self.output.append(header)
    self.get_pm_domains()
    self.output.append("{}{}".format("-" * 81, "\n"))
def get_clusters(self):
    """Collect lpm_root_node and all of its child clusters into self.clusters."""
    lpm_root_node = self.ramdump.read_word(
        self.ramdump.address_of('lpm_root_node'), True)
    if lpm_root_node is None:
        self.output_file.write("NOTE: 'lpm_root_node' not found\n")
        return
    self.clusters.append(lpm_root_node)
    offset = self.ramdump.field_offset('struct lpm_cluster', 'child')
    lpm_cluster = self.ramdump.read_word(lpm_root_node + offset, True)
    # The 'child' list head lives inside the root node; remember it so
    # lpm_walker() can stop at the sentinel.
    self.head = lpm_root_node + offset
    offset = self.ramdump.field_offset('struct lpm_cluster', 'list')
    lpm_walker = linux_list.ListWalker(self.ramdump, lpm_cluster, offset)
    lpm_walker.walk(lpm_cluster, self.lpm_walker)
def get_cpu_level_info(self, cpu_cluster_base, cpu, cpu_level):
    """Print every idle level of one CPU with its idle/suspend enable bits."""
    self.output.append("{:20}:{}\n".format("CPU", cpu))
    if self.ramdump.kernel_version >= (4,9,0):
        # >= 4.9: caller already passed the struct lpm_cpu pointer; no-op.
        cpu_level = cpu_level
    else:
        # Older kernels: resolve this CPU's cluster, then its lpm_cpu.
        cpu_cluster = self.ramdump.read_word(cpu_cluster_base, cpu=cpu)
        offset = self.ramdump.field_offset('struct lpm_cluster', 'cpu')
        cpu_level = self.ramdump.read_word(cpu_cluster + offset, True)
    offset = self.ramdump.field_offset('struct lpm_cpu', 'nlevels')
    nlevels = self.ramdump.read_int(cpu_level + offset, True)
    self.output.append("{:20}:{}\n".format("number of levels", nlevels))
    offset = self.ramdump.field_offset('struct lpm_cpu', 'levels')
    levels = cpu_level + offset
    self.output.append("\n")
    cpu_level_available = self.ramdump.address_of('cpu_level_available')
    if cpu_level_available is None:
        self.output.append("NOTE: 'cpu_level_available' not found\n")
        return
    # cpu_level_available is an array of per-cpu pointers; index it,
    # then dereference to this CPU's lpm_level_avail array.
    cpu_level_available = cpu_level_available + self.ramdump.sizeof('long') * cpu
    cpu_level_available = self.ramdump.read_word(cpu_level_available, True)
    for i in range(0, nlevels):
        level = levels + (i * self.ramdump.sizeof('struct lpm_cpu_level'))
        offset = self.ramdump.field_offset('struct lpm_cpu_level', 'name')
        addr = self.ramdump.read_word(level + offset, True)
        node = self.ramdump.read_cstring(addr, 48)
        self.output.append("{:20}:{}\n".format("level name", node))
        # Case to handle 'mode'.'mode' removed in kernel version ( > = 4.9) for core power lpm drivers.
        if (self.ramdump.kernel_version < (4,9,0) ):
            offset = self.ramdump.field_offset('struct lpm_cpu_level', 'mode')
            node = self.ramdump.read_int(level + offset, True)
            self.output.append("{:20}:{}\n".format("level mode", node))
        level_available = cpu_level_available + i * self.ramdump.sizeof('struct lpm_level_avail')
        offset = self.ramdump.field_offset('struct lpm_level_avail', 'idle_enabled')
        node = self.ramdump.read_bool(level_available + offset)
        self.output.append("{:20}:{}\n".format("idle enabled", node))
        offset = self.ramdump.field_offset('struct lpm_level_avail', 'suspend_enabled')
        node = self.ramdump.read_bool(level_available + offset, True)
        self.output.append("{:20}:{}\n".format("suspend enabled", node))
        self.output.append("\n")
    self.output.append("{}{}".format("-" * 120, "\n"))
def get_lpm(self):
    """Print cluster-level and per-cpu low-power-mode information."""
    self.get_clusters()
    for i in self.clusters:
        self.get_cluster_info(i)
        self.get_cluster_level_info(i)
        self.output.append("{}{}".format("-" * 120, "\n"))
    # The per-cpu data hangs off different globals depending on kernel.
    if self.ramdump.kernel_version >= (4,9,0):
        cpu_cluster_base = self.ramdump.address_of('lpm_root_node')
    else:
        cpu_cluster_base = self.ramdump.address_of('cpu_cluster')
    if cpu_cluster_base is None:
        self.output.append("NOTE: 'cpu_cluster' not found\n")
        return
    if self.ramdump.kernel_version >= (4,9,0):
        cpu_cluster = self.ramdump.read_word(cpu_cluster_base)
        related_cpus_offset = self.ramdump.field_offset('struct lpm_cpu', 'related_cpus')
        bits_offset = self.ramdump.field_offset('struct cpumask', 'bits')
        offset = self.ramdump.field_offset('struct lpm_cluster', 'cpu')
        clust_node_list = ['next','prev']
        for clust_node in clust_node_list:
            cpunode_offset = self.ramdump.field_offset('struct list_head', clust_node)
            # NOTE(review): 'offset' accumulates across iterations
            # (cpu+next, then cpu+next+prev) -- verify this is the
            # intended way to reach both list neighbours.
            offset = offset + cpunode_offset
            cpu_level = self.ramdump.read_word(cpu_cluster + offset, True)
            self.related_cpus_bits = self.ramdump.read_int(cpu_level + related_cpus_offset + bits_offset, True)
            cpus = bin(self.related_cpus_bits).count('1')
            cpu_info = self.related_cpus_bits
            cpu_count = 0
            # Walk the related_cpus mask bit by bit.
            while (cpu_info):
                if ( cpu_info & 0x1):
                    self.get_cpu_level_info(cpu_cluster_base, cpu_count,cpu_level)
                cpu_info = cpu_info >> 0x1
                cpu_count = cpu_count + 1
    else:
        cpus = bin(self.cpu_possible_bits).count('1')
        for i in range(0, cpus):
            self.get_cpu_level_info(cpu_cluster_base, i,0x0)
def get_time_stats(self, tstats, nlevels):
    """Dump name/success/failure counters for each of the 'nlevels'
    struct level_stats entries in the array starting at 'tstats'."""
    for i in range(nlevels):
        lstats = tstats + i * self.ramdump.sizeof('struct level_stats')
        offset = self.ramdump.field_offset('struct level_stats', 'name')
        addr = self.ramdump.read_word(lstats + offset, True)
        # 'addr' already points at the name string; the previous code
        # added the 'name' field offset a second time and read past it.
        self.output.append("{:20}:{}\n".format("lpm name", self.ramdump.read_cstring(addr, 48)))
        offset = self.ramdump.field_offset('struct level_stats', 'success_count')
        self.output.append("{:20}:{}\n".format("success_count", self.ramdump.read_int(lstats + offset, True)))
        offset = self.ramdump.field_offset('struct level_stats', 'failed_count')
        self.output.append("{:20}:{}\n".format("failed_count", self.ramdump.read_int(lstats + offset, True)))
        self.output.append("\n")
def get_cluster_stats(self, cluster):
    """Print the lpm_stats block hanging off one cluster."""
    rd = self.ramdump
    stats = rd.read_word(cluster + rd.field_offset('struct lpm_cluster', 'stats'), True)
    name = rd.read_cstring(stats + rd.field_offset('struct lpm_stats', 'name'), 48)
    self.output.append("{} {}\n\n".format(name, "lpm stats"))
    nlevels = rd.read_int(stats + rd.field_offset('struct lpm_stats', 'num_levels'), True)
    tstats = rd.read_word(stats + rd.field_offset('struct lpm_stats', 'time_stats'), True)
    self.get_time_stats(tstats, nlevels)
    self.output.append("{}{}".format("-" * 120, "\n"))
def get_cpu_stats(self, cpu_stats_base, cpu):
    """Print the per-cpu lpm_stats instance for one CPU."""
    rd = self.ramdump
    stats = cpu_stats_base + rd.per_cpu_offset(cpu)
    name = rd.read_cstring(stats + rd.field_offset('struct lpm_stats', 'name'), 48)
    self.output.append("{} {}\n\n".format(name, "lpm stats"))
    nlevels = rd.read_int(stats + rd.field_offset('struct lpm_stats', 'num_levels'), True)
    tstats = rd.read_word(stats + rd.field_offset('struct lpm_stats', 'time_stats'), True)
    self.get_time_stats(tstats, nlevels)
    self.output.append("{}{}".format("-" * 120, "\n"))
def get_stats(self):
    """Print lpm statistics for every collected cluster and every CPU.

    Removed the dead local 'cpus' (computed from cpu_possible_bits but
    never used; iter_cpus() drives the loop).
    """
    for i in self.clusters:
        self.get_cluster_stats(i)
    cpu_stats_base = self.ramdump.address_of('cpu_stats')
    if cpu_stats_base is None:
        self.output.append("NOTE: 'cpu_stats' not found\n")
        return
    for i in self.ramdump.iter_cpus():
        self.get_cpu_stats(cpu_stats_base, i)
def get_debug_phys(self):
    """Snapshot the 256-entry lpm_debug ring buffer into self.lpm_debug."""
    lpm_debug_phys = self.ramdump.address_of('lpm_debug_phys')
    if lpm_debug_phys is None:
        self.output.append("NOTE: 'lpm_debug data' not found\n")
        return
    lpm_debug_phys = self.ramdump.read_word(lpm_debug_phys, True)
    entry_size = self.ramdump.sizeof('struct lpm_debug')
    for i in range(0, 256):
        addr = lpm_debug_phys + i * entry_size
        debug = []
        for field in ('time', 'evt', 'cpu', 'arg1', 'arg2', 'arg3', 'arg4'):
            offset = self.ramdump.field_offset('struct lpm_debug', field)
            if field == 'time':
                # 'time' is word sized; the remaining fields are 32 bit.
                debug.append(self.ramdump.read_word(addr + offset, False))
            else:
                debug.append(self.ramdump.read_int(addr + offset, False))
        self.lpm_debug.append(debug)
def print_debug_phys(self):
    """Pretty-print the captured lpm_debug entries sorted by timestamp."""
    self.output.append("\n")
    self.output.append("{:16}".format("TimeStamp"))
    self.output.append("{:8} {:8} {:8} ".format("Event", "CPU", "arg1"))
    self.output.append("{:16}{:16}{:16}\n".format("arg2", "arg3", "arg4"))
    self.output.append("{}{}".format("-" * 120, "\n"))
    for entry in sorted(self.lpm_debug, key=lambda e: e[0]):
        for col, value in enumerate(entry):
            if col == 0 or col > 3:
                # Timestamp and arg2..arg4 are printed in hex.
                self.output.append("{:16}".format(hex(value).rstrip("L")))
            else:
                self.output.append("{}{:8}".format(value, ""))
        self.output.append("\n")
def get_cpuidle_usage_details(self, state_usage_addr):
    """Collect the interesting cpuidle_state_usage counters, preserving
    the column order used by print_cpuidle_stats()."""
    fields = (('disable', 'disabled'),
              ('usage', 'usage'),
              ('time_ns', 'time_ns'),
              ('rejected', 'rejected'),
              ('s2idle_usage', 's2idle_usage'),
              ('s2idle_time', 's2idle_time'))
    usage_stats = OrderedDict()
    for key, member in fields:
        usage_stats[key] = self.ramdump.read_structure_field(
            state_usage_addr, 'struct cpuidle_state_usage', member)
    return usage_stats
def print_state_description(self, data):
    """Print the state-name -> description table."""
    self.output.append("\n\n")
    self.output.append("CPUIdle States Description\n\n")
    self.output.append("{}{}".format("-" * 120, "\n"))
    self.output.append(" {:^30} {:^50}\n".format("State", "Description"))
    self.output.extend("{:^30} {:^50}\n".format(k, v) for k, v in data.items())
def print_cpuidle_stats(self, data):
    """Render the per-cpu, per-state table built by get_cpuidle_statistics()."""
    out = self.output.append
    out("\n\n")
    out("CPUIdle Statistics\n\n")
    out("{:^8}{:^20}{:^16}{:^16}{:^25}{:^16}{:^16}{:^16}\n".format(
        "CPU", "State", "Disabled", "Usage", "Time_ns",
        "Rejected", "S2idle_usage", "S2idle_time"))
    out("{}{}".format("-" * 120, "\n"))
    for cpu, states in data.items():
        out("{:^8}\n".format(cpu))
        for state, counters in states.items():
            out("{:^8}".format(""))
            out("{:^20}".format(state))
            for key, value in counters.items():
                cell = value if value else "0"
                # time_ns gets a wider column than the other counters.
                out("{:^25}".format(cell) if key == "time_ns" else "{:^16}".format(cell))
            out("\n")
    out("\n{}{}".format("-" * 120, "\n"))
def get_cpuidle_statistics(self):
    """Walk each CPU's cpuidle device/driver and dump per-state usage.

    Handles both the >= 5.10 layout (per-state kobjects that point at the
    usage struct) and the older embedded states_usage array.

    Fixes: the >= 5.10 path referenced an undefined 'state' label when a
    state kobject pointer was NULL (NameError on the first such state,
    silently swallowed by the broad except); also drops an unused local.
    """
    try:
        cpuidle_devices = self.ramdump.address_of('cpuidle_devices')
        cpuidle_drivers = self.ramdump.address_of('cpuidle_drivers')
        usage_details = {}
        state_dict = OrderedDict()
        for i in self.ramdump.iter_cpus():
            # Per-cpu pointers to this CPU's device and driver.
            cpuidle_dev = self.ramdump.read_word(cpuidle_devices, cpu=i)
            cpuidle_drv = self.ramdump.read_word(cpuidle_drivers, cpu=i)
            state_count = self.ramdump.read_structure_field(
                cpuidle_drv, 'struct cpuidle_driver', 'state_count')
            cpuidle_drv_states = cpuidle_drv + self.ramdump.field_offset(
                'struct cpuidle_driver', 'states')
            cpu = "CPU{}".format(i)
            usage_details[cpu] = {}
            if state_count >= 10:  # sanity check against CPUIDLE_STATE_MAX
                continue
            if (self.ramdump.kernel_version >= (5, 10, 0)):
                # v5.10+: cpuidle_device keeps an array of pointers to
                # per-state kobjects; the usage struct hangs off each one.
                kobjs_base = cpuidle_dev + self.ramdump.field_offset(
                    'struct cpuidle_device', 'kobjs')
                for state_idx in range(state_count):
                    cpuidle_state_usage_base = self.ramdump.read_u64(
                        kobjs_base + state_idx * self.ramdump.sizeof('void*'))
                    if cpuidle_state_usage_base != 0x0:
                        state_usage_offset = cpuidle_state_usage_base + self.ramdump.field_offset('struct cpuidle_state_kobj', 'state_usage')
                        state_usage_addr = self.ramdump.read_u64(state_usage_offset)
                        cpuidle_drv_offset = cpuidle_state_usage_base + self.ramdump.field_offset('struct cpuidle_state_kobj', 'state')
                        cpuidle_drv_state = self.ramdump.read_u64(cpuidle_drv_offset)
                        state_name = self.ramdump.read_cstring(cpuidle_drv_state, 16)
                        desc_offset = self.ramdump.field_offset('struct cpuidle_state', 'desc')
                        state_desc = self.ramdump.read_cstring(cpuidle_drv_state + desc_offset, 32)
                        # Usage data per cpu per state
                        state = "state{}({})".format(state_idx, state_name)
                        state_dict[state_name] = state_desc
                        usage_details[cpu][state] = self.get_cpuidle_usage_details(state_usage_addr)
                    else:
                        # No kobject for this state: record a placeholder
                        # keyed by index (the old code used an undefined
                        # 'state' here).
                        usage_details[cpu]["state{}".format(state_idx)] = 0
            else:
                cpuidle_state_usage_base = cpuidle_dev + self.ramdump.field_offset(
                    'struct cpuidle_device', 'states_usage')
                for state_idx in range(state_count):
                    state_usage_addr = cpuidle_state_usage_base + \
                        state_idx * self.ramdump.sizeof('struct cpuidle_state_usage')
                    cpuidle_drv_state = cpuidle_drv_states + \
                        state_idx * self.ramdump.sizeof('struct cpuidle_state')
                    state_name = self.ramdump.read_cstring(cpuidle_drv_state, 16)
                    desc_offset = self.ramdump.field_offset('struct cpuidle_state', 'desc')
                    state_desc = self.ramdump.read_cstring(cpuidle_drv_state + desc_offset, 32)
                    # Usage data per cpu per state
                    state = "state{}({})".format(state_idx, state_name)
                    state_dict[state_name] = state_desc
                    usage_details[cpu][state] = self.get_cpuidle_usage_details(state_usage_addr)
        self.print_state_description(state_dict)
        self.print_cpuidle_stats(usage_details)
    except Exception:
        # Layouts vary across kernels; emit a note rather than abort the
        # whole parse.
        self.output.append("\nUnable to extract CPUIdle Statistics\n\n")
        self.output.append("\n{}{}".format("-" * 120, "\n"))
def parse(self):
    """Entry point: gather lpm/cpuidle data and write it to lpm.txt."""
    self.output_file = self.ramdump.open_file('lpm.txt')
    self.get_bits()
    self.get_lpm()
    self.get_stats()
    self.get_debug_phys()
    self.print_debug_phys()
    self.print_pm_domain_info()
    self.get_cpuidle_statistics()
    # All sections above accumulate into self.output; flush in one pass.
    for i in self.output:
        self.output_file.write(i)
    self.output_file.close()

View File

@@ -0,0 +1,198 @@
"""
Copyright (c) 2016, 2020 The Linux Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of The Linux Foundation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Changes from Qualcomm Innovation Center are provided under the following license:
Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause-Clear
"""
from parser_util import register_parser, RamParser
from parser_util import cleanupString
TASK_NAME_LENGTH = 16
def do_dump_lsof_info(self, ramdump, lsof_info):
    """Walk every process (and its threads) and dump each open-file table.

    Threads that share a files_struct already printed for this process
    (CLONE_FILES) are skipped so each fd table is dumped only once.
    """
    for task_struct in ramdump.for_each_process():
        file_descriptor = []
        task_comm_offset = ramdump.field_offset('struct task_struct', 'comm')
        client_name = ramdump.read_cstring(task_struct + task_comm_offset, TASK_NAME_LENGTH)
        task_pid = ramdump.read_structure_field(task_struct, 'struct task_struct', 'pid')
        files = ramdump.read_structure_field(task_struct, 'struct task_struct', 'files')
        file_descriptor.append(files)
        str_task_file = '\n Task: 0x{0:x}, comm: {1}, pid : {2:1}, files : 0x{3:x}'
        lsof_info.write(str_task_file.format(task_struct, client_name, task_pid, files))
        parse_task(self, ramdump, task_struct, lsof_info)
        for curr in ramdump.for_each_thread(task_struct):
            file_pointer = ramdump.read_structure_field(curr, 'struct task_struct', 'files')
            #skip if fd is same as parent process or other child process or fd is Null
            if ((len(file_descriptor) and file_pointer in file_descriptor) or file_pointer == 0x0):
                continue
            else:
                file_descriptor.append((file_pointer))
                str_task_file = '\n Thread: 0x{0:x}, thread_name: {1}, thread_pid : {2:1}, thread_files : 0x{3:x}'
                lsof_info.write(str_task_file.format(curr,
                    ramdump.read_cstring(curr + task_comm_offset, TASK_NAME_LENGTH),
                    ramdump.read_structure_field(curr, 'struct task_struct', 'pid'),
                    file_pointer))
                parse_task(self, ramdump, curr, lsof_info)
        lsof_info.write("\n*********************************")
def get_dname_of_dentry(ramdump, dentry):
    """Return the cleaned d_name string of a struct dentry."""
    d_name = dentry + ramdump.field_offset('struct dentry', 'd_name')
    qstr_len = ramdump.read_u32(
        d_name + ramdump.field_offset('struct qstr', 'len'))
    name_ptr = ramdump.read_word(
        d_name + ramdump.field_offset('struct qstr', 'name'))
    return cleanupString(ramdump.read_cstring(name_ptr, qstr_len))
def get_pathname_by_file(ramdump, file):
    """Reconstruct the absolute path of a struct file.

    First climbs the mount hierarchy to collect mountpoint components,
    then walks the file's own dentry parent chain, and finally joins all
    components in reverse order with '/'.
    """
    f_pathoffset = ramdump.field_offset(
        'struct file', 'f_path')
    f_path = f_pathoffset + file
    mnt_offset_in_path = ramdump.field_offset('struct path', 'mnt')
    mnt = ramdump.read_word(f_path + mnt_offset_in_path)
    mnt_offset_in_mount = ramdump.field_offset('struct mount', 'mnt')
    mnt_parent_offset = ramdump.field_offset('struct mount', 'mnt_parent')
    # container_of(mnt, struct mount, mnt)
    mount = mnt - mnt_offset_in_mount
    mnt_mountpoint_offset = ramdump.field_offset(
        'struct mount', 'mnt_mountpoint')
    d_parent_offset = ramdump.field_offset(
        'struct dentry', 'd_parent')
    mnt_parent_pre = 0
    mnt_parent = mount
    mount_name = []
    # Climb the mount tree; loop ends when mnt_parent stops changing
    # (the root mount points to itself).
    while mnt_parent_pre != mnt_parent:
        mnt_parent_pre = mnt_parent
        mnt_mountpoint = ramdump.read_word(mnt_parent + mnt_mountpoint_offset)
        name = get_dname_of_dentry(ramdump, mnt_mountpoint)
        mnt_parent = ramdump.read_word(mnt_parent + mnt_parent_offset)
        if name == None or name == '/':
            break
        if mnt_parent == 0:
            break
        mount_name.append(name)
        # walk to get the fullname of mountpoint
        d_parent = ramdump.read_word(mnt_mountpoint + d_parent_offset)
        d_parent_pre = 0
        while d_parent_pre != d_parent:
            d_parent_pre = d_parent
            name = get_dname_of_dentry(ramdump, d_parent)
            d_parent = ramdump.read_word(d_parent + d_parent_offset)
            if name == None or name == '/':
                break
            mount_name.append(name)
            if d_parent == 0:
                break
    # Walk the file's own dentry chain up to the filesystem root.
    dentry = ramdump.read_structure_field(
        f_path, 'struct path', 'dentry')
    d_parent = dentry
    d_parent_pre = 0
    names = []
    while d_parent_pre != d_parent:
        d_parent_pre = d_parent
        name = get_dname_of_dentry(ramdump, d_parent)
        d_parent = ramdump.read_word(d_parent + d_parent_offset)
        if name == None or name == '/':
            break
        names.append(name)
        if d_parent == 0:
            break
    # Components were collected leaf-first; reverse and join with '/'.
    full_name = ''
    for item in mount_name:
        names.append(item)
    names.reverse()
    for item in names:
        full_name += '/' + item
    return full_name
def parse_task(self, ramdump, task, lsof_info):
    """Dump every open struct file of 'task': fd number, file pointer,
    resolved f_op symbol, path, and (for ion files) private_data.

    Fixes: local format string no longer shadows the builtin 'str';
    manual index/while loop replaced by the idiomatic for-range.
    """
    # Pointer size decides the stride of the fd array.
    if self.ramdump.arm64:
        addressspace = 8
    else:
        addressspace = 4
    files = ramdump.read_structure_field(
        task, 'struct task_struct', 'files')
    if files == 0x0:
        return
    fdt = ramdump.read_structure_field(
        files, 'struct files_struct', 'fdt')
    max_fds = ramdump.read_structure_field(
        fdt, 'struct fdtable', 'max_fds')
    fd = ramdump.read_structure_field(
        fdt, 'struct fdtable', 'fd')
    ion_str = "\n {0:8d} file : 0x{1:16x} {2:32s} {3:32s} client : 0x{4:x}"
    plain_str = "\n {0:8d} file : 0x{1:16x} {2:32s} {3:32s}"
    for index in range(max_fds):
        file = ramdump.read_word(fd + (index * addressspace))
        if file == 0:
            continue
        fop = ramdump.read_structure_field(
            file, 'struct file', 'f_op')
        priv_data = ramdump.read_structure_field(
            file, 'struct file', 'private_data')
        look = ramdump.unwind_lookup(fop)
        if look is None:
            # f_op does not resolve to a known symbol; skip this entry.
            continue
        fop, offset = look
        iname = get_pathname_by_file(ramdump, file)
        if fop.find("ion_fops", 0, 8) != -1:
            # ion buffers additionally report their client pointer.
            lsof_info.write(ion_str.format(
                index, file, fop, iname, priv_data))
        else:
            lsof_info.write(plain_str.format(index, file, fop, iname))
    return
@register_parser('--print-lsof', 'Print list of open files', optional=True)
class DumpLsof(RamParser):
    """Writes lsof.txt listing the open-file table of every task."""

    def parse(self):
        with self.ramdump.open_file('lsof.txt') as lsof_info:
            # The fdtable layout used below is only valid on >= 3.18.
            if (self.ramdump.kernel_version < (3, 18, 0)):
                lsof_info.write('Kernel version 3.18 \
and above are supported, current version {0}.\
{1}'.format(self.ramdump.kernel_version[0],
            self.ramdump.kernel_version[1]))
                return
            do_dump_lsof_info(self, self.ramdump, lsof_info)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,146 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
from parser_util import register_parser, RamParser, cleanupString
class memory_area:
    """Record describing one named memory region: [base, base + size)."""

    def __init__(self, name, base, size):
        self.name = name
        self.base = base
        self.size = size

    def __repr__(self):
        # Added for debuggability; instances end up in sorted report lists.
        return "memory_area(name={!r}, base={:#x}, size={:#x})".format(
            self.name, self.base, self.size)
@register_parser('--print-memory_map', 'Print memory_map information')
class memory_map(RamParser):
    """Collects reserved-mem, CMA, kernel-resource, memblock and iomem
    regions and writes them, sorted by base address, to memory_map.txt.

    Fixes: get_memory_block() previously never advanced its region
    pointer, so every iteration re-read region[0].
    """

    def get_reserved_mem(self, ramdump, list_memory_area):
        """Append every entry of the devicetree 'reserved_mem' array."""
        reserved_mem_addr = ramdump.address_of('reserved_mem')
        reserved_mem_count_addr = ramdump.address_of('reserved_mem_count')
        reserved_mem_count = ramdump.read_int(reserved_mem_count_addr)
        base_offset = ramdump.field_offset('struct reserved_mem', 'base')
        size_offset = ramdump.field_offset('struct reserved_mem', 'size')
        for i in range(0, reserved_mem_count):
            addr_index = ramdump.array_index(reserved_mem_addr, 'struct reserved_mem', i)
            name = ramdump.read_structure_cstring(addr_index, 'struct reserved_mem', 'name')
            # phys_addr_t is 64 bit on arm64 and 32 bit elsewhere.
            if ramdump.arm64:
                base = ramdump.read_u64(addr_index + base_offset)
                size = ramdump.read_word(addr_index + size_offset)
            else:
                base = ramdump.read_u32(addr_index + base_offset)
                size = ramdump.read_u32(addr_index + size_offset)
            list_memory_area.append(memory_area(name, base, size))
        list_memory_area.sort(key=lambda c: c.base)

    def get_kernel_resource(self, list_memory_area):
        """Append the first two 'mem_res' resources (kernel code/data)."""
        mem_res_mem_addr = self.ramdump.address_of('mem_res')
        start_offset = self.ramdump.field_offset('struct resource', 'start')
        end_offset = self.ramdump.field_offset('struct resource', 'end')
        for i in range(0, 2):
            mem_res_mem_addr_index = self.ramdump.array_index(mem_res_mem_addr, 'struct resource', i)
            name = self.ramdump.read_structure_cstring(mem_res_mem_addr_index, 'struct resource', 'name')
            if self.ramdump.arm64:
                start = self.ramdump.read_u64(mem_res_mem_addr_index + start_offset)
                end = self.ramdump.read_u64(mem_res_mem_addr_index + end_offset)
            else:
                start = self.ramdump.read_u32(mem_res_mem_addr_index + start_offset)
                end = self.ramdump.read_u32(mem_res_mem_addr_index + end_offset)
            list_memory_area.append(memory_area(name, start, end - start))

    def get_cma_areas(self, ramdump, list_memory_area):
        """Append every CMA area, skipping bases already recorded."""
        cma_area_count = ramdump.read_u32('cma_area_count')
        cma_area_base_addr = ramdump.address_of('cma_areas')
        for cma_index in range(0, cma_area_count):
            cma_area = ramdump.array_index(cma_area_base_addr, 'struct cma', cma_index)
            base_pfn = ramdump.read_structure_field(
                cma_area, 'struct cma', 'base_pfn')
            cma_size = ramdump.read_structure_field(
                cma_area, 'struct cma', 'count')
            # 'name' became an embedded char array in 5.10; before that it
            # was a pointer to the string.
            if (ramdump.kernel_version >= (5, 10, 0)):
                name_addr_offset = ramdump.field_offset('struct cma', 'name')
                name = ramdump.read_cstring(cma_area + name_addr_offset, 64)
            else:
                name_addr = ramdump.read_structure_field(cma_area, 'struct cma', 'name')
                name = ramdump.read_cstring(name_addr, 48)
            # base_pfn/count are in 4K pages; shift to byte addresses.
            instance = memory_area(name, base_pfn << 12, cma_size << 12)
            if not any(x.base == instance.base for x in list_memory_area):
                list_memory_area.append(instance)

    def get_memory_block(self, ramdump, list_memory_area):
        """Append memblock 'memory' and 'reserved' region arrays."""
        region_size = ramdump.sizeof('struct memblock_region')
        for label, member in (("memory", 'memory'), ("reserved", 'reserved')):
            cnt = ramdump.read_structure_field(
                'memblock', 'struct memblock', member + '.cnt')
            regions = ramdump.read_structure_field(
                'memblock', 'struct memblock', member + '.regions')
            for i in range(cnt):
                # BUGFIX: index the i-th region; the old code re-read
                # region[0] on every iteration.
                region = regions + i * region_size
                start = ramdump.read_structure_field(region, 'struct memblock_region', 'base')
                size = ramdump.read_structure_field(region, 'struct memblock_region', 'size')
                instance = memory_area(label, start, size)
                if not any(x.base == instance.base for x in list_memory_area):
                    list_memory_area.append(instance)

    def get_iomem_resource(self, ramdump, list_memory_area):
        """Append the first-level children of the iomem_resource tree."""
        iomem_resource_addr = ramdump.address_of('iomem_resource')
        offset_name = ramdump.field_offset('struct resource', 'name')
        # Start at the root's first child and follow the sibling chain.
        node = ramdump.read_structure_field(iomem_resource_addr, 'struct resource', 'child')
        while node != 0:
            start = ramdump.read_structure_field(node, 'struct resource', 'start')
            end = ramdump.read_structure_field(node, 'struct resource', 'end')
            sibling = ramdump.read_structure_field(node, 'struct resource', 'sibling')
            name_address = ramdump.read_pointer(node + offset_name)
            name = cleanupString(ramdump.read_cstring(name_address))
            list_memory_area.append(memory_area(name, start, end - start))
            if sibling == 0:
                break
            node = sibling

    def parse(self):
        """Entry point: collect every source of regions, then write the
        sorted table to <outdir>/memory_map.txt."""
        list_memory_area = []
        self.get_reserved_mem(self.ramdump, list_memory_area)
        self.get_cma_areas(self.ramdump, list_memory_area)
        self.get_kernel_resource(list_memory_area)
        self.get_memory_block(self.ramdump, list_memory_area)
        self.get_iomem_resource(self.ramdump, list_memory_area)
        with open(self.ramdump.outdir + "/memory_map.txt", "w") as fmap:
            print("name base end size size in KB\n", file=fmap)
            for area in sorted(list_memory_area, key=lambda c: c.base):
                print("----------------------------------------------------------------------------------------------------------------------------------------------",
                      file=fmap)
                print("%-64s 0x%-16x 0x%-16x 0x%-16x %16d" % (area.name,
                      area.base,
                      area.base + area.size,
                      area.size, area.size / 1024),
                      file=fmap)

View File

@@ -0,0 +1,365 @@
# Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser
import os
import linux_list as llist
import linux_radix_tree
VM_ALLOC = 0x00000002
@register_parser('--print-memstat', 'Print memory stats ')
class MemStats(RamParser):
def __init__(self, dump):
    """Set up MemStats; on kernels >= 5.4 prepare a radix-tree walker
    for enumerating zram devices."""
    super(MemStats, self).__init__(dump)
    if (self.ramdump.kernel_version >= (5, 4)):
        self.zram_dev_rtw = linux_radix_tree.RadixTreeWalker(self.ramdump)
    # Accumulated zram usage in MB, filled in by later calculations.
    self.zram_mem_mb = 0
def list_func(self, vmlist):
    """vmap_area walker: add nr_pages of every VM_ALLOC vm_struct to
    self.vmalloc_size (still in pages at this point)."""
    vm = self.ramdump.read_word(vmlist + self.vm_offset)
    if vm is None:
        return
    pages = self.ramdump.read_structure_field(
        vm, 'struct vm_struct', 'nr_pages')
    vm_flags = self.ramdump.read_structure_field(
        vm, 'struct vm_struct', 'flags')
    if vm_flags is None:
        return
    if vm_flags & VM_ALLOC:
        self.vmalloc_size += pages
def pages_to_mb(self, pages):
    """Convert a count of 4 KB pages to whole megabytes (0 stays 0).

    Note: this method was previously defined twice with identical
    behavior; the duplicate definition has been removed.
    """
    return (pages * 4) // 1024 if pages else 0

def bytes_to_mb(self, bytes):
    """Convert a byte count to whole megabytes (0 stays 0).

    The parameter name 'bytes' (shadowing the builtin) is kept for
    call-site compatibility.
    """
    return (bytes // 1024) // 1024 if bytes else 0
def calculate_vmalloc(self):
    """Compute total vmalloc'd memory into self.vmalloc_size (MB).

    Newer kernels export 'nr_vmalloc_pages' directly; otherwise walk
    'vmap_area_list' and let list_func() sum nr_pages of each VM_ALLOC
    area before converting pages to MB.
    """
    if self.ramdump.address_of('nr_vmalloc_pages') is None:
        next_offset = self.ramdump.field_offset('struct vmap_area', 'list')
        vmlist = self.ramdump.read_word('vmap_area_list')
        vm_offset = self.ramdump.field_offset('struct vmap_area', 'vm')
        # Stash the offset for the list_func() callback.
        self.vm_offset = vm_offset
        list_walker = llist.ListWalker(self.ramdump, vmlist, next_offset)
        list_walker.walk(vmlist, self.list_func)
        self.vmalloc_size = self.pages_to_mb(self.vmalloc_size)
    else:
        val = self.ramdump.read_word('nr_vmalloc_pages')
        self.vmalloc_size = self.pages_to_mb(val)
def calculate_vm_stat(self):
    """Legacy (< 4.9) 'other' memory in MB:
    anon + file + pagetable + kernel stack - swapcache."""
    counters = {name: self.ramdump.read_word('vm_stat[NR_%s]' % name)
                for name in ('ANON_PAGES', 'FILE_PAGES', 'PAGETABLE',
                             'KERNEL_STACK', 'SWAPCACHE')}
    other_mem = (counters['ANON_PAGES'] + counters['FILE_PAGES'] +
                 counters['PAGETABLE'] + counters['KERNEL_STACK'] -
                 counters['SWAPCACHE'])
    return self.pages_to_mb(other_mem)
def calculate_cached(self):
if self.ramdump.kernel_version >= (4, 9):
vmstat_file_pages = self.ramdump.read_word(
'vm_node_stat[NR_FILE_PAGES]')
cached = self.pages_to_mb(vmstat_file_pages)
else:
vmstat_file_pages = self.ramdump.read_word(
'vm_stat[NR_FILE_PAGES]')
cached = self.pages_to_mb(vmstat_file_pages)
return cached
def calculate_vm_node_zone_stat(self):
# Other memory : NR_ANON_MAPPED + NR_FILE_PAGES + NR_PAGETABLE \
# + NR_KERNEL_STACK_KB
vmstat_anon_pages = self.ramdump.read_word(
'vm_node_stat[NR_ANON_MAPPED]')
vmstat_file_pages = self.ramdump.read_word(
'vm_node_stat[NR_FILE_PAGES]')
if self.ramdump.kernel_version >= (5, 15):
vmstat_pagetbl = self.ramdump.read_word(
'vm_node_stat[NR_PAGETABLE]')
vmstat_kernelstack = self.ramdump.read_word(
'vm_node_stat[NR_KERNEL_STACK_KB]')
else:
vmstat_pagetbl = self.ramdump.read_word(
'vm_zone_stat[NR_PAGETABLE]')
vmstat_kernelstack = self.ramdump.read_word(
'vm_zone_stat[NR_KERNEL_STACK_KB]')
other_mem = (vmstat_anon_pages + vmstat_file_pages + vmstat_pagetbl +
(vmstat_kernelstack // 4))
other_mem = self.pages_to_mb(other_mem)
return other_mem
def calculate_ionmem(self):
if self.ramdump.kernel_version >= (5, 10):
grandtotal = 0
elif self.ramdump.kernel_version >= (5, 4):
grandtotal = self.ramdump.read_u64('total_heap_bytes')
else:
number_of_ion_heaps = self.ramdump.read_int('num_heaps')
heap_addr = self.ramdump.read_word('heaps')
offset_total_allocated = \
self.ramdump.field_offset(
'struct ion_heap', 'total_allocated')
size = self.ramdump.sizeof(
'((struct ion_heap *)0x0)->total_allocated')
if offset_total_allocated is None:
return "ion buffer debugging change is not there in this kernel"
if self.ramdump.arm64:
addressspace = 8
else:
addressspace = 4
heap_addr_array = []
grandtotal = 0
for i in range(0, number_of_ion_heaps):
heap_addr_array.append(heap_addr + i * addressspace)
temp = self.ramdump.read_word(heap_addr_array[i])
if size == 4:
total_allocated = self.ramdump.read_int(
temp + offset_total_allocated)
if size == 8:
total_allocated = self.ramdump.read_u64(
temp + offset_total_allocated)
if total_allocated is None:
total_allocated = 0
break
grandtotal = grandtotal + total_allocated
grandtotal = self.bytes_to_mb(grandtotal)
return grandtotal
def calculate_zram_dev_mem_allocated(self, zram):
mem_pool = zram + self.ramdump.field_offset('struct zram', 'mem_pool')
mem_pool = self.ramdump.read_word(mem_pool)
pages_allocated = mem_pool + self.ramdump.field_offset('struct zs_pool',
'pages_allocated')
stat_val = self.ramdump.read_word(pages_allocated)
if stat_val is None:
stat_val = 0
else:
stat_val = self.pages_to_mb(stat_val)
self.zram_mem_mb += stat_val
    def print_mem_stats(self, out_mem_stat):
        """Write the system-wide memory accounting summary to mem_stat.txt.

        Gathers totals (RAM, free, slab, ion/DMA, KGSL, ZRAM, vmalloc,
        cached, 'others') from version-appropriate vmstat symbols, then
        reports the remainder as unaccounted memory.
        """
        # Total memory
        if(self.ramdump.kernel_version > (4, 20, 0)):
            total_mem = self.ramdump.read_word('_totalram_pages')
        else:
            total_mem = self.ramdump.read_word('totalram_pages')
        total_mem = self.pages_to_mb(total_mem)
        if (self.ramdump.kernel_version < (4, 9, 0)):
            # Free Memory
            total_free = self.ramdump.read_word('vm_stat[NR_FREE_PAGES]')
            total_free = self.pages_to_mb(total_free)
            # slab Memory
            slab_rec = \
                self.ramdump.read_word('vm_stat[NR_SLAB_RECLAIMABLE]')
            slab_unrec = \
                self.ramdump.read_word('vm_stat[NR_SLAB_UNRECLAIMABLE]')
            total_slab = self.pages_to_mb(slab_rec + slab_unrec)
            #others
            other_mem = self.calculate_vm_stat()
        else:
            # Free Memory
            total_free = self.ramdump.read_word('vm_zone_stat[NR_FREE_PAGES]')
            total_free = self.pages_to_mb(total_free)
            # slab Memory: counter names changed at 4.14 (zone -> node) and
            # again at 5.10 (_B byte-based variants).
            if self.ramdump.kernel_version >= (5, 10):
                slab_rec = self.ramdump.read_word(
                    'vm_node_stat[NR_SLAB_RECLAIMABLE_B]')
                slab_unrec = self.ramdump.read_word(
                    'vm_node_stat[NR_SLAB_UNRECLAIMABLE_B]')
            elif (self.ramdump.kernel_version >= (4, 14)):
                slab_rec = self.ramdump.read_word(
                    'vm_node_stat[NR_SLAB_RECLAIMABLE]')
                slab_unrec = self.ramdump.read_word(
                    'vm_node_stat[NR_SLAB_UNRECLAIMABLE]')
            else:
                slab_rec = self.ramdump.read_word(
                    'vm_zone_stat[NR_SLAB_RECLAIMABLE]')
                slab_unrec = self.ramdump.read_word(
                    'vm_zone_stat[NR_SLAB_UNRECLAIMABLE]')
            total_slab = self.pages_to_mb(slab_rec + slab_unrec)
            # others
            other_mem = self.calculate_vm_node_zone_stat()
        cached = self.calculate_cached()
        # ion memory
        ion_mem = self.calculate_ionmem()
        # kgsl memory
        # Duplicates gpuinfo_510.py@parse_kgsl_mem()'s 'KGSL Total'
        try:
            kgsl_memory = self.ramdump.read_word(
                'kgsl_driver.stats.page_alloc')
            kgsl_memory += self.ramdump.read_word(
                'kgsl_driver.stats.coherent')
            kgsl_memory += self.ramdump.read_word(
                'kgsl_driver.stats.secure')
            kgsl_memory = self.bytes_to_mb(kgsl_memory)
        except TypeError as e:
            # Any read returning None (missing symbol) raises TypeError on +=
            out_mem_stat.write("Failed to retrieve total kgsl memory\n")
            kgsl_memory = 0
        # zcompressed ram
        if self.ramdump.kernel_version >= (4, 4):
            if self.ramdump.kernel_version >= (4, 14):
                zram_index_idr = self.ramdump.address_of('zram_index_idr')
            else:
                zram_index_idr = self.ramdump.read_word('zram_index_idr')
            if zram_index_idr is None:
                stat_val = 0
            else:
                #'struct radix_tree_root' was replaced by 'struct xarray' on kernel 5.4+
                if self.ramdump.kernel_version >= (5, 4):
                    self.zram_dev_rtw.walk_radix_tree(zram_index_idr,
                        self.calculate_zram_dev_mem_allocated)
                    stat_val = self.zram_mem_mb
                else:
                    if self.ramdump.kernel_version >= (4, 14):
                        idr_layer_ary_offset = self.ramdump.field_offset(
                            'struct radix_tree_root', 'rnode')
                        idr_layer_ary = self.ramdump.read_word(zram_index_idr +
                            idr_layer_ary_offset)
                    else:
                        idr_layer_ary_offset = self.ramdump.field_offset(
                            'struct idr_layer', 'ary')
                        idr_layer_ary = self.ramdump.read_word(zram_index_idr +
                            idr_layer_ary_offset)
                    # Older kernels keep the pool behind zram->meta; newer
                    # ones embed mem_pool directly in struct zram — probe
                    # the old layout first and fall back on TypeError.
                    try:
                        zram_meta = idr_layer_ary + self.ramdump.field_offset(
                            'struct zram', 'meta')
                        zram_meta = self.ramdump.read_word(zram_meta)
                        mem_pool = zram_meta + self.ramdump.field_offset(
                            'struct zram_meta', 'mem_pool')
                        mem_pool = self.ramdump.read_word(mem_pool)
                    except TypeError:
                        mem_pool = idr_layer_ary + self.ramdump.field_offset(
                            'struct zram', 'mem_pool')
                        mem_pool = self.ramdump.read_word(mem_pool)
                    if mem_pool is None:
                        stat_val = 0
                    else:
                        page_allocated = mem_pool + self.ramdump.field_offset(
                            'struct zs_pool', 'pages_allocated')
                        stat_val = self.ramdump.read_word(page_allocated)
                        if stat_val is None:
                            stat_val = 0
                        stat_val = self.pages_to_mb(stat_val)
        else:
            zram_devices_word = self.ramdump.read_word('zram_devices')
            if zram_devices_word is not None:
                zram_devices_stat_offset = self.ramdump.field_offset(
                    'struct zram', 'stats')
                stat_addr = zram_devices_word + zram_devices_stat_offset
                stat_val = self.ramdump.read_u64(stat_addr)
                stat_val = self.bytes_to_mb(stat_val)
            else:
                stat_val = 0
        self.out_mem_stat = out_mem_stat
        self.vmalloc_size = 0
        # vmalloc area
        self.calculate_vmalloc()
        # Output prints
        out_mem_stat.write('{0:30}: {1:8} MB'.format(
            "Total RAM", total_mem))
        out_mem_stat.write('\n{0:30}: {1:8} MB\n'.format(
            "Free memory:", total_free))
        out_mem_stat.write('\n{0:30}: {1:8} MB'.format(
            "Total Slab memory:", total_slab))
        if self.ramdump.kernel_version >= (5, 10):
            # 5.10+ DMA heap total is produced by the ionbuffer parser; read
            # it back from total_dma_heap.txt next to our own output file.
            log_location = os.path.dirname(out_mem_stat.name)
            try:
                dma_heap_file = os.path.join(log_location, "total_dma_heap.txt")
                if os.path.isfile(dma_heap_file):
                    fin = open(dma_heap_file, 'r')
                    fin_list = fin.readlines()
                    fin.close()
                    ion_mem = int(fin_list[0].split(" ")[-1].replace("MB", ""))
                    out_mem_stat.write("\n{0:30}: {1:8} MB".format("Total DMA memory", ion_mem))
                else:
                    out_mem_stat.write("\n{0:30}: Please parse ionbuffer first, use --print-ionbuffer.".format(
                        "Total DMA memory"))
            except:
                ion_mem = "Please refer total_dma_heap.txt"
                out_mem_stat.write('\nTotal ion memory: Please refer total_dma_heap.txt')
        else:
            out_mem_stat.write('\n{0:30}: {1:8} MB'.format(
                "Total ion memory:", ion_mem))
        out_mem_stat.write('\n{0:30}: {1:8} MB'.format(
            "KGSL ", kgsl_memory))
        out_mem_stat.write('\n{0:30}: {1:8} MB'.format(
            "ZRAM compressed  ", stat_val))
        out_mem_stat.write('\n{0:30}: {1:8} MB'.format(
            "vmalloc  ", self.vmalloc_size))
        out_mem_stat.write('\n{0:30}: {1:8} MB'.format(
            "Others  ", other_mem))
        out_mem_stat.write('\n{0:30}: {1:8} MB'.format(
            "Cached ",cached))
        # ion_mem stays a string when the total could not be read; exclude
        # it from the accounted sum in that case.
        if type(ion_mem) is str:
            accounted_mem = total_free + total_slab + kgsl_memory + stat_val + \
                self.vmalloc_size + other_mem
        else:
            accounted_mem = total_free + total_slab + ion_mem + kgsl_memory + \
                stat_val + self.vmalloc_size + other_mem
        unaccounted_mem = total_mem - accounted_mem
        out_mem_stat.write('\n\n{0:30}: {1:8} MB'.format(
            "Total Unaccounted Memory ",unaccounted_mem))
    def parse(self):
        """Parser entry point: write mem_stat.txt (kernels >= 3.18 only)."""
        with self.ramdump.open_file('mem_stat.txt') as out_mem_stat:
            if (self.ramdump.kernel_version < (3, 18, 0)):
                # The backslash continuations are inside the string literal,
                # so the following lines' leading spaces become part of the
                # message.
                out_mem_stat.write('Kernel version 3.18 \
                and above are supported, current version {0}.\
                {1}'.format(self.ramdump.kernel_version[0],
                            self.ramdump.kernel_version[1]))
                return
            self.print_mem_stats(out_mem_stat)

View File

@@ -0,0 +1,222 @@
# Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from print_out import print_out_str
from parser_util import register_parser, RamParser, cleanupString
from linux_list import ListWalker
from parsers.filetracking import FileTracking
""" Returns number of pages """
def get_shmem_swap_usage(ramdump, memory_file):
shmem_swaplist = ramdump.address_of("shmem_swaplist")
if not shmem_swaplist:
return 0
offset = ramdump.field_offset('struct shmem_inode_info', 'swaplist')
if not offset:
return 0
inode_offset = ramdump.field_offset('struct shmem_inode_info', 'vfs_inode')
if not inode_offset:
return 0
iter = ListWalker(ramdump, shmem_swaplist, offset)
total = 0
seen = {}
for shmem_inode_info in iter:
swap_pages = ramdump.read_structure_field(
shmem_inode_info, 'struct shmem_inode_info', 'swapped')
if swap_pages is None:
print_out_str("Invalid addr is found: {}".format(hex(shmem_inode_info)))
break
inode = shmem_inode_info + inode_offset
addres_space = ramdump.read_structure_field(inode, 'struct inode',
'i_mapping')
if addres_space in seen:
seen[addres_space] = seen[addres_space] + swap_pages
else:
seen[addres_space] = swap_pages
total += swap_pages
sortlist = sorted(seen.items(), key=lambda kv: kv[1],
reverse=True)
i = 0
string = "TOP 3 swapped SHMEM files are:\n"
pathtracking = FileTracking(ramdump)
for k,v in sortlist:
#k is struct address_space
if i < 3:
i = i + 1
addr_space_format = "Address_space 0x{0:x} Allocated {1} pages\n".format(k,v)
string = string + addr_space_format
inode = ramdump.read_structure_field(k, 'struct address_space',
'host')
if inode is not None:
dentry_list = ramdump.read_structure_field(inode, 'struct inode',
'i_dentry')
if dentry_list is not None:
dentry = ramdump.container_of(dentry_list, 'struct dentry',
'd_u')
if dentry is not None:
d_name_ptr = (dentry + ramdump.field_offset('struct dentry ',
'd_name')) + ramdump.field_offset('struct qstr', 'name')
name = ramdump.read_cstring(ramdump.read_pointer(d_name_ptr),
100)
if name is not None:
path, cycle_flag = pathtracking.get_filepath('', name, dentry)
else:
path = 'None'
path = "file name: " + path + '\n'
string = string + path
else:
break
return total,string
def do_dump_process_memory(ramdump):
    """Write system totals and a per-process RSS/swap table to memory.txt.

    Reads version-appropriate vmstat counters, then iterates every process,
    collecting its RSS/swap breakdown via get_rss(), and prints the rows
    sorted by RSS+swap, largest first.
    """
    if ramdump.kernel_version < (4, 9):
        total_free = ramdump.read_word('vm_stat[NR_FREE_PAGES]')
        slab_rec = ramdump.read_word('vm_stat[NR_SLAB_RECLAIMABLE]')
        slab_unrec = ramdump.read_word('vm_stat[NR_SLAB_UNRECLAIMABLE]')
        total_shmem = ramdump.read_word('vm_stat[NR_SHMEM]')
    else:
        total_free = ramdump.read_word('vm_zone_stat[NR_FREE_PAGES]')
        # slab memory: counter symbols changed at 4.14 and again at 5.10
        if ramdump.kernel_version >= (5, 10):
            slab_rec = ramdump.read_word('vm_node_stat[NR_SLAB_RECLAIMABLE_B]')
            slab_unrec = ramdump.read_word(
                'vm_node_stat[NR_SLAB_UNRECLAIMABLE_B]')
        elif ramdump.kernel_version >= (4, 14):
            slab_rec = ramdump.read_word('vm_node_stat[NR_SLAB_RECLAIMABLE]')
            slab_unrec = ramdump.read_word(
                'vm_node_stat[NR_SLAB_UNRECLAIMABLE]')
        else:
            slab_rec = ramdump.read_word('vm_zone_stat[NR_SLAB_RECLAIMABLE]')
            slab_unrec = ramdump.read_word(
                'vm_zone_stat[NR_SLAB_UNRECLAIMABLE]')
        total_shmem = ramdump.read_word('vm_node_stat[NR_SHMEM]')
    memory_file = ramdump.open_file('memory.txt')
    total_shmem_swap, shmem_swap_file = get_shmem_swap_usage(ramdump,memory_file)
    total_slab = slab_rec + slab_unrec
    # Page counters are 4 KB pages; '* 4' converts to kB.
    if(ramdump.kernel_version > (4, 20, 0)):
        total_mem = ramdump.read_word('_totalram_pages') * 4
    else:
        total_mem = ramdump.read_word('totalram_pages') * 4
    offset_comm = ramdump.field_offset('struct task_struct', 'comm')
    offset_signal = ramdump.field_offset('struct task_struct', 'signal')
    offset_adj = ramdump.field_offset('struct signal_struct', 'oom_score_adj')
    offset_pid = ramdump.field_offset('struct task_struct', 'pid')
    task_info = []
    memory_file.write('Total RAM: {0:,}kB\n'.format(total_mem))
    memory_file.write('Total free memory: {0:,}kB({1:.1f}%)\n'.format(
        total_free * 4, (100.0 * total_free * 4) / total_mem))
    memory_file.write('Slab reclaimable: {0:,}kB({1:.1f}%)\n'.format(
        slab_rec * 4, (100.0 * slab_rec * 4) / total_mem))
    memory_file.write('Slab unreclaimable: {0:,}kB({1:.1f}%)\n'.format(
        slab_unrec * 4, (100.0 * slab_unrec * 4) / total_mem))
    memory_file.write('Total Slab memory: {0:,}kB({1:.1f}%)\n'.format(
        total_slab * 4, (100.0 * total_slab * 4) / total_mem))
    memory_file.write('Total SHMEM (PAGECACHE): {0:,}kB({1:.1f}%)\n'.format(
        total_shmem * 4, (100.0 * total_shmem * 4) / total_mem))
    memory_file.write('Total SHMEM (SWAP): {0:,}kB({1:.1f}%)\n\n'.format(
        total_shmem_swap * 4, (100.0 * total_shmem_swap * 4) / total_mem))
    memory_file.write('{0}\n'.format(shmem_swap_file))
    for task in ramdump.for_each_process():
        next_thread_comm = task + offset_comm
        thread_task_name = cleanupString(
            ramdump.read_cstring(next_thread_comm, 16))
        next_thread_pid = task + offset_pid
        thread_task_pid = ramdump.read_int(next_thread_pid)
        signal_struct = ramdump.read_word(task + offset_signal)
        if signal_struct == 0 or signal_struct is None:
            continue
        # oom_score_adj is a signed 16-bit value; sign-extend by hand.
        adj = ramdump.read_u16(signal_struct + offset_adj)
        if adj & 0x8000:
            adj = adj - 0x10000
        rss, swap, anon_rss, file_rss, shmem_rss = get_rss(ramdump, task)
        if rss != 0 or swap != 0:
            task_info.append([thread_task_name, thread_task_pid, rss, swap, rss + swap, adj, anon_rss, file_rss, shmem_rss])
    # Sort by combined RSS + swap, descending.
    task_info = sorted(task_info, key=lambda l: l[4], reverse=True)
    # NOTE(review): 'str' shadows the builtin; left unchanged in this
    # documentation-only pass.
    str = '{0:<17s}{1:>8s}{2:>19s}{3:>19s}{4:>6}{5:>16}{6:>16}{7:>16}\n'.format(
        'Task name', 'PID', 'RSS in kB', 'SWAP in kB', 'ADJ', "anon_rss in kB", "file_rss in kB", "shmem_rss in kB")
    memory_file.write(str)
    for item in task_info:
        str = '{taskname:<17s}{pid:8d}{rss:13,d}({rss_pct:4.1f}%){swap:13,d}({swap_pct:2.1f}%){adj:6} {anon_rss:>16,d} {file_rss:>16,d} {shmem_rss:>10,d}\n'.format(
            taskname = item[0], pid = item[1],
            rss = item[2], rss_pct = (100.0 * item[2]) / total_mem,
            swap = item[3], swap_pct = (100.0 * item[3]) / total_mem,
            adj = item[5], anon_rss=item[6], file_rss=item[7], shmem_rss=item[8])
        memory_file.write(str)
    memory_file.close()
    print_out_str('---wrote meminfo to memory.txt')
def percpu_counter_rss_stat(ramdump, rss_stat):
    """Fold a percpu_counter's per-CPU deltas into its base count."""
    total = rss_stat.count
    for cpu in ramdump.iter_cpus():
        total += ramdump.read_int(rss_stat.counters + ramdump.per_cpu_offset(cpu))
    return total
def get_mm_counter(ramdump, rss_stat):
    """Return a percpu_counter's base count (per-CPU deltas ignored;
    ramdump is unused but kept for interface symmetry)."""
    return rss_stat.count
def get_rss(ramdump, task_struct):
    """Return (total_rss, swap, anon, file, shmem) for a task, all in kB.

    Counters come from mm_struct's rss_stat, using the layout matching the
    dump's kernel version; page counts are converted to kB (* 4).
    Kernel threads (mm == 0) yield all zeros.
    """
    offset_mm = ramdump.field_offset('struct task_struct', 'mm')
    offset_rss_stat = ramdump.field_offset('struct mm_struct', 'rss_stat')
    mm_struct = ramdump.read_word(task_struct + offset_mm)
    # Guard None too: a truncated dump can read back None, which the
    # original '== 0' check let through, crashing on the arithmetic below.
    # (Matches the 'signal_struct == 0 or is None' style used nearby.)
    if mm_struct == 0 or mm_struct is None:
        return 0, 0, 0, 0, 0
    if ramdump.kernel_version >= (6, 2):
        # /* 6.2: struct percpu_counter rss_stat[NR_MM_COUNTERS] */
        mm = ramdump.read_datatype(mm_struct, 'struct mm_struct')
        file_rss = get_mm_counter(ramdump, mm.rss_stat[0])
        anon_rss = get_mm_counter(ramdump, mm.rss_stat[1])
        swap_rss = get_mm_counter(ramdump, mm.rss_stat[2])
        shmem_rss = get_mm_counter(ramdump, mm.rss_stat[3])
    else:
        offset_file_rss = ramdump.field_offset('struct mm_rss_stat', 'count')
        offset_anon_rss = ramdump.field_offset('struct mm_rss_stat', 'count[1]')
        offset_swap_rss = ramdump.field_offset('struct mm_rss_stat', 'count[2]')
        if ramdump.kernel_version >= (4, 9):
            # count[3] (MM_SHMEMPAGES) only exists on 4.9+
            offset_shmem_rss = ramdump.field_offset('struct mm_rss_stat', 'count[3]')
        anon_rss = ramdump.read_word(mm_struct + offset_rss_stat + offset_anon_rss)
        swap_rss = ramdump.read_word(mm_struct + offset_rss_stat + offset_swap_rss)
        file_rss = ramdump.read_word(mm_struct + offset_rss_stat + offset_file_rss)
        if ramdump.kernel_version >= (4, 9):
            shmem_rss = ramdump.read_word(mm_struct + offset_rss_stat + offset_shmem_rss)
        else:
            shmem_rss = 0
    # Ignore negative RSS values (wrapped/garbage reads above 2**31)
    if anon_rss > 0x80000000:
        anon_rss = 0
    if swap_rss > 0x80000000:
        swap_rss = 0
    if file_rss > 0x80000000:
        file_rss = 0
    if shmem_rss > 0x80000000:
        shmem_rss = 0
    total_rss = anon_rss + file_rss + shmem_rss
    return total_rss * 4 , swap_rss * 4 , anon_rss * 4 , file_rss * 4, shmem_rss * 4
@register_parser('--print-memory-info', 'Print memory usage info')
class DumpProcessMemory(RamParser):
    """Parser entry point: dump per-process memory usage to memory.txt."""
    def parse(self):
        do_dump_process_memory(self.ramdump)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,630 @@
# Copyright (c) 2016 The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import parser_util
from parser_util import register_parser, RamParser
from print_out import print_out_str
@register_parser(
'--print-wlan',
'Print WLAN debugging information(if enabled)',
optional=True)
class ModuleWlan(RamParser):
""" This class defines when WLAN module is loaded """
    def __init__(self, *args):
        """Initialize per-run state; wlan.ko section addresses are resolved
        later by get_sections_of_wlan()."""
        super(ModuleWlan, self).__init__(*args)
        self.dir_char = ''          # host path separator, detected lazily
        self.dir_char_out = ''      # escaped separator used in gdb commands
        self.opt_dbg = False        # verbose debug logging switch
        self.wlan_path = ''         # path to wlan.ko (from self.ramdump.wlan)
        self.wlan_module_addr = 0   # struct module address of wlan.ko
        self.wlan_text_addr = 0     # .text section address
        self.wlan_data_addr = 0     # .data section address
        self.wlan_bss_addr = 0      # .bss section address
def convert_dir_for_arg(self, ori):
"""
Convert full path as an argument of function
"""
if self.opt_dbg is True:
print_out_str('** convert_dir_for_arg() **')
if self.dir_char == '':
if parser_util.get_system_type() == 'Linux':
self.dir_char = '/'
self.dir_char_out = '/'
else:
self.dir_char = '\\'
self.dir_char_out = '\\\\'
dst = ''
for c in ori:
if c == self.dir_char:
dst = dst + self.dir_char_out
else:
dst = dst + c
if self.opt_dbg is True:
print_out_str('ori - [{}]'.format(ori))
print_out_str('dst - [{}]'.format(dst))
return dst
def load_wlan_ko(self):
""" Load wlan.ko to GDB """
if self.opt_dbg is True:
print_out_str('** load_wlan_ko() **')
if self.wlan_text_addr == 0:
print_out_str('self.wlan_text_addr is zero')
return False
cmd = 'add-symbol-file '
cmd = cmd + self.convert_dir_for_arg(self.wlan_path)
cmd = cmd + ' {} -s .data {} -s .bss {}'.format(
self.wlan_text_addr, self.wlan_data_addr,
self.wlan_bss_addr)
return self.ramdump.gdbmi._run_for_first(cmd)
def get_sections_of_wlan(self):
"""
Get wlan.ko's sectino addresses
"""
if self.opt_dbg is True:
print_out_str('** get_sections_of_wlan() **')
# Step-A) Find wlan.ko
modules_addr = self.ramdump.address_of('modules')
next_module_addr = self.ramdump.read_structure_field(
modules_addr, 'struct list_head', 'next')
name_offset = self.ramdump.field_offset('struct module', 'name')
module_addr = 0
idx = 0
while modules_addr != next_module_addr:
module_addr = self.ramdump.container_of(
next_module_addr, 'struct module', 'list')
module_name_addr = module_addr + name_offset
module_name_str = self.ramdump.read_cstring(
module_name_addr, 32, True)
if module_name_str == 'wlan':
self.wlan_module_addr = module_addr
break
if self.opt_dbg is True:
print_out_str(
'[{}]th - next_module[{}], module[{}], name[{}]'.format(
hex(idx), hex(next_module_addr),
hex(module_addr), module_name_str))
# move the list entry to the next
next_module_addr = self.ramdump.read_structure_field(
modules_addr, 'struct list_head', 'next')
idx = idx + 1
if self.wlan_module_addr == 0:
print_out_str('[Caution] Fail to find wlan.ko')
return False
# Step-B) get sections in wlan.ko
sect_attrs_addr = self.ramdump.read_structure_field(
module_addr, 'struct module', 'sect_attrs')
nsections = self.ramdump.read_structure_field(
sect_attrs_addr,
'struct module_sect_attrs',
'nsections')
attrs_offset = self.ramdump.field_offset(
'struct module_sect_attrs', 'attrs')
attrs_addr = sect_attrs_addr + attrs_offset
module_sect_attr_size = self.ramdump.sizeof('struct module_sect_attr')
if self.opt_dbg is True:
print_out_str('module_addr : {}'.format(hex(module_addr)))
print_out_str('sect_attrs_addr : {}'.format(hex(sect_attrs_addr)))
print_out_str('nsections : {}'.format(hex(nsections)))
print_out_str('attrs_offset : {}'.format(hex(attrs_offset)))
if attrs_addr is not None:
print_out_str('attrs_addr : {}'.format(hex(attrs_addr)))
else:
print_out_str('attrs_addr : {}'.format(attrs_addr))
section_name_offset = self.ramdump.field_offset(
'struct module_sect_attr', 'name')
idx = 0
while idx < nsections:
section_attr_address = attrs_addr + idx * module_sect_attr_size
section_name_addr = self.ramdump.read_pointer(
section_attr_address + section_name_offset)
section_name_str = self.ramdump.read_cstring(
section_name_addr, 32, True)
section_address = self.ramdump.read_structure_field(
section_attr_address,
'struct module_sect_attr', 'address')
if self.opt_dbg is True:
print_out_str('section[{}]th - name[{}], attr[{}]'.format(
hex(idx), section_name_str, hex(section_address)))
if section_name_str == '.text':
self.wlan_text_addr = section_address
if section_name_str == '.data':
self.wlan_data_addr = section_address
if section_name_str == '.bss':
self.wlan_bss_addr = section_address
idx = idx + 0x1
print_out_str('wlan_text_addr : {}'.format(hex(self.wlan_text_addr)))
print_out_str('wlan_data_addr : {}'.format(hex(self.wlan_data_addr)))
print_out_str('wlan_bss_addr : {}'.format(hex(self.wlan_bss_addr)))
return True
def run(self):
"""
Main
"""
if self.ramdump.arm64 is None:
print_out_str('[Caution] this script supports on ARM64')
return False
if self.ramdump.wlan == "INTEGRATED":
print_out_str('self.wlan doen\'t exist, skip')
else:
print_out_str('self.wlan exist {}'.format(self.ramdump.wlan))
self.wlan_path = self.ramdump.wlan
if self.get_sections_of_wlan() is False:
print_out_str('wlan.ko is not loaded.')
return False
else:
print_out_str('** Find WLAN Module **')
self.load_wlan_ko()
self.get_wmi_command_log_buffer()
self.get_host_wmi_command_tx_cmp_buf()
self.get_host_wmi_event_buf()
self.get_host_wmi_rx_event_buf()
self.get_host_extract_log()
return True
###################################
# Parse internal variables
def get_wmi_command_log_buffer(self):
"""
Parse 'struct wmi_command_debug'
"""
if self.opt_dbg is True:
print_out_str('*** get_wmi_command_log_buffer() ***')
element_size = self.ramdump.sizeof('struct wmi_command_debug')
if (element_size is None):
print_out_str('[Caution] symbols of host driver do not exist')
return False
out_file = self.ramdump.open_file("wmi_command_log_buffer.txt")
wmi_total_size = self.ramdump.sizeof('wmi_command_log_buffer')
num_elements = wmi_total_size / element_size
if self.opt_dbg is True:
print_out_str('** wlan_host_wmi_command_log_buffer **')
print_out_str('*************************************')
print_out_str('wmi_total_size({})'.format(hex(wmi_total_size)))
print_out_str('element_size({})'.format(hex(element_size)))
print_out_str('num_elements({})'.format(hex(num_elements)))
print_out_str('*************************************')
# info of the data structure
command_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'command')
data0_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'data[0]')
data1_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'data[1]')
data2_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'data[2]')
data3_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'data[3]')
time_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'time')
if self.opt_dbg is True:
print_out_str('command_offset({})'.format(command_offset))
print_out_str('data0_offset({})'.format(data0_offset))
print_out_str('data1_offset({})'.format(data1_offset))
print_out_str('data2_offset({})'.format(data2_offset))
print_out_str('data3_offset({})'.format(data3_offset))
print_out_str('time_offset({})'.format(time_offset))
print_out_str('*************************************')
buffer_start_address = self.ramdump.address_of(
'wmi_command_log_buffer')
wmi_command_buf_idx = self.ramdump.read_u32(
self.ramdump.address_of(
'g_wmi_command_buf_idx'))
cnt = 0
idx = wmi_command_buf_idx
while cnt < num_elements:
if idx == num_elements:
idx = 0
buffer_address = buffer_start_address + idx * element_size
command = self.ramdump.read_u32(buffer_address)
data0 = self.ramdump.read_u32(buffer_address + data0_offset)
data1 = self.ramdump.read_u32(buffer_address + data1_offset)
data2 = self.ramdump.read_u32(buffer_address + data2_offset)
data3 = self.ramdump.read_u32(buffer_address + data3_offset)
time = self.ramdump.read_u64(buffer_address + time_offset)
idx = idx + 1
cnt = cnt + 1
out_buf = '{0} us'.format(float(time/100000.0))
out_buf = out_buf + ' : command({})'.format(hex(command))
out_buf = out_buf + ', data[{}'.format(hex(data0))
out_buf = out_buf + ', {}'.format(hex(data1))
out_buf = out_buf + ', {}'.format(hex(data2))
out_buf = out_buf + ', {}]'.format(hex(data3))
if self.opt_dbg is True:
print_out_str(out_buf)
out_file.write(out_buf + '\n')
out_file.close()
return True
def get_host_wmi_command_tx_cmp_buf(self):
"""
Parse 'struct wmi_command_debug wmi_command_tx_cmp_log_buffer'
"""
if self.opt_dbg is True:
print_out_str('*** get_host_wmi_command_tx_cmp_buf() ***')
element_size = self.ramdump.sizeof('struct wmi_command_debug')
if (element_size is None):
print_out_str('[Caution] symbols of host driver do not exist')
return False
out_file = self.ramdump.open_file("wmi_command_tx_cmp_buf.txt")
wmi_total_size = self.ramdump.sizeof('wmi_command_tx_cmp_log_buffer')
num_elements = wmi_total_size / element_size
if self.opt_dbg is True:
print_out_str('** wlan_host_wmi_command_tx_cmp_buf **')
print_out_str('*************************************')
print_out_str('wmi_total_size({})'.format(hex(wmi_total_size)))
print_out_str('element_size({})'.format(hex(element_size)))
print_out_str('num_elements({})'.format(hex(num_elements)))
print_out_str('*************************************')
# info of the data structure
command_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'command')
data0_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'data[0]')
data1_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'data[1]')
data2_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'data[2]')
data3_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'data[3]')
time_offset = self.ramdump.field_offset(
'struct wmi_command_debug', 'time')
if self.opt_dbg is True:
print_out_str("command_offset({})".format(command_offset))
print_out_str("data0_offset({})".format(data0_offset))
print_out_str("data1_offset({})".format(data1_offset))
print_out_str("data2_offset({})".format(data2_offset))
print_out_str("data3_offset({})".format(data3_offset))
print_out_str("time_offset({})".format(time_offset))
print_out_str('*************************************')
log_buffer_address = self.ramdump.address_of(
'wmi_command_tx_cmp_log_buffer')
wmi_command_buf_idx = self.ramdump.read_u32(
self.ramdump.address_of(
'g_wmi_command_tx_cmp_buf_idx'))
cnt = 0
idx = wmi_command_buf_idx
while cnt < num_elements:
if idx == num_elements:
idx = 0
buffer_address = log_buffer_address + idx * element_size
command = self.ramdump.read_u32(buffer_address)
data0 = self.ramdump.read_u32(buffer_address + data0_offset)
data1 = self.ramdump.read_u32(buffer_address + data1_offset)
data2 = self.ramdump.read_u32(buffer_address + data2_offset)
data3 = self.ramdump.read_u32(buffer_address + data3_offset)
time = self.ramdump.read_u64(buffer_address + time_offset)
idx = idx + 1
cnt = cnt + 1
out_buf = '{0} us'.format(float(time/100000.0))
out_buf = out_buf + ' : command({})'.format(hex(command))
out_buf = out_buf + ', data[{}'.format(hex(data0))
out_buf = out_buf + ', {}'.format(hex(data1))
out_buf = out_buf + ', {}'.format(hex(data2))
out_buf = out_buf + ', {}]'.format(hex(data3))
if self.opt_dbg is True:
print_out_str(out_buf)
out_file.write(out_buf + '\n')
out_file.close()
return True
def get_host_wmi_event_buf(self):
"""
Parse 'struct wmi_event_debug wmi_event_log_buffer[]'
"""
if self.opt_dbg is True:
print_out_str('*** get_host_wmi_event_buf() ***')
element_size = self.ramdump.sizeof('struct wmi_event_debug')
if (element_size is None):
print_out_str('[Caution] symbols of host driver do not exist')
return False
out_file = self.ramdump.open_file("wmi_event_log_buffer.txt")
wmi_total_size = self.ramdump.sizeof('wmi_event_log_buffer')
num_elements = wmi_total_size / element_size
if self.opt_dbg is True:
print_out_str('[Debug] wmi_total_size({})'.format(
hex(wmi_total_size)))
print_out_str('[Debug] element_size({})'.format(hex(element_size)))
print_out_str('[Debug] num_elements({})'.format(hex(num_elements)))
# info of the data structure
event_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'event')
data0_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'data[0]')
data1_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'data[1]')
data2_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'data[2]')
data3_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'data[3]')
time_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'time')
if self.opt_dbg is True:
print_out_str("[Debug] event_offset({})".format(event_offset))
print_out_str("[Debug] data0_offset({})".format(data0_offset))
print_out_str("[Debug] data1_offset({})".format(data1_offset))
print_out_str("[Debug] data2_offset({})".format(data2_offset))
print_out_str("[Debug] data3_offset({})".format(data3_offset))
print_out_str("[Debug] time_offset({})".format(time_offset))
wmi_log_address = self.ramdump.address_of('wmi_event_log_buffer')
wmi_event_buf_idx = self.ramdump.read_u32(
self.ramdump.address_of('g_wmi_event_buf_idx'))
cnt = 0
idx = wmi_event_buf_idx
while cnt < num_elements:
if idx == num_elements:
idx = 0
buffer_address = wmi_log_address + idx * element_size
event = self.ramdump.read_u32(buffer_address)
data0 = self.ramdump.read_u32(buffer_address + data0_offset)
data1 = self.ramdump.read_u32(buffer_address + data1_offset)
data2 = self.ramdump.read_u32(buffer_address + data2_offset)
data3 = self.ramdump.read_u32(buffer_address + data3_offset)
time = self.ramdump.read_u64(buffer_address + time_offset)
idx = idx + 1
cnt = cnt + 1
out_buf = '{0} us'.format(float(time/100000.0))
out_buf = out_buf + ' : event({})'.format(hex(event))
out_buf = out_buf + ', data[{}'.format(hex(data0))
out_buf = out_buf + ', {}'.format(hex(data1))
out_buf = out_buf + ', {}'.format(hex(data2))
out_buf = out_buf + ', {}]'.format(hex(data3))
if self.opt_dbg is True:
print_out_str(out_buf)
out_file.write(out_buf + '\n')
out_file.close()
return True
def get_host_wmi_rx_event_buf(self):
"""
Parse 'struct wmi_event_debug wmi_rx_event_log_buffer'
"""
if self.opt_dbg is True:
print_out_str('*** get_host_wmi_rx_event_buf() ***')
wmi_elem_size = self.ramdump.sizeof('struct wmi_event_debug')
if (wmi_elem_size is None):
print_out_str('[Caution] symbols of host driver do not exist')
return False
wmi_total_size = self.ramdump.sizeof('wmi_rx_event_log_buffer')
num_elements = wmi_total_size / wmi_elem_size
out_file = self.ramdump.open_file("wmi_rx_event_log_buffer.txt")
# info of the data structure
event_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'event')
data0_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'data[0]')
data1_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'data[1]')
data2_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'data[2]')
data3_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'data[3]')
time_offset = self.ramdump.field_offset(
'struct wmi_event_debug', 'time')
wmi_event_address = self.ramdump.address_of('wmi_rx_event_log_buffer')
wmi_event_buf_idx = self.ramdump.read_u32(
self.ramdump.address_of(
'g_wmi_rx_event_buf_idx'))
if self.opt_dbg is True:
print_out_str('[Debug] wmi_total_size({})'.format(wmi_total_size))
print_out_str('[Debug] wmi_elem_size({})'.format(wmi_elem_size))
print_out_str('[Debug] num_elements({})'.format(num_elements))
print_out_str('[Debug] event_offset({})'.format(event_offset))
print_out_str('[Debug] data0_offset({})'.format(data0_offset))
print_out_str('[Debug] data1_offset({})'.format(data1_offset))
print_out_str('[Debug] data2_offset({})'.format(data2_offset))
print_out_str('[Debug] data3_offset({})'.format(data3_offset))
print_out_str('[Debug] time_offset({})'.format(time_offset))
cnt = 0
idx = wmi_event_buf_idx
while cnt < num_elements:
if idx == num_elements:
idx = 0
buffer_address = wmi_event_address + idx * wmi_elem_size
event = self.ramdump.read_u32(buffer_address)
data0 = self.ramdump.read_u32(buffer_address + data0_offset)
data1 = self.ramdump.read_u32(buffer_address + data1_offset)
data2 = self.ramdump.read_u32(buffer_address + data2_offset)
data3 = self.ramdump.read_u32(buffer_address + data3_offset)
time = self.ramdump.read_u64(buffer_address + time_offset)
out_buf = '{0} us'.format(float(time/100000.0))
out_buf = out_buf + ' : event({})'.format(hex(event))
out_buf = out_buf + ', data[{}'.format(hex(data0))
out_buf = out_buf + ', {}'.format(hex(data1))
out_buf = out_buf + ', {}'.format(hex(data2))
out_buf = out_buf + ', {}]'.format(hex(data3))
if self.opt_dbg is True:
print_out_str(out_buf)
out_file.write(out_buf + '\n')
idx = idx + 1
cnt = cnt + 1
out_file.close()
return True
    def get_host_extract_log(self):
        """Extract the host driver's wlan logging ring to gwlan_logging.txt.

        Follows the data layout of wlan_logging_sock_svc.c: an array of
        'num_buf' 'struct log_msg' elements pointed to by 'gplog_msg',
        each carrying a filled_length and an inline log buffer.

        Returns:
            bool: True after the output file has been written.
        """
        if self.opt_dbg is True:
            print_out_str('*** wlan_host_extract_log() ***')
        out_file = self.ramdump.open_file("gwlan_logging.txt")
        # Number of log_msg buffers (gwlan_logging.num_buf).
        num_buf = self.ramdump.read_s32(
            self.ramdump.address_of('gwlan_logging') +
            self.ramdump.field_offset(
                'struct wlan_logging', 'num_buf'))
        if self.opt_dbg is True:
            print_out_str('num_buf : {}'.format(num_buf))
        # Per-element stride of the gplog_msg array.
        # NOTE(review): 'element_size + (element_size % 32)' does not
        # round up to a multiple of 32 (e.g. 40 -> 48, not 64); confirm
        # against the driver's real per-element stride before trusting
        # entries past index 0.
        element_size = self.ramdump.sizeof('struct log_msg')
        if element_size % 32:
            elem_aligned_size = element_size + (element_size % 32)
            if self.opt_dbg is True:
                print_out_str('element_size({})'.format(hex(element_size)))
                print_out_str('element_align_size({})'.format(
                    hex(elem_aligned_size)))
        else:
            elem_aligned_size = element_size
            if self.opt_dbg is True:
                print_out_str('element_size({})'.format(hex(element_size)))
                print_out_str('element_align_size({})'.format(
                    hex(elem_aligned_size)))
        filled_length_offset = self.ramdump.field_offset(
            'struct log_msg',
            'filled_length')
        logbuf_offset = self.ramdump.field_offset(
            'struct log_msg', 'logbuf')
        logbuf_size = element_size - logbuf_offset
        gplog_msg_address = self.ramdump.read_pointer('gplog_msg')
        if self.opt_dbg is True:
            print_out_str('filled_length_offset : {}'.format(
                hex(filled_length_offset)))
            print_out_str('logbuf_size : {}'.format(hex(logbuf_size)))
            print_out_str('gplog_msg_address : {}'.format(
                hex(gplog_msg_address)))
        cnt = 0
        while cnt < num_buf:
            buffer_address = gplog_msg_address + cnt * elem_aligned_size
            filled_length = self.ramdump.read_u32(
                buffer_address + filled_length_offset)
            # Skips the first 4 bytes of logbuf -- presumably a header
            # word before the text starts; TODO confirm against driver.
            v_address = buffer_address + logbuf_offset + 4
            p_address = self.ramdump.virt_to_phys(v_address)
            if self.opt_dbg is True:
                print_out_str('** gplog_msg[{}] : {}, {}, VA{}-PA{} **'.format(
                    cnt,
                    hex(buffer_address),
                    hex(filled_length),
                    hex(v_address),
                    hex(p_address)))
            out_file.write('** gplog_msg[{}] : {}, {}, VA{}-PA{} **\n'.format(
                cnt,
                hex(buffer_address),
                hex(filled_length),
                hex(v_address),
                hex(p_address)))
            if filled_length != 0:
                left_bytes = filled_length
                logbuf_str = ""
                # Read the filled bytes 4 at a time, re-translating the
                # virtual address each step so page crossings are handled.
                while left_bytes > 0:
                    p_address = self.ramdump.virt_to_phys(v_address)
                    # NOTE(review): assumes read_physical() returns str;
                    # if it returns bytes this concatenation would raise
                    # TypeError -- verify.
                    logbuf_out = self.ramdump.read_physical(p_address, 4)
                    logbuf_str = logbuf_str + logbuf_out
                    v_address = v_address + 4
                    left_bytes = left_bytes - 4
                if self.opt_dbg is True:
                    print_out_str(logbuf_str)
                out_file.write(logbuf_str)
                out_file.write('\n')
            # We may be able to delete first []
            # like [VosMCThread] or [kworker/0:0]
            cnt = cnt + 1
        out_file.close()
        return True
def parse(self):
self.run()
return True

View File

@@ -0,0 +1,55 @@
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import print_out
from parser_util import RamParser, cleanupString, register_parser
import module_table
@register_parser('--modules_table', 'Dump modules_table')
class Modules_table(RamParser):
    """Dump the kernel 'modules' list (name, load address, scmversion)."""

    def retrieve_modules_cn(self):
        """Walk the kernel 'modules' list and record one entry per module.

        Populates self.module_table_cn and writes a table (sorted by
        module core address) of name, load address, struct module
        address and scmversion string to modules_table.txt.
        """
        mod_list = self.ramdump.address_of('modules')
        next_offset = self.ramdump.field_offset('struct list_head', 'next')
        list_offset = self.ramdump.field_offset('struct module', 'list')
        name_offset = self.ramdump.field_offset('struct module', 'name')
        scmversion_offset = self.ramdump.field_offset('struct module', 'scmversion')
        # The module base pointer moved into core_layout on newer kernels.
        if self.ramdump.kernel_version > (4, 9, 0):
            module_core_offset = self.ramdump.field_offset('struct module', 'core_layout.base')
        else:
            module_core_offset = self.ramdump.field_offset('struct module', 'module_core')
        kallsyms_offset = self.ramdump.field_offset('struct module', 'kallsyms')
        next_list_ent = self.ramdump.read_pointer(mod_list + next_offset)
        # Iterate the circular list until we come back to the head (or
        # hit a NULL pointer in a corrupted dump).
        while next_list_ent and next_list_ent != mod_list:
            mod_tbl_ent = module_table.module_table_entry()
            module = next_list_ent - list_offset
            name_ptr = module + name_offset
            mod_tbl_ent.name = self.ramdump.read_cstring(name_ptr)
            scmversion_addr = self.ramdump.read_pointer(module + scmversion_offset)
            # Guard against a NULL/unreadable scmversion pointer.
            if scmversion_addr:
                scmversion = self.ramdump.read_cstring(scmversion_addr)
            else:
                scmversion = None
            mod_tbl_ent.module_offset = self.ramdump.read_pointer(module + module_core_offset)
            if mod_tbl_ent.module_offset is None:
                mod_tbl_ent.module_offset = 0
            mod_tbl_ent.kallsyms_addr = self.ramdump.read_pointer(module + kallsyms_offset)
            self.module_table_cn.add_entry(mod_tbl_ent)
            self.modules_list.append((mod_tbl_ent.module_offset, mod_tbl_ent.name, module, scmversion))
            next_list_ent = self.ramdump.read_pointer(next_list_ent + next_offset)
        # Sort by core address only: comparing whole tuples could raise
        # TypeError when a name or version string is None.
        self.modules_list.sort(key=lambda ent: ent[0])
        for item in self.modules_list:
            print("%-32s 0x%-32x v.v (struct module)0x%-32x %s" % (item[1], item[0], item[2], item[3]), file=self.f)

    def parse(self):
        """Parser entry point: open the output file and dump the table."""
        self.module_table_cn = module_table.module_table_class()
        self.f = open(self.ramdump.outdir + "/modules_table.txt", "w")
        self.modules_list = []
        self.retrieve_modules_cn()
        self.f.close()

View File

@@ -0,0 +1,429 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
from parser_util import register_parser, RamParser, cleanupString
from print_out import print_out_str
from utasklib import UTaskLib
from utasklib import ProcessNotFoundExcetion
import linux_list as llist
from collections import namedtuple
class BaseFs(RamParser):
    """Shared helpers for mount-table parsers.

    Walks the mount namespace of a chosen process and, for every mount,
    hands an MntTuple to the subclass' show_info().  Subclasses must
    provide show_info() and an 'output' file; they may override
    print_header().
    """

    def __init__(self, ramdump):
        super().__init__(ramdump)
        # Per-mount record passed to show_info().
        self.mnt_tuple = namedtuple(
            "MntTuple", ["mount", "vfsmount", "super_block", "dname", "mpath"])

    def get_dname_of_dentry(self, dentry):
        """Return the (cleaned-up) name string of dentry->d_name."""
        dentry_name_offset = self.ramdump.field_offset(
            'struct dentry', 'd_name')
        len_offset = self.ramdump.field_offset(
            'struct qstr', 'len')
        qst_name_offset = self.ramdump.field_offset(
            'struct qstr', 'name')
        name_address = self.ramdump.read_word(
            dentry + dentry_name_offset + qst_name_offset)
        len_address = dentry + dentry_name_offset + len_offset
        name_len = self.ramdump.read_u32(len_address)
        return cleanupString(
            self.ramdump.read_cstring(name_address, name_len))

    def get_pathname(self, vfsmount):
        """Build the absolute mount-point path for a struct vfsmount.

        Climbs mnt_parent links and, for each mountpoint dentry, its
        d_parent chain, collecting path components leaf-first; returns
        '/' when no component is found.
        """
        mnt_offset_in_mount = self.ramdump.field_offset('struct mount', 'mnt')
        mnt_parent_offset = self.ramdump.field_offset('struct mount', 'mnt_parent')
        mount = vfsmount - mnt_offset_in_mount
        mnt_mountpoint_offset = self.ramdump.field_offset(
            'struct mount', 'mnt_mountpoint')
        d_parent_offset = self.ramdump.field_offset(
            'struct dentry', 'd_parent')
        mnt_parent_pre = 0
        mnt_parent = mount
        mount_name = []
        # Walk up the mount tree until the parent pointer stops changing.
        while mnt_parent_pre != mnt_parent:
            mnt_parent_pre = mnt_parent
            mnt_mountpoint = self.ramdump.read_word(
                mnt_parent + mnt_mountpoint_offset)
            name = self.get_dname_of_dentry(mnt_mountpoint)
            mnt_parent = self.ramdump.read_word(mnt_parent + mnt_parent_offset)
            if name is None or name == '/':
                break
            if mnt_parent == 0:
                break
            mount_name.append(name)
            # Walk the dentry chain to get the full path of this
            # mountpoint within its parent mount.
            d_parent = self.ramdump.read_word(
                mnt_mountpoint + d_parent_offset)
            d_parent_pre = 0
            while d_parent_pre != d_parent:
                d_parent_pre = d_parent
                name = self.get_dname_of_dentry(d_parent)
                d_parent = self.ramdump.read_word(d_parent + d_parent_offset)
                if name is None or name == '/':
                    break
                mount_name.append(name)
                if d_parent == 0:
                    break
        # Components were collected leaf-first; emit them root-first.
        if not mount_name:
            return '/'
        return ''.join('/' + name for name in reversed(mount_name))

    def parse(self):
        """Entry point: locate the target process and walk its mounts.

        Accepts an optional 'pid' (numeric) or 'proc' (name) parameter;
        defaults to pid 1 (init) when none is given or parsing fails.
        """
        pid = 1
        try:
            args = self.parse_param()
            try:
                pid = int(args["pid"])
            except Exception:
                pid = args["proc"]
        except Exception:
            pid = 1
        finally:
            print_out_str("Dump info from process {}".format(pid))
        try:
            taskinfo = UTaskLib(self.ramdump).get_utask_info(pid)
        except ProcessNotFoundExcetion:
            print_out_str("pid={} process is not started".format(pid))
            return
        nsproxy = self.ramdump.read_structure_field(
            taskinfo.task_addr, 'struct task_struct', 'nsproxy')
        mnt_ns = self.ramdump.read_structure_field(
            nsproxy, 'struct nsproxy', 'mnt_ns')
        mount_list_addr = mnt_ns + self.ramdump.field_offset(
            "struct mnt_namespace", 'list')
        field_next_offset = self.ramdump.field_offset('struct mount', 'mnt_list')
        self.output.write(f"Process: {taskinfo.name}, (struct task_struct*)=0x{taskinfo.task_addr:x} \
            (struct nsproxy*)=0x{nsproxy:x} (struct mnt_namespace*)=0x{mnt_ns:x}\n\n\n")
        self.print_header()
        list_walker = llist.ListWalker(self.ramdump, mount_list_addr, field_next_offset)
        list_walker.walk(mount_list_addr, self.__show_info, mount_list_addr)

    def print_header(self):
        """Column header hook; subclasses override."""
        pass

    def __show_info(self, mount, mount_list_addr):
        """ListWalker callback: build an MntTuple and hand it to show_info()."""
        field_next_offset = self.ramdump.field_offset('struct mount', 'mnt_list')
        # Skip the list head itself.
        if mount_list_addr == mount + field_next_offset:
            return
        vfsmount = mount + self.ramdump.field_offset('struct mount', 'mnt')
        d_name_addr = self.ramdump.read_word(
            mount + self.ramdump.field_offset('struct mount', 'mnt_devname'))
        d_name = self.ramdump.read_cstring(d_name_addr, 48)
        if d_name == "rootfs":
            return
        mount_path = self.get_pathname(vfsmount)
        mnt_root = self.ramdump.read_structure_field(
            vfsmount, 'struct vfsmount', 'mnt_root')
        sb = self.ramdump.read_structure_field(
            mnt_root, 'struct dentry', 'd_sb')
        mtuple = self.mnt_tuple(mount, vfsmount, sb, d_name, mount_path)
        self.show_info(mtuple)
@register_parser('--mount', 'Extract mount info logs from ramdump', optional=True)
class Mount(BaseFs):
    """Render /proc/mounts-style lines for every mount in the target
    process' namespace, written to mounts.txt."""
    def __init__(self, ramdump):
        super().__init__(ramdump)
        self.output = self.ramdump.open_file("mounts.txt")
    def is_anon_ns(self, mnt_namespace):
        """An anonymous mount namespace has ns->seq == 0."""
        return self.ramdump.read_u64(mnt_namespace, "seq") == 0
    def show_type(self, super_block):
        """Return the filesystem type name, with '.subtype' appended
        when the superblock carries one."""
        s_type = self.ramdump.read_structure_field(super_block, "struct super_block", 's_type')
        name = self.ramdump.read_structure_field(s_type, "struct file_system_type", 'name')
        type_name = self.ramdump.read_cstring(name, 24)
        s_subtype = self.ramdump.read_structure_field(super_block, "struct super_block", 's_subtype')
        if s_subtype:
            subname = self.ramdump.read_cstring(s_subtype, 24)
            type_name = type_name + "." + subname
        return type_name
    def mnt_is_readonly(self, vfsmount):
        """Report whether the mount is read-only.

        Side effect: caches mnt_flags and s_flags on self for the later
        show_sb_opts()/show_mnt_opts() calls -- the call order used in
        show_info() matters.
        """
        MNT_READONLY = 0x40
        SB_RDONLY = 1
        self.mnt_flags = self.ramdump.read_int(vfsmount + self.ramdump.field_offset("struct vfsmount", 'mnt_flags'))
        mnt_sb = self.ramdump.read_structure_field(vfsmount, "struct vfsmount", 'mnt_sb')
        self.s_flags = self.ramdump.read_word(mnt_sb + self.ramdump.field_offset('struct super_block', 's_flags'))
        return (self.mnt_flags & MNT_READONLY) or (self.s_flags &SB_RDONLY)
    def show_sb_opts(self, super_block):
        """Append superblock flag options (sync, dirsync, ...) plus any
        selinux option keyword to the current output line.

        Uses self.s_flags cached by mnt_is_readonly().
        """
        SB_SYNCHRONOUS = 1 << 4
        SB_DIRSYNC = 1 << 7
        SB_MANDLOCK = 1 << 6
        SB_LAZYTIME = 1 << 25
        fs_opts= {
            SB_SYNCHRONOUS : ",sync",
            SB_DIRSYNC : ",dirsync",
            SB_MANDLOCK : ",mand",
            SB_LAZYTIME :",lazytime",
        }
        ret = ""
        for flag, flag_str in fs_opts.items():
            if self.s_flags & flag:
                ret = ret + flag_str
        self.output.write(ret)
        self.selinux_sb_show_options(super_block)
    def selinux_superblock(self, super_block):
        """Return the address of this superblock's security blob
        (sb->s_security plus the selinux blob offset, when present)."""
        s_security = self.ramdump.read_structure_field(super_block, "struct super_block", 's_security')
        selinux_blob_sizes = self.ramdump.address_of('selinux_blob_sizes')
        try:
            ##lbs_superblock not exist on kernel 5.1
            lbs_superblock = self.ramdump.read_int(
                selinux_blob_sizes + self.ramdump.field_offset("struct lsm_blob_sizes", 'lbs_superblock'))
        except:
            lbs_superblock = 0
        return s_security + lbs_superblock
    def selinux_initialized(self):
        """True once the selinux policy has been loaded."""
        selinux_state = self.ramdump.address_of('selinux_state')
        initialized = self.ramdump.read_bool(
            selinux_state + self.ramdump.field_offset("struct selinux_state", 'initialized'))
        return initialized
    def selinux_sb_show_options(self, super_block):
        """Append at most one selinux mount-option keyword for this
        superblock, or nothing when selinux is not initialized."""
        SE_SBINITIALIZED = 0x0100
        sbsec = self.selinux_superblock(super_block)
        s_flags = self.ramdump.read_u16(sbsec + self.ramdump.field_offset("struct superblock_security_struct", 'flags'))
        if (s_flags & SE_SBINITIALIZED) == 0:
            return
        if not self.selinux_initialized():
            return
        CONTEXT_MNT = 0x01
        FSCONTEXT_MNT = 0x02
        ROOTCONTEXT_MNT = 0x04
        DEFCONTEXT_MNT = 0x08
        SBLABEL_MNT = 0x10
        CONTEXT_STR = "context"
        FSCONTEXT_STR = "fscontext"
        ROOTCONTEXT_STR = "rootcontext"
        DEFCONTEXT_STR = "defcontext"
        SECLABEL_STR = "seclabel"
        # First matching flag wins; each branch returns.
        if s_flags & FSCONTEXT_MNT:
            self.output.write("," + FSCONTEXT_STR)
            return
        if s_flags & CONTEXT_MNT:
            self.output.write("," + CONTEXT_STR)
            return
        if s_flags & DEFCONTEXT_MNT:
            self.output.write("," + DEFCONTEXT_STR)
            return
        if s_flags & ROOTCONTEXT_MNT:
            self.output.write("," + ROOTCONTEXT_STR)
            return
        if s_flags & SBLABEL_MNT:
            self.output.write("," + SECLABEL_STR)
            return
    def show_mnt_opts(self, vfsmount):
        """Append per-mount flag options (nosuid, nodev, ...) and an
        'idmapped' marker to the current output line.

        Uses self.mnt_flags cached by mnt_is_readonly().
        """
        MNT_NOSUID = 0x01
        MNT_NODEV = 0x02
        MNT_NOEXEC = 0x04
        MNT_NOATIME = 0x08
        MNT_NODIRATIME = 0x10
        MNT_RELATIME = 0x20
        MNT_READONLY = 0x40
        MNT_NOSYMFOLLOW = 0x80
        mnt_opts = {
            MNT_NOSUID : ",nosuid",
            MNT_NODEV : ",nodev",
            MNT_NOEXEC : ",noexec",
            MNT_NOATIME : ",noatime",
            MNT_NODIRATIME : ",nodiratime",
            MNT_RELATIME : ",relatime",
            MNT_NOSYMFOLLOW : ",nosymfollow",
        }
        ret = ""
        for flag, flag_str in mnt_opts.items():
            if self.mnt_flags & flag:
                ret = ret + flag_str
        if self.is_idmapped_mnt(vfsmount):
            ret = ret + ",idmapped"
        self.output.write(ret)
    def is_idmapped_mnt(self, vfsmount):
        """A mount is id-mapped when mnt_idmap != &nop_mnt_idmap."""
        mnt_idmap = self.ramdump.read_structure_field(vfsmount, "struct vfsmount", 'mnt_idmap')
        nop_mnt_idmap = self.ramdump.address_of('nop_mnt_idmap')
        return mnt_idmap != nop_mnt_idmap
    def print_header(self):
        """Write the column header for mounts.txt."""
        self.output.write("{:<16s} {:<16s} {:<29s} {:<32s} {:<16s} {:<16s}\n".format(
            "(struct mount *)", "(struct super_block *)", "dev_name", "mount_path", "fs_type", "flags"))
    def show_info(self, mtuple):
        """Emit one mounts.txt line for the given mount.

        mnt_is_readonly() must run first: it caches the flag words the
        later show_sb_opts()/show_mnt_opts() calls read.
        """
        mount = mtuple.mount
        vfsmount = mtuple.vfsmount
        sb = mtuple.super_block
        dname = mtuple.dname
        mount_path = mtuple.mpath
        typename = self.show_type(sb)
        self.output.write("0x{:16x} 0x{:16x} {:<32s} {:<32s} {:<16s} ".format(
            mount, sb, dname, mount_path, typename))
        if self.mnt_is_readonly(vfsmount):
            self.output.write("ro")
        else:
            self.output.write("rw")
        self.show_sb_opts(sb)
        self.show_mnt_opts(vfsmount)
        self.output.write("\n")
@register_parser('--df', 'Extract df info from ramdump', optional=True)
class Df(BaseFs):
    """'df'-style per-filesystem usage report, written to df.txt.

    For each unique super_block, dispatches on the symbol name of its
    super_operations.statfs implementation (ext4_statfs, f2fs_statfs,
    ...) to a same-named method that computes size/used/free.
    """

    def __init__(self, ramdump):
        super().__init__(ramdump)
        self.output = self.ramdump.open_file("df.txt")
        # s_dev values already reported, so bind mounts of the same
        # filesystem appear only once.
        self.dev_ids = []

    def devid(self, s_dev):
        """Convert the kernel-internal dev_t into the user-visible
        encoding (as the kernel's new_encode_dev() does)."""
        MINORBITS = 20
        MINORMASK = ((1 << MINORBITS) - 1)
        major = s_dev >> MINORBITS
        minor = s_dev & MINORMASK
        return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12)

    def ext4_statfs(self, super_block, s_fs_info):
        """Compute usage for an ext4 filesystem (mirrors kernel ext4_statfs)."""
        sbi = self.ramdump.read_datatype(s_fs_info, 'struct ext4_sb_info',
                                         ["s_es", "s_overhead", "s_resv_clusters",
                                          "s_cluster_bits", "s_mount_opt",
                                          "s_freeclusters_counter", "s_dirtyclusters_counter"])
        _es = sbi.s_es
        es = self.ramdump.read_datatype(_es, 'struct ext4_super_block',
                                        ["s_r_blocks_count_hi", "s_r_blocks_count_lo",
                                         "s_blocks_count_hi", "s_blocks_count_lo"])
        s_cluster_bits = sbi.s_cluster_bits
        s_mount_opt = sbi.s_mount_opt
        EXT4_MOUNT_MINIX_DF = 0x00080
        # With minixdf the overhead is not subtracted from the total.
        overhead = 0
        if s_mount_opt & EXT4_MOUNT_MINIX_DF == 0:
            overhead = sbi.s_overhead
        f_bsize = self.ramdump.read_word(super_block + self.ramdump.field_offset("struct super_block", "s_blocksize"))
        s_blocks_count = es.s_blocks_count_hi << 32 | es.s_blocks_count_lo
        # Total = block count minus the overhead converted from clusters
        # to blocks (EXT4_C2B == '<< s_cluster_bits').  Only 'overhead'
        # is shifted; shifting the whole difference would be wrong.
        f_blocks = s_blocks_count - (overhead << s_cluster_bits)
        # Free = free clusters minus dirty (delayed-allocation) clusters,
        # as in the kernel's ext4_statfs(); clamp at zero.
        bfree = self.percpu_counter(sbi.s_freeclusters_counter.count,
                                    sbi.s_freeclusters_counter.counters) - \
            self.percpu_counter(sbi.s_dirtyclusters_counter.count,
                                sbi.s_dirtyclusters_counter.counters)
        bfree = bfree if bfree > 0 else 0
        f_bfree = bfree << s_cluster_bits
        self.writeback(f_bsize, f_blocks, f_bfree)

    def writeback(self, f_bsize, f_blocks, f_bfree):
        """Write the Size/Used/Avail/Use% columns for one filesystem."""
        if f_bsize * f_blocks <= 0:
            # Nothing meaningful to report (e.g. pseudo filesystems).
            self.output.write("{:<6s} {:<6s} {:<6s} {:<6.0%}".format("_", "_", "_", 0))
            return
        total = f_bsize * f_blocks
        used = (f_blocks - f_bfree) * f_bsize
        free = f_bfree * f_bsize
        used_per = used / total
        self.output.write("{:<6s} {:<6s} {:<6s} {:<6.0%}".format(
            self.human_str(total), self.human_str(used), self.human_str(free), used_per))

    def f2fs_statfs(self, super_block, s_fs_info):
        """Compute usage for an f2fs filesystem."""
        sbi = self.ramdump.read_datatype(s_fs_info, 'struct f2fs_sb_info',
                                         ["raw_super", "blocksize", "user_block_count",
                                          "total_valid_block_count", "current_reserved_blocks"])
        raw_super = self.ramdump.read_datatype(sbi.raw_super, 'struct f2fs_super_block',
                                               ["block_count", "segment0_blkaddr"])
        total_count = raw_super.block_count
        start_count = raw_super.segment0_blkaddr
        f_bsize = sbi.blocksize
        f_blocks = total_count - start_count
        f_bfree = sbi.user_block_count - sbi.total_valid_block_count - sbi.current_reserved_blocks
        self.writeback(f_bsize, f_blocks, f_bfree)

    def shmem_statfs(self, super_block, s_fs_info):
        """Compute usage for a tmpfs/shmem filesystem."""
        sbinfo = self.ramdump.read_datatype(s_fs_info, 'struct shmem_sb_info', ["max_blocks", "used_blocks"])
        f_bsize = self.ramdump.get_page_size()
        f_blocks = 0
        f_bfree = 0
        # max_blocks == 0 means no limit configured; report zeros then.
        if sbinfo.max_blocks > 0:
            f_blocks = sbinfo.max_blocks
            used_blocks = self.percpu_counter(sbinfo.used_blocks.count, sbinfo.used_blocks.counters)
            f_bfree = sbinfo.max_blocks - used_blocks
        self.writeback(f_bsize, f_blocks, f_bfree)

    def fat_statfs(self, super_block, s_fs_info):
        """Compute usage for a FAT filesystem."""
        sbi = self.ramdump.read_datatype(s_fs_info, 'struct msdos_sb_info',
                                         ["cluster_size", "max_cluster", "free_clusters"])
        # Clusters 0 and 1 are reserved FAT entries.
        FAT_START_ENT = 2
        f_bsize = sbi.cluster_size
        f_blocks = sbi.max_cluster - FAT_START_ENT
        f_bfree = sbi.free_clusters
        self.writeback(f_bsize, f_blocks, f_bfree)

    def fuse_statfs(self, super_block, s_fs_info):
        # FUSE usage lives in the userspace daemon; nothing to compute
        # from the dump (no size columns are written for this row).
        return 0

    def efivarfs_statfs(self, super_block, s_fs_info):
        # No meaningful size information; emit the placeholder columns.
        self.writeback(0, 0, 0)

    def human_str(self, size):
        """Format a byte count as a short human-readable string."""
        if size < 1024:
            return " %.0f " % (size)
        if size < 1024 * 1024:
            return " %.0fK " % (size / 1024)
        elif size < 1024 * 1024 * 1024:
            return " %.0fM " % (size / (1024 * 1024))
        else:
            return " %.1fG " % (size / (1024 * 1024 * 1024))

    def percpu_counter(self, count, counters):
        """Sum a kernel percpu_counter: global count plus per-cpu deltas."""
        for core in self.ramdump.iter_cpus():
            try:
                count += self.ramdump.read_int(counters + self.ramdump.per_cpu_offset(core))
            except Exception:
                # Unreadable per-cpu slot: best effort, keep going.
                continue
        return count

    def print_header(self):
        """Write the column header for df.txt."""
        self.output.write("{:<16s} {:<16s} {:<28s} {:<6s} {:6s} {:<6s} {:<6s} {:<16s}\n".format(
            "(struct mount *)", "(struct super_block *)", "dev_name",
            "Size", "Used", "Avail", "Use%", "Mounted On"))

    def show_info(self, mtuple):
        """Emit one df row for a mount, skipping duplicate devices."""
        mount = mtuple.mount
        sb = mtuple.super_block
        dname = mtuple.dname
        mount_path = mtuple.mpath
        sbi = self.ramdump.read_datatype(sb, 'struct super_block', ['s_fs_info', "s_dev"])
        if sbi.s_dev in self.dev_ids:
            return
        self.dev_ids.append(sbi.s_dev)
        s_op = self.ramdump.read_structure_field(sb, 'struct super_block', 's_op')
        statfs = self.ramdump.read_structure_field(s_op, 'struct super_operations', 'statfs')
        look = self.ramdump.unwind_lookup(statfs)
        if look:
            fop, _ = look
            # Dispatch to the method named after the kernel's statfs
            # implementation, when we model that filesystem.
            if hasattr(self, fop):
                self.output.write("0x{:16x} 0x{:16x} {:<32s}".format(mount, sb, dname))
                getattr(self, fop)(sb, sbi.s_fs_info)
                self.output.write("{:<32s} {:<16s}".format(mount_path, fop))
                self.output.write("\n")

View File

@@ -0,0 +1,22 @@
# Copyright (c) 2013, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from parser_util import register_parser, RamParser
from print_out import print_out_str
@register_parser('--dump-page-tables', 'Dumps page tables')
class PageTableDump(RamParser):
    """Write the MMU page tables out to page_tables.txt."""

    def parse(self):
        """Dump the page tables and report where they were written."""
        outfile = self.ramdump.open_file('page_tables.txt')
        with outfile:
            self.ramdump.mmu.dump_page_tables(outfile)
        print_out_str('Page tables dumped to page_tables.txt')

View File

@@ -0,0 +1,108 @@
# Copyright (c) 2012,2014-2017 The Linux Foundation. All rights reserved.
# Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import struct
from print_out import print_out_str
from parser_util import register_parser, RamParser
from mm import pfn_to_page, page_buddy , page_address ,get_debug_flags
@register_parser('--print-pagealloccorruption', 'print pagealloc corruption information (if available)')
class PageallocCorruption(RamParser):
    """Scan poisoned (free) pages for corruption of the 0xaa pattern."""
    def parse(self):
        """Walk every memblock memory region; for each pfn whose page is
        marked poisoned/free, verify all 1024 words of the page still
        hold 0xaaaaaaaa.  Mismatches are classified (single-bit vs
        general corruption) and hexdumped to page_corruption_summary.txt;
        scanned pfn ranges go to page_ranges.txt.
        """
        if not self.ramdump.is_config_defined('CONFIG_DEBUG_PAGEALLOC'):
            print_out_str('CONFIG_DEBUG_PAGEALLOC Not enabled')
            return
        out_corruption_summary = self.ramdump.open_file('page_corruption_summary.txt')
        out_pfn_ranges = self.ramdump.open_file('page_ranges.txt')
        memblock_addr = self.ramdump.address_of('memblock')
        memblock_memory_offset = self.ramdump.field_offset('struct memblock', 'memory')
        memblock_memory_cnt_offset = self.ramdump.field_offset('struct memblock_type', 'cnt')
        cnt = self.ramdump.read_word(memblock_addr + memblock_memory_offset + memblock_memory_cnt_offset)
        region_offset = self.ramdump.field_offset('struct memblock_type', 'regions')
        regions_baseaddr = self.ramdump.read_word(memblock_addr + memblock_memory_offset + region_offset)
        page_ext_offset = self.ramdump.field_offset(
            'struct mem_section', 'page_ext')
        page_flags_offset = self.ramdump.field_offset(
            'struct page_ext', 'flags')
        mem_section_size = self.ramdump.sizeof("struct mem_section")
        mem_section = self.ramdump.read_word('mem_section')
        page_ext_size = self.ramdump.sizeof("struct page_ext")
        for r in range(0,cnt) :
            region_addr = regions_baseaddr + r * self.ramdump.sizeof('struct memblock_region')
            start_addr_offset = self.ramdump.field_offset('struct memblock_region', 'base')
            # NOTE(review): read_u32 truncates 'base'/'size' to 32 bits;
            # 64-bit targets with memory above 4GB would need read_u64
            # here -- confirm against the target's memblock layout.
            start_addr = self.ramdump.read_u32(region_addr + start_addr_offset)
            size_offset = self.ramdump.field_offset('struct memblock_region', 'size')
            region_size = self.ramdump.read_u32(region_addr + size_offset)
            end_addr = start_addr + region_size
            min_pfn = start_addr >> self.ramdump.page_shift
            max_pfn = end_addr >> self.ramdump.page_shift
            out_pfn_ranges.write("min_pfn : %s,max_pfn: %s\n" %(hex(min_pfn),hex(max_pfn)))
            for pfn in range(min_pfn, max_pfn):
                page = pfn_to_page(self.ramdump, pfn)
                page_pa = (pfn << self.ramdump.page_shift)
                if (self.ramdump.kernel_version > (3, 18, 0)):
                    free = 0
                    # Index into mem_section[]; presumably assumes 1GB
                    # section granularity (pa >> 30) -- TODO confirm.
                    offset = page_pa >> 30
                    mem_section_0_offset = (
                        mem_section + (offset * mem_section_size))
                    page_ext = self.ramdump.read_word(
                        mem_section_0_offset + page_ext_offset)
                    temp_page_ext = page_ext + (pfn * page_ext_size)
                    page_ext_flags = self.ramdump.read_word(
                        temp_page_ext + page_flags_offset)
                    # enum PAGE_EXT_DEBUG_POISON ( == 0th bit is set ) for page poisioning
                    free = page_ext_flags & 1
                else:
                    # debug_flags value should be 1 for pages having poisoned value 0xaa
                    free = get_debug_flags(self.ramdump, page)
                if free == 1:
                    flag = 0;
                    # Check the whole page (1024 32-bit words) against
                    # the poison pattern; stop at the first mismatch.
                    for i in range(0,1024):
                        readval = self.ramdump.read_u32(page_pa+i*4, False)
                        if readval == None:
                            break
                        if readval!=0xaaaaaaaa:
                            flag = 1
                            diff = 0xaaaaaaaa-readval
                            if diff < 0:
                                diff = diff * (-1)
                            # NOTE(review): a power-of-two arithmetic
                            # difference only approximates a single-bit
                            # flip; XOR would be exact (e.g. 0xaaaaaaac
                            # differs by 2 but flips two bits).
                            isBitFlip = not (diff & diff-1)
                            if isBitFlip:
                                out_corruption_summary.write("Single Bit Error at %s" %("%#0.8x"%(page_pa+i*4)))
                                out_corruption_summary.write("\n")
                            else:
                                out_corruption_summary.write("Corruption at %s" %("%#0.8x"%(page_pa+i*4)))
                                out_corruption_summary.write("\n")
                            # Hexdump up to 0x100 bytes from the fault,
                            # clipped to the end of this page.
                            end_addr = page_pa + i*4 + 0x00000100
                            end_page_addr = page_pa | 0x00000fff
                            if end_addr > end_page_addr:
                                end_addr = end_page_addr
                            count = 0
                            for wordaddr in range(page_pa + i*4,end_addr,0x00000004):
                                if count == 0:
                                    out_corruption_summary.write("%s " %("%#0.8x"%(wordaddr)))
                                readval = self.ramdump.read_u32(wordaddr, False)
                                out_corruption_summary.write("%s " %("%#0.8x"%(readval)))
                                count = count+1
                                if count == 8:
                                    count = 0
                                    out_corruption_summary.write ("\n");
                            break
                    if flag == 1 :
                        out_corruption_summary.write("\n")
        out_corruption_summary.close()
        out_pfn_ranges.close()

Some files were not shown because too many files have changed in this diff Show More