Initial commit
This commit is contained in:
@@ -0,0 +1,13 @@
|
||||
# Copyright (c) 2016, Intel Corporation.
|
||||
#
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
"""Build performance tests"""
|
||||
from .base import (BuildPerfTestCase,
|
||||
BuildPerfTestLoader,
|
||||
BuildPerfTestResult,
|
||||
BuildPerfTestRunner,
|
||||
KernelDropCaches,
|
||||
runCmd2)
|
||||
from .test_basic import *
|
||||
@@ -0,0 +1,504 @@
|
||||
# Copyright (c) 2016, Intel Corporation.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
"""Build performance test base classes and functionality"""
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import resource
|
||||
import socket
|
||||
import shutil
|
||||
import time
|
||||
import unittest
|
||||
import xml.etree.ElementTree as ET
|
||||
from collections import OrderedDict
|
||||
from datetime import datetime, timedelta
|
||||
from functools import partial
|
||||
from multiprocessing import Process
|
||||
from multiprocessing import SimpleQueue
|
||||
from xml.dom import minidom
|
||||
|
||||
import oe.path
|
||||
from oeqa.utils.commands import CommandError, runCmd, get_bb_vars
|
||||
from oeqa.utils.git import GitError, GitRepo
|
||||
|
||||
# Module-level logger shared by all build performance test classes.
log = logging.getLogger('build-perf')

# Our own version of runCmd which does not raise AssertionError; otherwise
# command errors would be reported as test failures rather than test errors.
# Exception output is truncated to 40 lines to keep the logs readable.
runCmd2 = partial(runCmd, assert_error=False, limit_exc_output=40)
||||
class KernelDropCaches(object):
    """Container of the functions for dropping kernel caches"""
    # Cached sudo password (bytes); filled in by check() when password-less
    # sudo is not available.
    sudo_passwd = None

    @classmethod
    def check(cls):
        """Check permissions for dropping kernel caches"""
        from getpass import getpass
        from locale import getdefaultlocale
        # Probe whether non-interactive (password-less) sudo works.
        probe = runCmd2(['sudo', '-k', '-n', 'tee', '/proc/sys/vm/drop_caches'],
                        ignore_status=True, data=b'0')
        if not probe.output.startswith('sudo:'):
            return
        # Non-interactive sudo failed: ask for the password once and cache
        # it for subsequent drop() calls.
        pass_str = getpass(
            "\nThe script requires sudo access to drop caches between "
            "builds (echo 3 > /proc/sys/vm/drop_caches).\n"
            "Please enter your sudo password: ")
        cls.sudo_passwd = bytes(pass_str, getdefaultlocale()[1])

    @classmethod
    def drop(cls):
        """Drop kernel caches"""
        if cls.sudo_passwd:
            # Feed the cached password to sudo on stdin (-S).
            flag = '-S'
            stdin_data = cls.sudo_passwd + b'\n'
        else:
            # Rely on password-less sudo (-n).
            flag = '-n'
            stdin_data = b''
        # '3' drops page cache, dentries and inodes.
        runCmd2(['sudo', '-k', flag, 'tee', '/proc/sys/vm/drop_caches'],
                data=stdin_data + b'3')
|
||||
|
||||
|
||||
def str_to_fn(string):
    """Convert string to a sanitized filename.

    Every run of non-word characters (anything outside [a-zA-Z0-9_] and
    Unicode word characters) is collapsed into a single '-'.

    FIX: the original passed flags=re.LOCALE, which is invalid for str
    patterns on Python 3.6+ and made this function raise ValueError on
    every call; \\W already uses the string's (Unicode) word semantics.
    """
    return re.sub(r'(\W+)', '-', string)
|
||||
|
||||
|
||||
class ResultsJsonEncoder(json.JSONEncoder):
    """Extended encoder for build perf test results.

    Serializes datetime objects as (float) seconds since the Unix epoch
    and timedelta objects as total seconds; everything else is handled by
    the base JSONEncoder.
    """
    # Naive datetime for the Unix epoch. Equivalent to the original
    # datetime.utcfromtimestamp(0), which is deprecated since Python 3.12;
    # all timestamps handled here are naive and assumed to be UTC.
    unix_epoch = datetime(1970, 1, 1)

    def default(self, obj):
        """Encoder for our types"""
        if isinstance(obj, datetime):
            # NOTE: we assume that all timestamps are in UTC time
            return (obj - self.unix_epoch).total_seconds()
        if isinstance(obj, timedelta):
            return obj.total_seconds()
        return json.JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
class BuildPerfTestResult(unittest.TextTestResult):
    """Test result class collecting build performance test results.

    NOTE(review): the original docstring called this a "runner"; it is a
    unittest TestResult subclass, instantiated by BuildPerfTestRunner.
    Besides the normal unittest bookkeeping it records per-test timing and
    can write the collected results out as JSON, JUnit XML and buildstats.
    """
    # List of test cases to run
    # NOTE(review): appears unused inside this class -- possibly a
    # leftover; verify against external users before removing.
    test_run_queue = []

    def __init__(self, out_dir, *args, **kwargs):
        """out_dir: directory where all result files are written.

        Remaining arguments are passed through to TextTestResult
        (stream, descriptions, verbosity).
        """
        super(BuildPerfTestResult, self).__init__(*args, **kwargs)

        self.out_dir = out_dir
        self.hostname = socket.gethostname()
        # Product name reported in the results, overridable via environment
        self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
        self.start_time = self.elapsed_time = None
        # The base class tracks failures/errors/skips; successes are ours
        self.successes = []

    def addSuccess(self, test):
        """Record results from successful tests"""
        super(BuildPerfTestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        """Record results from crashed test"""
        # Stash the sys.exc_info() tuple on the test for the report writers
        test.err = err
        super(BuildPerfTestResult, self).addError(test, err)

    def addFailure(self, test, err):
        """Record results from failed test"""
        test.err = err
        super(BuildPerfTestResult, self).addFailure(test, err)

    def addExpectedFailure(self, test, err):
        """Record results from expectedly failed test"""
        test.err = err
        super(BuildPerfTestResult, self).addExpectedFailure(test, err)

    def startTest(self, test):
        """Pre-test hook"""
        # Tests create their temporary dirs under the shared output dir
        test.base_dir = self.out_dir
        log.info("Executing test %s: %s", test.name, test.shortDescription())
        self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
        super(BuildPerfTestResult, self).startTest(test)

    def startTestRun(self):
        """Pre-run hook"""
        # NOTE(review): utcnow() is deprecated in Python 3.12 but kept here
        # because ResultsJsonEncoder expects naive UTC datetimes.
        self.start_time = datetime.utcnow()

    def stopTestRun(self):
        """Post-run hook"""
        self.elapsed_time = datetime.utcnow() - self.start_time

    def all_results(self):
        """Return (status, test, message) tuples for all recorded tests,
        sorted by test start time."""
        compound = [('SUCCESS', t, None) for t in self.successes] + \
                   [('FAILURE', t, m) for t, m in self.failures] + \
                   [('ERROR', t, m) for t, m in self.errors] + \
                   [('EXPECTED_FAILURE', t, m) for t, m in self.expectedFailures] + \
                   [('UNEXPECTED_SUCCESS', t, None) for t in self.unexpectedSuccesses] + \
                   [('SKIPPED', t, m) for t, m in self.skipped]
        return sorted(compound, key=lambda info: info[1].start_time)

    def write_buildstats_json(self):
        """Write buildstats file"""
        buildstats = OrderedDict()
        for _, test, _ in self.all_results():
            for key, val in test.buildstats.items():
                # Keys are namespaced by test name to avoid collisions
                buildstats[test.name + '.' + key] = val
        with open(os.path.join(self.out_dir, 'buildstats.json'), 'w') as fobj:
            json.dump(buildstats, fobj, cls=ResultsJsonEncoder)

    def write_results_json(self):
        """Write test results into a json-formatted file"""
        results = OrderedDict([('tester_host', self.hostname),
                               ('start_time', self.start_time),
                               ('elapsed_time', self.elapsed_time),
                               ('tests', OrderedDict())])

        for status, test, reason in self.all_results():
            test_result = OrderedDict([('name', test.name),
                                       ('description', test.shortDescription()),
                                       ('status', status),
                                       ('start_time', test.start_time),
                                       ('elapsed_time', test.elapsed_time),
                                       ('measurements', test.measurements)])
            if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
                # test.err was set by the add* hooks above
                test_result['message'] = str(test.err[1])
                test_result['err_type'] = test.err[0].__name__
                test_result['err_output'] = reason
            elif reason:
                test_result['message'] = reason

            results['tests'][test.name] = test_result

        with open(os.path.join(self.out_dir, 'results.json'), 'w') as fobj:
            json.dump(results, fobj, indent=4,
                      cls=ResultsJsonEncoder)

    def write_results_xml(self):
        """Write test results into a JUnit XML file"""
        top = ET.Element('testsuites')
        suite = ET.SubElement(top, 'testsuite')
        suite.set('name', 'oeqa.buildperf')
        suite.set('timestamp', self.start_time.isoformat())
        suite.set('time', str(self.elapsed_time.total_seconds()))
        suite.set('hostname', self.hostname)
        suite.set('failures', str(len(self.failures) + len(self.expectedFailures)))
        suite.set('errors', str(len(self.errors)))
        suite.set('skipped', str(len(self.skipped)))

        test_cnt = 0
        for status, test, reason in self.all_results():
            test_cnt += 1
            testcase = ET.SubElement(suite, 'testcase')
            testcase.set('classname', test.__module__ + '.' + test.__class__.__name__)
            testcase.set('name', test.name)
            testcase.set('description', test.shortDescription())
            testcase.set('timestamp', test.start_time.isoformat())
            testcase.set('time', str(test.elapsed_time.total_seconds()))
            # FIX: all_results() produces the status string
            # 'EXPECTED_FAILURE', but this method originally checked for
            # 'EXP_FAILURE', so expected failures fell through to the final
            # elif and raised TypeError("BUG: invalid test status ...").
            if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
                if status in ('FAILURE', 'EXPECTED_FAILURE'):
                    result = ET.SubElement(testcase, 'failure')
                else:
                    result = ET.SubElement(testcase, 'error')
                result.set('message', str(test.err[1]))
                result.set('type', test.err[0].__name__)
                result.text = reason
            elif status == 'SKIPPED':
                result = ET.SubElement(testcase, 'skipped')
                result.text = reason
            elif status not in ('SUCCESS', 'UNEXPECTED_SUCCESS'):
                raise TypeError("BUG: invalid test status '%s'" % status)

            for data in test.measurements.values():
                measurement = ET.SubElement(testcase, data['type'])
                measurement.set('name', data['name'])
                measurement.set('legend', data['legend'])
                vals = data['values']
                if data['type'] == BuildPerfTestCase.SYSRES:
                    ET.SubElement(measurement, 'time',
                                  timestamp=vals['start_time'].isoformat()).text = \
                        str(vals['elapsed_time'].total_seconds())
                    attrib = dict((k, str(v)) for k, v in vals['iostat'].items())
                    ET.SubElement(measurement, 'iostat', attrib=attrib)
                    attrib = dict((k, str(v)) for k, v in vals['rusage'].items())
                    ET.SubElement(measurement, 'rusage', attrib=attrib)
                elif data['type'] == BuildPerfTestCase.DISKUSAGE:
                    ET.SubElement(measurement, 'size').text = str(vals['size'])
                else:
                    raise TypeError('BUG: unsupported measurement type')

        suite.set('tests', str(test_cnt))

        # Use minidom for pretty-printing
        dom_doc = minidom.parseString(ET.tostring(top, 'utf-8'))
        with open(os.path.join(self.out_dir, 'results.xml'), 'w') as fobj:
            dom_doc.writexml(fobj, addindent='  ', newl='\n', encoding='utf-8')
|
||||
|
||||
|
||||
class BuildPerfTestCase(unittest.TestCase):
    """Base class for build performance tests.

    Subclasses set build_target and implement test methods that call the
    measure_*() helpers; results accumulate in self.measurements and
    self.buildstats and are written out by BuildPerfTestResult.
    """
    # Measurement type identifiers used in results output
    SYSRES = 'sysres'
    DISKUSAGE = 'diskusage'
    # Bitbake target built by setUp() (fetch step) and the test itself;
    # overridden by subclasses.
    build_target = None

    def __init__(self, *args, **kwargs):
        super(BuildPerfTestCase, self).__init__(*args, **kwargs)
        self.name = self._testMethodName
        # Set by BuildPerfTestResult.startTest() before the test runs
        self.base_dir = None
        self.start_time = None
        self.elapsed_time = None
        # name -> measurement dict, filled by the measure_*() helpers
        self.measurements = OrderedDict()
        # measurement name -> list of per-recipe buildstats
        self.buildstats = OrderedDict()
        # self.err is supposed to be a tuple from sys.exc_info()
        self.err = None
        self.bb_vars = get_bb_vars()
        # TODO: remove 'times' and 'sizes' arrays when globalres support is
        # removed
        self.times = []
        self.sizes = []

    @property
    def tmp_dir(self):
        # Per-test scratch directory under the shared output directory
        return os.path.join(self.base_dir, self.name + '.tmp')

    def shortDescription(self):
        # Never return None (the base class does for undocumented tests)
        return super(BuildPerfTestCase, self).shortDescription() or ""

    def setUp(self):
        """Set-up fixture for each test"""
        if not os.path.isdir(self.tmp_dir):
            os.mkdir(self.tmp_dir)
        if self.build_target:
            # Pre-fetch all sources so download time does not skew results
            self.run_cmd(['bitbake', self.build_target, '--runall=fetch'])

    def tearDown(self):
        """Tear-down fixture for each test"""
        if os.path.isdir(self.tmp_dir):
            shutil.rmtree(self.tmp_dir)

    def run(self, *args, **kwargs):
        """Run test, recording wall-clock start time and duration"""
        self.start_time = datetime.now()
        super(BuildPerfTestCase, self).run(*args, **kwargs)
        self.elapsed_time = datetime.now() - self.start_time

    def run_cmd(self, cmd):
        """Convenience method for running a command.

        cmd may be a string or an argument list. Failures are logged and
        the CommandError is re-raised.
        """
        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Logging command: %s", cmd_str)
        try:
            runCmd2(cmd)
        except CommandError as err:
            log.error("Command failed: %s", err.retcode)
            raise

    def _append_measurement(self, measurement):
        """Simple helper for adding measurements results"""
        # Duplicate names would silently overwrite results, so fail loudly
        if measurement['name'] in self.measurements:
            raise ValueError('BUG: two measurements with the same name in {}'.format(
                self.__class__.__name__))
        self.measurements[measurement['name']] = measurement

    def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
        """Measure system resource usage of a command.

        Runs cmd in a separate process and records elapsed time, rusage of
        the process's children and /proc I/O statistics as a 'sysres'
        measurement named name. With save_bs=True the latest buildstats
        directory is also archived under the same name.
        """
        def _worker(data_q, cmd, **kwargs):
            """Worker process for measuring resources"""
            try:
                start_time = datetime.now()
                ret = runCmd2(cmd, **kwargs)
                etime = datetime.now() - start_time
                # RUSAGE_CHILDREN covers the command just run (a child of
                # this worker process)
                rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
                iostat = OrderedDict()
                # NOTE(review): reads the worker's own /proc/<pid>/io;
                # whether this reflects the spawned command's I/O depends
                # on kernel accounting of exited children -- verify.
                with open('/proc/{}/io'.format(os.getpid())) as fobj:
                    for line in fobj.readlines():
                        key, val = line.split(':')
                        iostat[key] = int(val)
                rusage = OrderedDict()
                # Skip unused fields, (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
                # 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
                for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
                            'ru_majflt', 'ru_inblock', 'ru_oublock',
                            'ru_nvcsw', 'ru_nivcsw']:
                    rusage[key] = getattr(rusage_struct, key)
                data_q.put({'ret': ret,
                            'start_time': start_time,
                            'elapsed_time': etime,
                            'rusage': rusage,
                            'iostat': iostat})
            except Exception as err:
                # Ship the exception to the parent for re-raising there
                data_q.put(err)

        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Timing command: %s", cmd_str)
        data_q = SimpleQueue()
        try:
            proc = Process(target=_worker, args=(data_q, cmd,))
            proc.start()
            data = data_q.get()
            proc.join()
            if isinstance(data, Exception):
                raise data
        except CommandError:
            log.error("Command '%s' failed", cmd_str)
            raise
        etime = data['elapsed_time']

        measurement = OrderedDict([('type', self.SYSRES),
                                   ('name', name),
                                   ('legend', legend)])
        measurement['values'] = OrderedDict([('start_time', data['start_time']),
                                             ('elapsed_time', etime),
                                             ('rusage', data['rusage']),
                                             ('iostat', data['iostat'])])
        if save_bs:
            self.save_buildstats(name)

        self._append_measurement(measurement)

        # Append to 'times' array for globalres log (HH:MM:SS.ss format)
        e_sec = etime.total_seconds()
        self.times.append('{:d}:{:02d}:{:05.2f}'.format(int(e_sec / 3600),
                                                        int((e_sec % 3600) / 60),
                                                        e_sec % 60))

    def measure_disk_usage(self, path, name, legend, apparent_size=False):
        """Estimate disk usage of a file or directory.

        Records the size (in KiB, as reported by du) as a 'diskusage'
        measurement named name. apparent_size=True passes --apparent-size
        to du (logical size instead of blocks allocated).
        """
        cmd = ['du', '-s', '--block-size', '1024']
        if apparent_size:
            cmd.append('--apparent-size')
        cmd.append(path)

        ret = runCmd2(cmd)
        size = int(ret.output.split()[0])
        log.debug("Size of %s path is %s", path, size)
        measurement = OrderedDict([('type', self.DISKUSAGE),
                                   ('name', name),
                                   ('legend', legend)])
        measurement['values'] = OrderedDict([('size', size)])
        self._append_measurement(measurement)
        # Append to 'sizes' array for globalres log
        self.sizes.append(str(size))

    def save_buildstats(self, measurement_name):
        """Save buildstats of the most recent build under measurement_name"""
        def split_nevr(nevr):
            """Split name and version information from recipe "nevr" string"""
            n_e_v, revision = nevr.rsplit('-', 1)
            match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
                             n_e_v)
            if not match:
                # If we're not able to parse a version starting with a number, just
                # take the part after last dash
                match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
                                 n_e_v)
            name = match.group('name')
            version = match.group('version')
            epoch = match.group('epoch')
            return name, epoch, version, revision

        def bs_to_json(filename):
            """Convert (task) buildstats file into json format"""
            bs_json = OrderedDict()
            iostat = OrderedDict()
            rusage = OrderedDict()
            # NOTE(review): assumes a complete buildstats file; a file
            # without 'Started'/'Ended' lines (e.g. an aborted task) would
            # raise NameError below -- verify inputs.
            with open(filename) as fobj:
                for line in fobj.readlines():
                    key, val = line.split(':', 1)
                    val = val.strip()
                    if key == 'Started':
                        start_time = datetime.utcfromtimestamp(float(val))
                        bs_json['start_time'] = start_time
                    elif key == 'Ended':
                        end_time = datetime.utcfromtimestamp(float(val))
                    elif key.startswith('IO '):
                        split = key.split()
                        iostat[split[1]] = int(val)
                    elif key.find('rusage') >= 0:
                        split = key.split()
                        ru_key = split[-1]
                        if ru_key in ('ru_stime', 'ru_utime'):
                            val = float(val)
                        else:
                            val = int(val)
                        # Sum self and child rusage into one counter
                        rusage[ru_key] = rusage.get(ru_key, 0) + val
                    elif key == 'Status':
                        bs_json['status'] = val
            bs_json['elapsed_time'] = end_time - start_time
            bs_json['rusage'] = rusage
            bs_json['iostat'] = iostat
            return bs_json

        log.info('Saving buildstats in JSON format')
        bs_dirs = sorted(os.listdir(self.bb_vars['BUILDSTATS_BASE']))
        if len(bs_dirs) > 1:
            log.warning("Multiple buildstats found for test %s, only "
                        "archiving the last one", self.name)
        bs_dir = os.path.join(self.bb_vars['BUILDSTATS_BASE'], bs_dirs[-1])

        buildstats = []
        for fname in os.listdir(bs_dir):
            recipe_dir = os.path.join(bs_dir, fname)
            # Skip non-recipe entries in the buildstats directory
            if not os.path.isdir(recipe_dir) or fname == "reduced_proc_pressure":
                continue
            name, epoch, version, revision = split_nevr(fname)
            recipe_bs = OrderedDict((('name', name),
                                     ('epoch', epoch),
                                     ('version', version),
                                     ('revision', revision),
                                     ('tasks', OrderedDict())))
            for task in os.listdir(recipe_dir):
                recipe_bs['tasks'][task] = bs_to_json(os.path.join(recipe_dir,
                                                                   task))
            buildstats.append(recipe_bs)

        self.buildstats[measurement_name] = buildstats

    def rm_tmp(self):
        """Cleanup temporary/intermediate files and directories"""
        log.debug("Removing temporary and cache files")
        for name in ['bitbake.lock', 'cache/sanity_info',
                     self.bb_vars['TMPDIR']]:
            oe.path.remove(name, recurse=True)

    def rm_sstate(self):
        """Remove sstate directory"""
        log.debug("Removing sstate-cache")
        oe.path.remove(self.bb_vars['SSTATE_DIR'], recurse=True)

    def rm_cache(self):
        """Drop bitbake caches"""
        oe.path.remove(self.bb_vars['PERSISTENT_DIR'], recurse=True)

    @staticmethod
    def sync():
        """Sync and drop kernel caches"""
        # Shut down any resident bitbake server first
        runCmd2('bitbake -m', ignore_status=True)
        log.debug("Syncing and dropping kernel caches""")
        KernelDropCaches.drop()
        os.sync()
        # Wait a bit for all the dirty blocks to be written onto disk
        time.sleep(3)
|
||||
|
||||
|
||||
class BuildPerfTestLoader(unittest.TestLoader):
    """Test loader for build performance tests"""
    # Disable unittest's default alphabetical ordering so tests run in
    # source-definition order.
    sortTestMethodsUsing = None
|
||||
|
||||
|
||||
class BuildPerfTestRunner(unittest.TextTestRunner):
    """Test runner for build performance tests.

    NOTE(review): the original docstring said "Test loader" -- a
    copy-paste from BuildPerfTestLoader.
    """
    # NOTE(review): sortTestMethodsUsing is a TestLoader attribute and has
    # no effect on a TextTestRunner; kept as-is for compatibility.
    sortTestMethodsUsing = None

    def __init__(self, out_dir, *args, **kwargs):
        # out_dir: directory handed to BuildPerfTestResult for result files;
        # remaining arguments go to TextTestRunner.
        super(BuildPerfTestRunner, self).__init__(*args, **kwargs)
        self.out_dir = out_dir

    def _makeResult(self):
        # Create our result class, passing the output directory through
        return BuildPerfTestResult(self.out_dir, self.stream, self.descriptions,
                                   self.verbosity)
|
||||
@@ -0,0 +1,120 @@
|
||||
# Copyright (c) 2016, Intel Corporation.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
"""Basic set of build performance tests"""
|
||||
import os
|
||||
import shutil
|
||||
|
||||
import oe.path
|
||||
from oeqa.buildperf import BuildPerfTestCase
|
||||
from oeqa.utils.commands import get_bb_var, get_bb_vars
|
||||
|
||||
class Test1P1(BuildPerfTestCase):
    # Full from-scratch build of the sato image
    build_target = 'core-image-sato'

    def test1(self):
        """Build core-image-sato"""
        # Start from a pristine state: no tmp, no sstate, no bitbake caches
        self.rm_tmp()
        self.rm_sstate()
        self.rm_cache()
        self.sync()
        self.measure_cmd_resources(['bitbake', self.build_target], 'build',
                                   'bitbake ' + self.build_target, save_bs=True)
        self.measure_disk_usage(self.bb_vars['TMPDIR'], 'tmpdir', 'tmpdir')
        # Rootfs size uses apparent size (True) rather than blocks allocated
        self.measure_disk_usage(get_bb_var("IMAGE_ROOTFS", self.build_target), 'rootfs', 'rootfs', True)
|
||||
|
||||
|
||||
class Test1P2(BuildPerfTestCase):
    # Kernel-only build with dependencies pre-populated
    build_target = 'virtual/kernel'

    def test12(self):
        """Build virtual/kernel"""
        # Build and cleans state in order to get all dependencies pre-built
        self.run_cmd(['bitbake', self.build_target])
        self.run_cmd(['bitbake', self.build_target, '-c', 'cleansstate'])

        self.sync()
        self.measure_cmd_resources(['bitbake', self.build_target], 'build',
                                   'bitbake ' + self.build_target)
|
||||
|
||||
|
||||
class Test1P3(BuildPerfTestCase):
    # Full from-scratch build of the sato image with rm_work enabled
    build_target = 'core-image-sato'

    def test13(self):
        """Build core-image-sato with rm_work enabled"""
        # Inject rm_work via a config post-file in the test's tmp dir
        postfile = os.path.join(self.tmp_dir, 'postfile.conf')
        with open(postfile, 'w') as fobj:
            fobj.write('INHERIT += "rm_work"\n')

        # Start from a pristine state: no tmp, no sstate, no bitbake caches
        self.rm_tmp()
        self.rm_sstate()
        self.rm_cache()
        self.sync()
        cmd = ['bitbake', '-R', postfile, self.build_target]
        # FIX: legend was 'bitbake' + self.build_target (missing space),
        # inconsistent with the 'bitbake <target>' legends used by the
        # other tests in this file.
        self.measure_cmd_resources(cmd, 'build',
                                   'bitbake ' + self.build_target,
                                   save_bs=True)
        self.measure_disk_usage(self.bb_vars['TMPDIR'], 'tmpdir', 'tmpdir')
|
||||
|
||||
|
||||
class Test2(BuildPerfTestCase):
    # Measures do_rootfs alone, with everything else restored from sstate
    build_target = 'core-image-sato'

    def test2(self):
        """Run core-image-sato do_rootfs with sstate"""
        # Build once in order to populate sstate cache
        self.run_cmd(['bitbake', self.build_target])

        # Remove tmp and bitbake caches but keep sstate
        self.rm_tmp()
        self.rm_cache()
        self.sync()
        cmd = ['bitbake', self.build_target, '-c', 'rootfs']
        self.measure_cmd_resources(cmd, 'do_rootfs', 'bitbake do_rootfs')
|
||||
|
||||
|
||||
class Test3(BuildPerfTestCase):
    # No build_target: this test only measures recipe parsing

    def test3(self):
        """Bitbake parsing (bitbake -p)"""
        # Drop all caches and parse
        self.rm_cache()
        oe.path.remove(os.path.join(self.bb_vars['TMPDIR'], 'cache'), True)
        self.measure_cmd_resources(['bitbake', '-p'], 'parse_1',
                                   'bitbake -p (no caches)')
        # Drop tmp/cache (persistent cache from the previous parse remains)
        oe.path.remove(os.path.join(self.bb_vars['TMPDIR'], 'cache'), True)
        self.measure_cmd_resources(['bitbake', '-p'], 'parse_2',
                                   'bitbake -p (no tmp/cache)')
        # Parse with fully cached data
        self.measure_cmd_resources(['bitbake', '-p'], 'parse_3',
                                   'bitbake -p (cached)')
|
||||
|
||||
|
||||
class Test4(BuildPerfTestCase):
    # eSDK (extensible SDK) build, install and size metrics
    build_target = 'core-image-sato'

    def test4(self):
        """eSDK metrics"""
        self.run_cmd(['bitbake', '-c', 'do_populate_sdk_ext',
                     self.build_target])
        # Re-read bb vars scoped to the build target (SDK_DEPLOY etc.)
        self.bb_vars = get_bb_vars(None, self.build_target)
        tmp_dir = self.bb_vars['TMPDIR']
        installer = os.path.join(
            self.bb_vars['SDK_DEPLOY'],
            self.bb_vars['TOOLCHAINEXT_OUTPUTNAME'] + '.sh')
        # Measure installer size
        self.measure_disk_usage(installer, 'installer_bin', 'eSDK installer',
                                apparent_size=True)
        # Measure deployment time and deployed size
        deploy_dir = os.path.join(tmp_dir, 'esdk-deploy')
        if os.path.exists(deploy_dir):
            shutil.rmtree(deploy_dir)
        self.sync()
        self.measure_cmd_resources([installer, '-y', '-d', deploy_dir],
                                   'deploy', 'eSDK deploy')
        #make sure bitbake is unloaded
        self.sync()
        self.measure_disk_usage(deploy_dir, 'deploy_dir', 'deploy dir',
                                apparent_size=True)
|
||||
@@ -0,0 +1,8 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# Enable other layers to have modules in the same named directory:
# extend_path() appends any same-named package directories found on
# sys.path, turning this into a namespace-style package.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
||||
@@ -0,0 +1,199 @@
|
||||
# Copyright (C) 2014 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
# This module adds support to testimage.bbclass to deploy images and run
|
||||
# tests using a "controller image" - this is a "known good" image that is
|
||||
# installed onto the device as part of initial setup and will be booted into
|
||||
# with no interaction; we can then use it to deploy the image to be tested
|
||||
# to a second partition before running the tests.
|
||||
#
|
||||
# For an example controller image, see core-image-testcontroller
|
||||
# (meta/recipes-extended/images/core-image-testcontroller.bb)
|
||||
|
||||
import os
|
||||
import bb
|
||||
import traceback
|
||||
import time
|
||||
import subprocess
|
||||
|
||||
import oeqa.targetcontrol
|
||||
import oeqa.utils.sshcontrol as sshcontrol
|
||||
import oeqa.utils.commands as commands
|
||||
from oeqa.utils import CommandError
|
||||
|
||||
from abc import ABCMeta, abstractmethod
|
||||
|
||||
class ControllerImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta):
    """Base class for hardware targets driven via a 'controller image'.

    The device is first booted into a known-good controller image over
    ssh, the image under test is deployed to a second partition, and the
    board is then rebooted into it. Subclasses implement _deploy() and
    _start() for the concrete boot mechanism.
    """

    # Rootfs archive formats we know how to deploy (unpacked with tar)
    supported_image_fstypes = ['tar.gz', 'tar.bz2']

    def __init__(self, d):
        super(ControllerImageHardwareTarget, self).__init__(d)

        # target ip (required; optional ":port" suffix for ssh)
        addr = d.getVar("TEST_TARGET_IP") or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
        self.ip = addr.split(":")[0]
        try:
            self.port = addr.split(":")[1]
        except IndexError:
            self.port = None
        bb.note("Target IP: %s" % self.ip)
        self.server_ip = d.getVar("TEST_SERVER_IP")
        if not self.server_ip:
            try:
                # FIX: check_output() returns bytes on Python 3; the
                # original split bytes with a str separator, which raised
                # TypeError and made auto-detection always fall into
                # bb.fatal() below. Decode before splitting.
                route = subprocess.check_output(['ip', 'route', 'get', self.ip]).decode('utf-8')
                self.server_ip = route.split("\n")[0].split()[-1]
            except Exception as e:
                bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
        bb.note("Server IP: %s" % self.server_ip)

        # test rootfs + kernel
        self.image_fstype = self.get_image_fstype(d)
        self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("IMAGE_LINK_NAME") + '.' + self.image_fstype)
        self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("KERNEL_IMAGETYPE", False) + '-' + d.getVar('MACHINE', False) + '.bin')
        if not os.path.isfile(self.rootfs):
            # we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be
            # the same as the config with which the image was build, ie
            # you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz"
            # and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage
            bb.fatal("No rootfs found. Did you build the image ?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\" ? \
                      \nExpected path: %s" % self.rootfs)
        if not os.path.isfile(self.kernel):
            bb.fatal("No kernel found. Expected path: %s" % self.kernel)

        # controller ssh connection, established in deploy()
        self.controller = None
        # if the user knows what they are doing, then by all means...
        self.user_cmds = d.getVar("TEST_DEPLOY_CMDS")
        self.deploy_cmds = None

        # this is the name of the command that controls the power for a board
        # e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants"
        # the command should take as the last argument "off" and "on" and "cycle" (off, on)
        self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD") or None
        self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS", False) or ""

        self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD") or None
        self.serialcontrol_args = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS", False) or ""

        # NOTE(review): this binds (and below potentially mutates) the real
        # os.environ rather than a copy -- confirm that leaking the
        # original user environment into this process is intended.
        self.origenv = os.environ
        if self.powercontrol_cmd or self.serialcontrol_cmd:
            # the external script for controlling power might use ssh
            # ssh + keys means we need the original user env
            bborigenv = d.getVar("BB_ORIGENV", False) or {}
            for key in bborigenv:
                val = bborigenv.getVar(key)
                if val is not None:
                    self.origenv[key] = str(val)

        if self.powercontrol_cmd:
            if self.powercontrol_args:
                self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd, self.powercontrol_args)
        if self.serialcontrol_cmd:
            if self.serialcontrol_args:
                self.serialcontrol_cmd = "%s %s" % (self.serialcontrol_cmd, self.serialcontrol_args)

    def power_ctl(self, msg):
        """Run the external power-control command with argument msg
        ("on"/"off"/"cycle"); no-op when no command is configured."""
        if self.powercontrol_cmd:
            cmd = "%s %s" % (self.powercontrol_cmd, msg)
            try:
                commands.runCmd(cmd, assert_error=False, start_new_session=True, env=self.origenv)
            except CommandError as e:
                bb.fatal(str(e))

    def power_cycle(self, conn):
        """Reboot the board: shut down cleanly over conn, then power-cycle
        if a power-control command exists, else ask the target to reboot."""
        if self.powercontrol_cmd:
            # be nice, don't just cut power
            conn.run("shutdown -h now")
            time.sleep(10)
            self.power_ctl("cycle")
        else:
            status, output = conn.run("sync; { sleep 1; reboot; } > /dev/null &")
            if status != 0:
                bb.error("Failed rebooting target and no power control command defined. You need to manually reset the device.\n%s" % output)

    def _wait_until_booted(self):
        ''' Waits until the target device has booted (if we have just power cycled it) '''
        # Subclasses with better methods of determining boot can override this
        time.sleep(120)

    def deploy(self):
        """Boot into the controller image and deploy the test image."""
        # base class just sets the ssh log file for us
        super(ControllerImageHardwareTarget, self).deploy()
        self.controller = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port)
        status, output = self.controller.run("cat /etc/controllerimage")
        if status != 0:
            # We're not booted into the controller image, so try rebooting
            bb.plain("%s - booting into the controller image" % self.pn)
            self.power_ctl("cycle")
            self._wait_until_booted()

        bb.plain("%s - deploying image on target" % self.pn)
        status, output = self.controller.run("cat /etc/controllerimage")
        if status != 0:
            bb.fatal("No ssh connectivity or target isn't running a controller image.\n%s" % output)
        if self.user_cmds:
            self.deploy_cmds = self.user_cmds.split("\n")
        try:
            self._deploy()
        except Exception as e:
            bb.fatal("Failed deploying test image: %s" % e)

    @abstractmethod
    def _deploy(self):
        """Copy the test image onto the device (subclass-specific)."""
        pass

    def start(self, extra_bootparams=None):
        """Boot the test image and open the ssh connection used by tests."""
        bb.plain("%s - boot test image on target" % self.pn)
        self._start()
        # set the ssh object for the target/test image
        self.connection = sshcontrol.SSHControl(self.ip, logfile=self.sshlog, port=self.port)
        bb.plain("%s - start running tests" % self.pn)

    @abstractmethod
    def _start(self):
        """Boot into the deployed test image (subclass-specific)."""
        pass

    def stop(self):
        """Reboot back via the controller connection after the tests."""
        bb.plain("%s - reboot/powercycle target" % self.pn)
        self.power_cycle(self.controller)
|
||||
|
||||
|
||||
class SystemdbootTarget(ControllerImageHardwareTarget):
    """Controller-image target that boots the test image via systemd-boot,
    selecting the 'test' boot entry through the LoaderEntryOneShot EFI
    variable."""

    def __init__(self, d):
        super(SystemdbootTarget, self).__init__(d)
        # this the value we need to set in the LoaderEntryOneShot EFI variable
        # so the system boots the 'test' bootloader label and not the default
        # The first four bytes are EFI bits, and the rest is an utf-16le string
        # (EFI vars values need to be utf-16)
        # $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C
        # 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...|
        self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00'
        # Commands run in order on the controller image to install the test
        # kernel and rootfs and arm the one-shot boot entry.
        self.deploy_cmds = [
                'mount -L boot /boot',
                'mkdir -p /mnt/testrootfs',
                'mount -L testrootfs /mnt/testrootfs',
                'modprobe efivarfs',
                'mount -t efivarfs efivarfs /sys/firmware/efi/efivars',
                'cp ~/test-kernel /boot',
                'rm -rf /mnt/testrootfs/*',
                'tar xvf ~/test-rootfs.%s -C /mnt/testrootfs' % self.image_fstype,
                'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue
                ]

    def _deploy(self):
        # make sure these aren't mounted
        self.controller.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
        # from now on, every deploy cmd should return 0
        # else an exception will be thrown by sshcontrol
        self.controller.ignore_status = False
        self.controller.copy_to(self.rootfs, "~/test-rootfs." + self.image_fstype)
        self.controller.copy_to(self.kernel, "~/test-kernel")
        for cmd in self.deploy_cmds:
            self.controller.run(cmd)

    def _start(self, params=None):
        # Reboot out of the controller image; the armed one-shot EFI entry
        # makes systemd-boot pick the test image.
        self.power_cycle(self.controller)
        # there are better ways than a timeout but this should work for now
        time.sleep(120)
|
||||
@@ -0,0 +1,74 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import types
|
||||
import bb
|
||||
import os
|
||||
|
||||
# This class is responsible for loading a test target controller
class TestTargetLoader:
    """Discover and load test target controller classes from the
    lib/oeqa/controllers directory of each layer in BBPATH."""

    def get_controller_module(self, target, bbpath):
        """Search the oeqa.controllers module directories for a controller
        named *target* and return it.

        Raises AttributeError if no controller is found.
        Raises ImportError if a provided module can not be imported.
        """
        controllerslist = self.get_controller_modulenames(bbpath)
        bb.note("Available controller modules: %s" % str(controllerslist))
        controller = self.load_controller_from_name(target, controllerslist)
        return controller

    def get_controller_modulenames(self, bbpath):
        """Return a list of all python modules in lib/oeqa/controllers for
        each layer in *bbpath*."""
        controllerslist = []

        def add_controller_list(path):
            if not os.path.exists(os.path.join(path, '__init__.py')):
                bb.fatal('Controllers directory %s exists but is missing __init__.py' % path)
            files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
            for f in files:
                module = 'oeqa.controllers.' + f[:-3]
                if module not in controllerslist:
                    controllerslist.append(module)
                else:
                    bb.warn("Duplicate controller module found for %s, only one added. Layers should create unique controller module names" % module)

        for p in bbpath:
            controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers')
            bb.debug(2, 'Searching for target controllers in %s' % controllerpath)
            if os.path.exists(controllerpath):
                add_controller_list(controllerpath)
        return controllerslist

    def load_controller_from_name(self, target, modulenames):
        """Return the first controller named *target* found in *modulenames*.

        Raises AttributeError if not found.
        Raises ImportError if a provided module can not be imported.
        """
        for name in modulenames:
            obj = self.load_controller_from_module(target, name)
            if obj:
                return obj
        raise AttributeError("Unable to load {0} from available modules: {1}".format(target, str(modulenames)))

    def load_controller_from_module(self, target, modulename):
        """Return the controller named *target* from *modulename*, or None
        if the module does not provide it."""
        obj = None
        # Import the module, allowing it to raise an import exception so the
        # caller can distinguish a broken module from a missing controller.
        module = __import__(modulename, globals(), locals(), [target])
        # Look for the target class in the module. A missing attribute is
        # expected (not every module defines every controller) and yields
        # None. This was a bare 'except:', which also swallowed SystemExit
        # and KeyboardInterrupt; narrowed to Exception.
        try:
            obj = getattr(module, target)
            if obj:
                from oeqa.targetcontrol import BaseTarget
                if not issubclass(obj, BaseTarget):
                    bb.warn("Target {0} found, but subclass is not BaseTarget".format(target))
        except Exception:
            obj = None
        return obj
|
||||
@@ -0,0 +1,76 @@
|
||||
= OEQA (v2) Framework =
|
||||
|
||||
== Introduction ==
|
||||
|
||||
This is version 2 of the OEQA framework. Base classes are located in the
'oeqa/core' directory and subsequent components must extend from these.
|
||||
|
||||
The main design consideration was to implement the needed functionality on
|
||||
top of the Python unittest framework. To achieve this goal, the following
|
||||
modules are used:
|
||||
|
||||
* oeqa/core/runner.py: Provides OETestResult and OETestRunner base
|
||||
classes extending the unittest class. These classes support exporting
|
||||
results to different formats; currently RAW and XML support exist.
|
||||
|
||||
* oeqa/core/loader.py: Provides OETestLoader extending the unittest class.
|
||||
It also features a unified implementation of decorator support and
|
||||
filtering test cases.
|
||||
|
||||
* oeqa/core/case.py: Provides OETestCase base class extending
|
||||
unittest.TestCase and provides access to the Test data (td), Test context
|
||||
and Logger functionality.
|
||||
|
||||
* oeqa/core/decorator: Provides OETestDecorator, a new class to implement
|
||||
decorators for Test cases.
|
||||
|
||||
* oeqa/core/context: Provides OETestContext, a high-level API for
|
||||
loadTests and runTests of certain Test component and
|
||||
OETestContextExecutor a base class to enable oe-test to discover/use
|
||||
the Test component.
|
||||
|
||||
Also, a new 'oe-test' runner is located under 'scripts', allowing scans for components
|
||||
that support OETestContextExecutor (see below).
|
||||
|
||||
== Terminology ==
|
||||
|
||||
* Test component: The area of testing in the Project, for example: runtime, SDK, eSDK, selftest.
|
||||
|
||||
* Test data: Data associated with the Test component. Currently we use bitbake datastore as
|
||||
a Test data input.
|
||||
|
||||
* Test context: A context of what tests needs to be run and how to do it; this additionally
|
||||
provides access to the Test data and could have custom methods and/or attrs.
|
||||
|
||||
== oe-test ==
|
||||
|
||||
The new tool, oe-test, has the ability to scan the code base for test components and provide
|
||||
a unified way to run test cases. Internally it scans folders inside oeqa module in order to find
|
||||
specific classes that implement a test component.
|
||||
|
||||
== Usage ==
|
||||
|
||||
Executing the example test component
|
||||
|
||||
$ source oe-init-build-env
|
||||
$ oe-test core
|
||||
|
||||
Getting help
|
||||
|
||||
$ oe-test -h
|
||||
|
||||
== Creating new Test Component ==
|
||||
|
||||
To add a new test component, the developer needs to extend OETestContext/OETestContextExecutor
|
||||
(from context.py) and OETestCase (from case.py)
|
||||
|
||||
== Selftesting the framework ==
|
||||
|
||||
Run all tests:
|
||||
|
||||
$ PATH=$PATH:../../ python3 -m unittest discover -s tests
|
||||
|
||||
Run a specific test:
|
||||
|
||||
$ cd tests/
|
||||
$ ./test_data.py
|
||||
@@ -0,0 +1,105 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import base64
|
||||
import zlib
|
||||
import unittest
|
||||
|
||||
from oeqa.core.exception import OEQAMissingVariable
|
||||
|
||||
def _validate_td_vars(td, td_vars, type_msg):
|
||||
if td_vars:
|
||||
for v in td_vars:
|
||||
if not v in td:
|
||||
raise OEQAMissingVariable("Test %s need %s variable but"\
|
||||
" isn't into td" % (type_msg, v))
|
||||
|
||||
class OETestCase(unittest.TestCase):
    """Base test case giving access to test data (td), the test context
    and a logger."""

    # TestContext and Logger instances, injected by OETestLoader.
    tc = None
    logger = None

    # td holds all the variables needed by the test cases and is shared
    # across all of them.
    td = None

    # td_vars lists the variables a test class or case instance requires;
    # if one is missing from td an OEQAMissingVariable is raised.
    td_vars = None

    @classmethod
    def _oeSetUpClass(clss):
        _validate_td_vars(clss.td, clss.td_vars, "class")
        hooker = getattr(clss, 'setUpHooker', None)
        if callable(hooker):
            hooker()
        clss.setUpClassMethod()

    @classmethod
    def _oeTearDownClass(clss):
        clss.tearDownClassMethod()

    def _oeSetUp(self):
        try:
            for dec in self.decorators:
                dec.setUpDecorator()
        except:
            # Undo any decorator set-up already performed, then re-raise.
            for dec in self.decorators:
                dec.tearDownDecorator()
            raise
        self.setUpMethod()

    def _oeTearDown(self):
        for dec in self.decorators:
            dec.tearDownDecorator()
        self.tearDownMethod()
|
||||
|
||||
class OEPTestResultTestCase:
    """
    Mix-in class to provide functions to make interacting with extraresults for
    the purposes of storing ptestresult data.
    """

    @staticmethod
    def _compress_log(log):
        # zlib-compress then base64-encode so the log survives JSON transport.
        logdata = log.encode("utf-8") if isinstance(log, str) else log
        compressed = zlib.compress(logdata)
        encoded = base64.b64encode(compressed).decode("utf-8")
        return {"compressed" : encoded}

    def _init_extraresults(self):
        # Lazily create the results dict the first time it is needed.
        if not hasattr(self, "extraresults"):
            self.extraresults = {"ptestresult.sections" : {}}

    def ptest_rawlog(self, log):
        """Store the raw (compressed) ptest log."""
        self._init_extraresults()
        self.extraresults["ptestresult.rawlogs"] = {"log" : self._compress_log(log)}

    def ptest_section(self, section, duration = None, log = None, logfile = None, exitcode = None):
        """Record per-section metadata: a log (inline or from file),
        a duration and/or an exit code."""
        self._init_extraresults()
        entry = self.extraresults.get("ptestresult.sections").setdefault(section, {})

        if log is not None:
            entry["log"] = self._compress_log(log)
        elif logfile is not None:
            with open(logfile, "rb") as f:
                entry["log"] = self._compress_log(f.read())

        if duration is not None:
            entry["duration"] = duration
        if exitcode is not None:
            entry["exitcode"] = exitcode

    def ptest_result(self, section, test, result):
        """Record the status of a single test within a section."""
        self._init_extraresults()
        self.extraresults.get("ptestresult.sections").setdefault(section, {})
        resultname = "ptestresult.{}.{}".format(section, test)
        self.extraresults[resultname] = {"status" : result}
|
||||
@@ -0,0 +1 @@
|
||||
{"ARCH": "x86", "IMAGE": "core-image-minimal"}
|
||||
@@ -0,0 +1,22 @@
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.core.case import OETestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
|
||||
class OETestExample(OETestCase):
    """Example test case checking values from the test data (td)."""

    def test_example(self):
        image = self.td.get('IMAGE')
        self.logger.info('IMAGE: %s' % image)
        self.assertEqual('core-image-minimal', image)
        arch = self.td.get('ARCH')
        self.logger.info('ARCH: %s' % arch)
        self.assertEqual('x86', arch)
|
||||
|
||||
class OETestExampleDepend(OETestCase):
    """Example showing test-to-test dependencies via OETestDepends."""

    @OETestDepends(['OETestExample.test_example'])
    def test_example_depends(self):
        # Eligible to run only after OETestExample.test_example passes.
        pass

    def test_example_no_depends(self):
        # No dependency declared; always eligible to run.
        pass
|
||||
@@ -0,0 +1,246 @@
|
||||
## Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import logging
|
||||
import collections
|
||||
import unittest
|
||||
|
||||
from oeqa.core.loader import OETestLoader
|
||||
from oeqa.core.runner import OETestRunner
|
||||
from oeqa.core.exception import OEQAMissingManifest, OEQATestNotFound
|
||||
|
||||
class OETestContext(object):
    """High-level API to load and run the tests of a test component,
    providing access to the test data and result collection."""

    loaderClass = OETestLoader
    runnerClass = OETestRunner

    files_dir = os.path.abspath(os.path.join(os.path.dirname(
        os.path.abspath(__file__)), "../files"))

    def __init__(self, td=None, logger=None):
        # Accept any dict, including subclasses such as OrderedDict
        # (was 'type(td) is dict', which rejected them).
        if not isinstance(td, dict):
            raise TypeError("td isn't dictionary type")

        self.td = td
        self.logger = logger
        self._registry = {}
        self._registry['cases'] = collections.OrderedDict()

        self.results = unittest.TestResult()
        unittest.registerResult(self.results)

    def _read_modules_from_manifest(self, manifest):
        """Return the module names listed in *manifest*, skipping blank
        lines and '#' comments.

        Raises OEQAMissingManifest if the file does not exist.
        """
        if not os.path.exists(manifest):
            raise OEQAMissingManifest("Manifest does not exist on %s" % manifest)

        modules = []
        # Context manager so the manifest handle is closed (was a leaked
        # open(...).readlines()).
        with open(manifest) as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith("#"):
                    modules.append(line)

        return modules

    def skipTests(self, skips):
        """Mark tests (or whole classes) matching any prefix in *skips* so
        that they are skipped at run time."""
        if not skips:
            return
        def skipfuncgen(skipmsg):
            def func():
                raise unittest.SkipTest(skipmsg)
            return func
        class_ids = {}
        for test in self.suites:
            if test.__class__ not in class_ids:
                class_ids[test.__class__] = '.'.join(test.id().split('.')[:-1])
            for skip in skips:
                if (test.id()+'.').startswith(skip+'.'):
                    setattr(test, 'setUp', skipfuncgen('Skip by the command line argument "%s"' % skip))
        for tclass in class_ids:
            cid = class_ids[tclass]
            for skip in skips:
                if (cid + '.').startswith(skip + '.'):
                    setattr(tclass, 'setUpHooker', skipfuncgen('Skip by the command line argument "%s"' % skip))

    def loadTests(self, module_paths, modules=None, tests=None,
            modules_manifest="", modules_required=None, **kwargs):
        """Discover the test suites found under *module_paths*, optionally
        restricted to the given modules/tests or a manifest file."""
        # None instead of mutable default arguments; normalize here.
        modules = modules or []
        tests = tests or []
        modules_required = modules_required or []
        if modules_manifest:
            modules = self._read_modules_from_manifest(modules_manifest)

        self.loader = self.loaderClass(self, module_paths, modules, tests,
                modules_required, **kwargs)
        self.suites = self.loader.discover()

    def prepareSuite(self, suites, processes):
        """Hook for subclasses to transform the suite before running."""
        return suites

    def runTests(self, processes=None, skips=None):
        """Run the loaded suites, optionally skipping some tests, and
        return the result object."""
        skips = skips or []
        self.runner = self.runnerClass(self, descriptions=False, verbosity=2)

        # Dynamically skip those tests specified though arguments
        self.skipTests(skips)

        self._run_start_time = time.time()
        self._run_end_time = self._run_start_time
        if not processes:
            # NOTE(review): presumably buffers test output in the
            # single-process case, unittest-style — confirm in OETestRunner.
            self.runner.buffer = True
        result = self.runner.run(self.prepareSuite(self.suites, processes))
        self._run_end_time = time.time()

        return result

    def listTests(self, display_type):
        """List discovered tests grouped according to *display_type*."""
        self.runner = self.runnerClass(self, verbosity=2)
        return self.runner.list_tests(self.suites, display_type)
|
||||
|
||||
class OETestContextExecutor(object):
    """Registers a test component with oe-test and drives a full
    load/run/report cycle from the parsed command line arguments."""

    _context_class = OETestContext
    _script_executor = 'oe-test'

    name = 'core'
    help = 'core test component example'
    description = 'executes core test suite example'
    datetime = time.strftime("%Y%m%d%H%M%S")

    default_cases = [os.path.join(os.path.abspath(os.path.dirname(__file__)),
            'cases/example')]
    default_test_data = os.path.join(default_cases[0], 'data.json')
    default_tests = None
    default_json_result_dir = None

    def register_commands(self, logger, subparsers):
        """Add this component's sub-command and its options to *subparsers*."""
        self.parser = subparsers.add_parser(self.name, help=self.help,
                description=self.description, group='components')

        self.default_output_log = '%s-results-%s.log' % (self.name, self.datetime)
        self.parser.add_argument('--output-log', action='store',
                default=self.default_output_log,
                help="results output log, default: %s" % self.default_output_log)

        self.parser.add_argument('--json-result-dir', action='store',
                default=self.default_json_result_dir,
                help="json result output dir, default: %s" % self.default_json_result_dir)

        group = self.parser.add_mutually_exclusive_group()
        group.add_argument('--run-tests', action='store', nargs='+',
                default=self.default_tests,
                help="tests to run in <module>[.<class>[.<name>]]")
        group.add_argument('--list-tests', action='store',
                choices=('module', 'class', 'name'),
                help="lists available tests")

        if self.default_test_data:
            self.parser.add_argument('--test-data-file', action='store',
                    default=self.default_test_data,
                    help="data file to load, default: %s" % self.default_test_data)
        else:
            self.parser.add_argument('--test-data-file', action='store',
                    help="data file to load")

        if self.default_cases:
            self.parser.add_argument('CASES_PATHS', action='store',
                    default=self.default_cases, nargs='*',
                    help="paths to directories with test cases, default: %s"\
                            % self.default_cases)
        else:
            self.parser.add_argument('CASES_PATHS', action='store',
                    nargs='+', help="paths to directories with test cases")

        self.parser.set_defaults(func=self.run)

    def _setup_logger(self, logger, args):
        """Attach a file handler for the output log and a uniform
        formatter to *logger*; return it."""
        formatter = logging.Formatter('%(asctime)s - ' + self.name + \
                ' - %(levelname)s - %(message)s')
        sh = logger.handlers[0]
        sh.setFormatter(formatter)
        fh = logging.FileHandler(args.output_log)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        if getattr(args, 'verbose', False):
            logger.setLevel('DEBUG')

        return logger

    def _process_args(self, logger, args):
        """Translate parsed *args* into the kwargs used to build and run
        the test context."""
        self.tc_kwargs = {}
        self.tc_kwargs['init'] = {}
        self.tc_kwargs['load'] = {}
        self.tc_kwargs['list'] = {}
        self.tc_kwargs['run'] = {}

        self.tc_kwargs['init']['logger'] = self._setup_logger(logger, args)
        if args.test_data_file:
            # Context manager so the data file handle is closed (was a
            # leaked json.load(open(...))).
            with open(args.test_data_file, "r") as f:
                self.tc_kwargs['init']['td'] = json.load(f)
        else:
            self.tc_kwargs['init']['td'] = {}

        if args.run_tests:
            self.tc_kwargs['load']['modules'] = args.run_tests
            self.tc_kwargs['load']['modules_required'] = args.run_tests
        else:
            self.tc_kwargs['load']['modules'] = []

        self.tc_kwargs['run']['skips'] = []

        self.module_paths = args.CASES_PATHS

    def _get_json_result_dir(self, args):
        return args.json_result_dir

    def _get_configuration(self):
        """Build the configuration dict stored alongside json results."""
        td = self.tc_kwargs['init']['td']
        configuration = {'TEST_TYPE': self.name,
                        'MACHINE': td.get("MACHINE"),
                        'DISTRO': td.get("DISTRO"),
                        'IMAGE_BASENAME': td.get("IMAGE_BASENAME"),
                        'DATETIME': td.get("DATETIME")}
        return configuration

    def _get_result_id(self, configuration):
        return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'],
                configuration['MACHINE'], self.datetime)

    def _pre_run(self):
        # Hook for subclasses; called just before the tests run.
        pass

    def run(self, logger, args):
        """Load and either list or run the tests, write results/logs and
        return the result object."""
        self._process_args(logger, args)

        self.tc = self._context_class(**self.tc_kwargs['init'])
        try:
            self.tc.loadTests(self.module_paths, **self.tc_kwargs['load'])
        except OEQATestNotFound as ex:
            logger.error(ex)
            sys.exit(1)

        if args.list_tests:
            rc = self.tc.listTests(args.list_tests, **self.tc_kwargs['list'])
        else:
            self._pre_run()
            rc = self.tc.runTests(**self.tc_kwargs['run'])

            json_result_dir = self._get_json_result_dir(args)
            if json_result_dir:
                configuration = self._get_configuration()
                rc.logDetails(json_result_dir,
                              configuration,
                              self._get_result_id(configuration))
            else:
                rc.logDetails()

            rc.logSummary(self.name)

        # Maintain a stable "-results.log" symlink to the latest log.
        output_link = os.path.join(os.path.dirname(args.output_log),
                "%s-results.log" % self.name)
        if os.path.exists(output_link):
            os.remove(output_link)
        os.symlink(args.output_log, output_link)

        return rc

_executor_class = OETestContextExecutor
|
||||
@@ -0,0 +1,74 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from functools import wraps
|
||||
from abc import ABCMeta
|
||||
|
||||
# Registry of every decorator class known to the test loader.
decoratorClasses = set()

def registerDecorator(cls):
    """Class decorator adding *cls* to the global decorator registry."""
    decoratorClasses.add(cls)
    return cls
|
||||
|
||||
class OETestDecorator(object, metaclass=ABCMeta):
    """Base class for test-case decorators handled by the loader/runner."""

    # Reference to the decorated OETestCase.
    case = None
    # Attribute names the concrete decorator implementation loads.
    attrs = None

    def __init__(self, *args, **kwargs):
        # Populate self.<attr> for every name in attrs, preferring keyword
        # arguments and falling back to positional ones.
        if not self.attrs:
            return
        for idx, attr in enumerate(self.attrs):
            setattr(self, attr, kwargs[attr] if attr in kwargs else args[idx])

    def __call__(self, func):
        @wraps(func)
        def wrapped_f(*args, **kwargs):
            self.attrs = self.attrs # XXX: Enables OETestLoader discover
            return func(*args, **kwargs)
        return wrapped_f

    # Called by OETestLoader while loading test cases.
    # XXX: Most methods would change the registry for later
    # processing; be aware that the filtrate method needs to
    # run later than bind, so there could be data (in the
    # registry) of cases that were filtered out.
    def bind(self, registry, case):
        self.case = case
        self.logger = case.tc.logger
        self.case.decorators.append(self)

    # Called by OETestRunner when it tries to run the test case.
    def setUpDecorator(self):
        pass

    # Called by OETestRunner after a test method has run, even if the
    # method raised an exception.
    def tearDownDecorator(self):
        pass
|
||||
|
||||
class OETestDiscover(OETestDecorator):
    """Decorator base whose discover() hook selects the cases to run."""

    # Called by OETestLoader after discovering the test cases; must
    # return the cases to be run.
    @staticmethod
    def discover(registry):
        return registry['cases']
|
||||
|
||||
def OETestTag(*tags):
    """Attach the given tag strings to a test case or class."""
    def decorator(item):
        if not hasattr(item, "__oeqa_testtags"):
            item.__oeqa_testtags = tags
        else:
            # Build a new list rather than appending, so classes using
            # inheritance do not mutate the tag list of their parent.
            item.__oeqa_testtags = list(item.__oeqa_testtags) + list(tags)
        return item
    return decorator
|
||||
@@ -0,0 +1,220 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.core.exception import OEQAMissingVariable
|
||||
|
||||
from . import OETestDecorator, registerDecorator
|
||||
|
||||
def has_feature(td, feature):
    """
    Checks for feature in DISTRO_FEATURES or IMAGE_FEATURES.
    """
    distro_features = td.get('DISTRO_FEATURES', '').split()
    image_features = td.get('IMAGE_FEATURES', '').split()
    return feature in distro_features or feature in image_features
|
||||
|
||||
def has_machine(td, machine):
    """
    Checks whether *machine* is the configured MACHINE.
    """
    return machine == td.get('MACHINE', '')
|
||||
|
||||
@registerDecorator
class skipIfDataVar(OETestDecorator):
    """
    Skip test based on value of a data store's variable.

    Fetches var from the data store and compares it with value; when equal
    the test is skipped with msg as the reason.
    """

    attrs = ('var', 'value', 'msg')

    def setUpDecorator(self):
        note = ('Checking if %r value is %r to skip test' %
                (self.var, self.value))
        self.logger.debug(note)
        current = self.case.td.get(self.var)
        if current == self.value:
            self.case.skipTest(self.msg)
|
||||
|
||||
@registerDecorator
class skipIfNotDataVar(OETestDecorator):
    """
    Skip test based on value of a data store's variable.

    Fetches var from the data store and compares it with value; when they
    differ the test is skipped with msg as the reason.
    """

    attrs = ('var', 'value', 'msg')

    def setUpDecorator(self):
        note = ('Checking if %r value is not %r to skip test' %
                (self.var, self.value))
        self.logger.debug(note)
        current = self.case.td.get(self.var)
        if current != self.value:
            self.case.skipTest(self.msg)
|
||||
|
||||
@registerDecorator
class skipIfInDataVar(OETestDecorator):
    """
    Skip test if value is in data store's variable.
    """

    attrs = ('var', 'value', 'msg')

    def setUpDecorator(self):
        msg = ('Checking if %r value contains %r to skip '
               'the test' % (self.var, self.value))
        self.logger.debug(msg)
        # Guard against an unset variable: td.get returns None and
        # '"x" in None' raised TypeError. Mirrors skipIfNotInDataVar.
        if self.value in (self.case.td.get(self.var) or ""):
            self.case.skipTest(self.msg)
|
||||
|
||||
@registerDecorator
class skipIfNotInDataVar(OETestDecorator):
    """
    Skip test if value is not in data store's variable.
    """

    attrs = ('var', 'value', 'msg')

    def setUpDecorator(self):
        note = ('Checking if %r value contains %r to run '
                'the test' % (self.var, self.value))
        self.logger.debug(note)
        # An unset variable (None) is treated as the empty string.
        current = self.case.td.get(self.var) or ""
        if self.value not in current:
            self.case.skipTest(self.msg)
|
||||
|
||||
@registerDecorator
class OETestDataDepends(OETestDecorator):
    """Fail a test's set-up when required td variables are missing."""

    attrs = ('td_depends',)

    def setUpDecorator(self):
        for var in self.td_depends:
            try:
                # Probe td for the variable; only KeyError matters here.
                self.case.td[var]
            except KeyError:
                raise OEQAMissingVariable("Test case need %s variable but"\
                        " isn't into td" % var)
|
||||
|
||||
@registerDecorator
class skipIfNotFeature(OETestDecorator):
    """
    Skip test based on DISTRO_FEATURES.

    value must be in distro features or it will skip the test
    with msg as the reason.
    """

    attrs = ('value', 'msg')

    def setUpDecorator(self):
        note = ('Checking if %s is in DISTRO_FEATURES '
                'or IMAGE_FEATURES' % (self.value))
        self.logger.debug(note)
        if has_feature(self.case.td, self.value):
            return
        self.case.skipTest(self.msg)
|
||||
|
||||
@registerDecorator
class skipIfFeature(OETestDecorator):
    """
    Skip test based on DISTRO_FEATURES.

    value must not be in distro features or it will skip the test
    with msg as the reason.
    """

    attrs = ('value', 'msg')

    def setUpDecorator(self):
        note = ('Checking if %s is not in DISTRO_FEATURES '
                'or IMAGE_FEATURES' % (self.value))
        self.logger.debug(note)
        td = self.case.td
        if has_feature(td, self.value):
            self.case.skipTest(self.msg)
|
||||
|
||||
@registerDecorator
class skipIfNotMachine(OETestDecorator):
    """
    Skip test based on MACHINE.

    value must match MACHINE or the test is skipped with msg as the reason.
    """

    attrs = ('value', 'msg')

    def setUpDecorator(self):
        note = ('Checking if %s is not this MACHINE' % self.value)
        self.logger.debug(note)
        if has_machine(self.case.td, self.value):
            return
        self.case.skipTest(self.msg)
|
||||
|
||||
@registerDecorator
class skipIfMachine(OETestDecorator):
    """
    Skip test based on MACHINE.

    value must not be this machine or the test is skipped with msg as
    the reason.
    """

    attrs = ('value', 'msg')

    def setUpDecorator(self):
        note = ('Checking if %s is this MACHINE' % self.value)
        self.logger.debug(note)
        td = self.case.td
        if has_machine(td, self.value):
            self.case.skipTest(self.msg)
|
||||
|
||||
@registerDecorator
class skipIfNotQemu(OETestDecorator):
    """
    Skip test if MACHINE is not qemu*
    """
    def setUpDecorator(self):
        self.logger.debug("Checking if not qemu MACHINE")
        machine = self.case.td.get('MACHINE', '')
        if not machine.startswith('qemu'):
            self.case.skipTest('Test only runs on qemu machines')
|
||||
|
||||
@registerDecorator
class skipIfQemu(OETestDecorator):
    """
    Skip test if MACHINE is qemu*
    """
    def setUpDecorator(self):
        self.logger.debug("Checking if qemu MACHINE")
        machine = self.case.td.get('MACHINE', '')
        if machine.startswith('qemu'):
            self.case.skipTest('Test only runs on real hardware')
|
||||
|
||||
@registerDecorator
class skipIfArch(OETestDecorator):
    """
    Skip test if HOST_ARCH is present in the tuple specified.
    """

    attrs = ('archs',)

    def setUpDecorator(self):
        host_arch = self.case.td['HOST_ARCH']
        if host_arch in self.archs:
            self.case.skipTest('Test skipped on %s' % host_arch)
|
||||
|
||||
@registerDecorator
class skipIfNotArch(OETestDecorator):
    """
    Skip test if HOST_ARCH is not present in the tuple specified.
    """

    attrs = ('archs',)

    def setUpDecorator(self):
        host_arch = self.case.td['HOST_ARCH']
        if host_arch not in self.archs:
            self.case.skipTest('Test skipped on %s' % host_arch)
|
||||
@@ -0,0 +1,98 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from unittest import SkipTest
|
||||
|
||||
from oeqa.core.exception import OEQADependency
|
||||
|
||||
from . import OETestDiscover, registerDecorator
|
||||
|
||||
def _add_depends(registry, case, depends):
|
||||
module_name = case.__module__
|
||||
class_name = case.__class__.__name__
|
||||
|
||||
case_id = case.id()
|
||||
|
||||
for depend in depends:
|
||||
dparts = depend.split('.')
|
||||
|
||||
if len(dparts) == 1:
|
||||
depend_id = ".".join((module_name, class_name, dparts[0]))
|
||||
elif len(dparts) == 2:
|
||||
depend_id = ".".join((module_name, dparts[0], dparts[1]))
|
||||
else:
|
||||
depend_id = depend
|
||||
|
||||
if not case_id in registry:
|
||||
registry[case_id] = []
|
||||
if not depend_id in registry[case_id]:
|
||||
registry[case_id].append(depend_id)
|
||||
|
||||
def _validate_test_case_depends(cases, depends):
|
||||
for case in depends:
|
||||
if not case in cases:
|
||||
continue
|
||||
for dep in depends[case]:
|
||||
if not dep in cases:
|
||||
raise OEQADependency("TestCase %s depends on %s and isn't available"\
|
||||
", cases available %s." % (case, dep, str(cases.keys())))
|
||||
|
||||
def _order_test_case_by_depends(cases, depends):
|
||||
def _dep_resolve(graph, node, resolved, seen):
|
||||
seen.append(node)
|
||||
for edge in graph[node]:
|
||||
if edge not in resolved:
|
||||
if edge in seen:
|
||||
raise OEQADependency("Test cases %s and %s have a circular" \
|
||||
" dependency." % (node, edge))
|
||||
_dep_resolve(graph, edge, resolved, seen)
|
||||
resolved.append(node)
|
||||
|
||||
dep_graph = {}
|
||||
dep_graph['__root__'] = cases.keys()
|
||||
for case in cases:
|
||||
if case in depends:
|
||||
dep_graph[case] = depends[case]
|
||||
else:
|
||||
dep_graph[case] = []
|
||||
|
||||
cases_ordered = []
|
||||
_dep_resolve(dep_graph, '__root__', cases_ordered, [])
|
||||
cases_ordered.remove('__root__')
|
||||
|
||||
return [cases[case_id] for case_id in cases_ordered]
|
||||
|
||||
def _skipTestDependency(case, depends):
|
||||
for dep in depends:
|
||||
found = False
|
||||
for test, _ in case.tc.results.successes:
|
||||
if test.id() == dep:
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
raise SkipTest("Test case %s depends on %s but it didn't pass/run." \
|
||||
% (case.id(), dep))
|
||||
|
||||
@registerDecorator
class OETestDepends(OETestDiscover):
    """Declare that a test case runs after — and only if — other cases pass."""

    attrs = ('depends',)

    def bind(self, registry, case):
        """Register this case's dependency ids in the shared registry."""
        super(OETestDepends, self).bind(registry, case)
        if not registry.get('depends'):
            registry['depends'] = {}
        _add_depends(registry['depends'], case, self.depends)

    @staticmethod
    def discover(registry):
        """Return the cases ordered by dependency, or in registry order if none."""
        recorded = registry.get('depends')
        if not recorded:
            return [registry['cases'][case_id] for case_id in registry['cases']]
        _validate_test_case_depends(registry['cases'], recorded)
        return _order_test_case_by_depends(registry['cases'], recorded)

    def setUpDecorator(self):
        """Skip this case when any declared dependency has not passed."""
        _skipTestDependency(self.case, self.depends)
|
||||
@@ -0,0 +1,29 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import signal
|
||||
from . import OETestDecorator, registerDecorator
|
||||
from oeqa.core.exception import OEQATimeoutError
|
||||
|
||||
@registerDecorator
class OETimeout(OETestDecorator):
    """Abort a test case with OEQATimeoutError after `oetimeout` seconds."""

    attrs = ('oetimeout',)

    def setUpDecorator(self):
        """Install a SIGALRM handler and arm the alarm before the test runs."""
        timeout = self.oetimeout

        def _on_alarm(signum, frame):
            raise OEQATimeoutError("Timed out after %s "
                    "seconds of execution" % timeout)

        self.logger.debug("Setting up a %d second(s) timeout" % self.oetimeout)
        # Remember the previous handler so tearDownDecorator can restore it.
        self.alarmSignal = signal.signal(signal.SIGALRM, _on_alarm)
        signal.alarm(self.oetimeout)

    def tearDownDecorator(self):
        """Disarm the alarm and restore the previous SIGALRM handler."""
        signal.alarm(0)
        if hasattr(self, 'alarmSignal'):
            signal.signal(signal.SIGALRM, self.alarmSignal)
            self.logger.debug("Removed SIGALRM handler")
|
||||
@@ -0,0 +1,26 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
class OEQAException(Exception):
    """Base class for all OEQA framework errors."""

class OEQATimeoutError(OEQAException):
    """A test exceeded its allotted execution time."""

class OEQAMissingVariable(OEQAException):
    """A required test data variable was not provided."""

class OEQADependency(OEQAException):
    """A test case dependency is missing or circular."""

class OEQAMissingManifest(OEQAException):
    """The image test manifest could not be found."""

class OEQAPreRun(OEQAException):
    """A failure occurred before the test run started."""

class OEQATestNotFound(OEQAException):
    """A requested test case or module could not be located."""
|
||||
@@ -0,0 +1,350 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import unittest
|
||||
import inspect
|
||||
|
||||
from oeqa.core.utils.path import findFile
|
||||
from oeqa.core.utils.test import getSuiteModules, getCaseID
|
||||
|
||||
from oeqa.core.exception import OEQATestNotFound
|
||||
from oeqa.core.case import OETestCase
|
||||
from oeqa.core.decorator import decoratorClasses, OETestDecorator, \
|
||||
OETestDiscover
|
||||
|
||||
# When loading tests, the unittest framework stores any exceptions and
# displays them only when the run method is called.
#
# For our purposes, it is better to raise the exceptions in the loading
# step rather than waiting to run the test suite.
#
# Generate the replacement's signature dynamically because it differs across
# Python versions: Python >= 3.4.4 uses three parameters instead of four, but
# e.g. Python 3.5.3 uses four again, so the change isn't incremental.
#
# NOTE(review): unittest.loader._make_failed_test is a private CPython API;
# this monkey-patch re-raises the stored exception immediately at load time.
_failed_test_args = inspect.getfullargspec(unittest.loader._make_failed_test).args
exec("""def _make_failed_test(%s): raise exception""" % ', '.join(_failed_test_args))
unittest.loader._make_failed_test = _make_failed_test
|
||||
|
||||
def _find_duplicated_modules(suite, directory):
    """Raise ImportError when a module already in *suite* also exists in *directory*."""
    for module in getSuiteModules(suite):
        duplicate = findFile('%s.py' % module, directory)
        if duplicate:
            raise ImportError("Duplicated %s module found in %s" % (module, duplicate))
|
||||
|
||||
def _built_modules_dict(modules, logger):
|
||||
modules_dict = {}
|
||||
|
||||
if modules == None:
|
||||
return modules_dict
|
||||
|
||||
for module in modules:
|
||||
# Assumption: package and module names do not contain upper case
|
||||
# characters, whereas class names do
|
||||
m = re.match(r'^([0-9a-z_.]+)(?:\.(\w[^.]*)(?:\.([^.]+))?)?$', module, flags=re.ASCII)
|
||||
if not m:
|
||||
logger.warn("module '%s' was skipped from selected modules, "\
|
||||
"because it doesn't match with module name assumptions: "\
|
||||
"package and module names do not contain upper case characters, whereas class names do" % module)
|
||||
continue
|
||||
|
||||
module_name, class_name, test_name = m.groups()
|
||||
|
||||
if module_name and module_name not in modules_dict:
|
||||
modules_dict[module_name] = {}
|
||||
if class_name and class_name not in modules_dict[module_name]:
|
||||
modules_dict[module_name][class_name] = []
|
||||
if test_name and test_name not in modules_dict[module_name][class_name]:
|
||||
modules_dict[module_name][class_name].append(test_name)
|
||||
if modules and not modules_dict:
|
||||
raise OEQATestNotFound("All selected modules were skipped, this would trigger selftest with all tests and -r ignored.")
|
||||
|
||||
return modules_dict
|
||||
|
||||
class OETestLoader(unittest.TestLoader):
    """Test loader that filters, decorates and registers OETestCase instances.

    Extends the stock unittest loader with:
      - selection by module/class/test name and by an optional tag filter
      - registration of every loaded case in the test context registry
      - binding of OETestDecorator instances found on test methods
      - monkey-patched setUp/tearDown{Class} hooks on OETestCase subclasses
    """
    # All loaded cases are expected to derive from this class.
    caseClass = OETestCase

    # Keyword arguments accepted by __init__ and mirrored as attributes.
    kwargs_names = ['testMethodPrefix', 'sortTestMethodUsing', 'suiteClass',
            '_top_level_dir']

    def __init__(self, tc, module_paths, modules, tests, modules_required,
            *args, **kwargs):
        """
        tc: test context (provides logger, test data and the case registry)
        module_paths: str or list of str directories to discover tests in
        modules: module/class/test specifiers to select, or None for all
        tests: explicit test case ids to select
        modules_required: specifiers that must resolve to at least one case
        """
        self.tc = tc

        self.modules = _built_modules_dict(modules, tc.logger)

        self.tests = tests
        self.modules_required = modules_required

        # Optional callable receiving a case's tag set; a truthy return
        # filters the case out (see _filterTest).
        self.tags_filter = kwargs.get("tags_filter", None)

        if isinstance(module_paths, str):
            module_paths = [module_paths]
        elif not isinstance(module_paths, list):
            raise TypeError('module_paths must be a str or a list of str')
        self.module_paths = module_paths

        for kwname in self.kwargs_names:
            if kwname in kwargs:
                setattr(self, kwname, kwargs[kwname])

        self._patchCaseClass(self.caseClass)

        super(OETestLoader, self).__init__()

    def _patchCaseClass(self, testCaseClass):
        # Adds custom attributes to the OETestCase class
        setattr(testCaseClass, 'tc', self.tc)
        setattr(testCaseClass, 'td', self.tc.td)
        setattr(testCaseClass, 'logger', self.tc.logger)

    def _registerTestCase(self, case):
        """Record the case instance in the shared test context registry."""
        case_id = case.id()
        self.tc._registry['cases'][case_id] = case

    def _handleTestCaseDecorators(self, case):
        """Bind every OETestDecorator reachable from the case's test method."""
        def _handle(obj):
            if isinstance(obj, OETestDecorator):
                if not obj.__class__ in decoratorClasses:
                    raise Exception("Decorator %s isn't registered" \
                            " in decoratorClasses." % obj.__name__)
                obj.bind(self.tc._registry, case)

        def _walk_closure(obj):
            # Decorators wrap the method in closures; walk them recursively
            # so stacked decorators are all discovered.
            if hasattr(obj, '__closure__') and obj.__closure__:
                for f in obj.__closure__:
                    obj = f.cell_contents
                    _handle(obj)
                    _walk_closure(obj)
        method = getattr(case, case._testMethodName, None)
        _walk_closure(method)

    def _filterTest(self, case):
        """
        Returns True if test case must be filtered, False otherwise.
        """
        # XXX; If the module has more than one namespace only use
        # the first to support run the whole module specifying the
        # <module_name>.[test_class].[test_name]
        module_name_small = case.__module__.split('.')[0]
        module_name = case.__module__

        class_name = case.__class__.__name__
        test_name = case._testMethodName

        # 'auto' is a reserved key word to run test cases automatically
        # warn users if their test case belong to a module named 'auto'
        # NOTE(review): 'bb' is not imported in this module; this warning
        # path would raise NameError unless bb is injected elsewhere — confirm.
        if module_name_small == "auto":
            bb.warn("'auto' is a reserved key word for TEST_SUITES. "
                    "But test case '%s' is detected to belong to auto module. "
                    "Please condier using a new name for your module." % str(case))

        # check if case belongs to any specified module
        # if 'auto' is specified, such check is skipped
        if self.modules and not 'auto' in self.modules:
            module = None
            try:
                module = self.modules[module_name_small]
            except KeyError:
                try:
                    module = self.modules[module_name]
                except KeyError:
                    return True

            if module:
                if not class_name in module:
                    return True

                if module[class_name]:
                    if test_name not in module[class_name]:
                        return True

        # Decorator filters
        if self.tags_filter is not None and callable(self.tags_filter):
            alltags = set()
            # pull tags from the case class
            if hasattr(case, "__oeqa_testtags"):
                for t in getattr(case, "__oeqa_testtags"):
                    alltags.add(t)
            # pull tags from the method itself
            if hasattr(case, test_name):
                method = getattr(case, test_name)
                if hasattr(method, "__oeqa_testtags"):
                    for t in getattr(method, "__oeqa_testtags"):
                        alltags.add(t)

            if self.tags_filter(alltags):
                return True

        return False

    def _getTestCase(self, testCaseClass, tcName):
        """Instantiate tcName, monkey-patching OETestCase subclasses once.

        The patch is applied a single time per class (guarded by the
        '__oeqa_loader' marker attribute).
        """
        if not hasattr(testCaseClass, '__oeqa_loader') and \
                issubclass(testCaseClass, OETestCase):
            # In order to support data_vars validation
            # monkey patch the default setUp/tearDown{Class} to use
            # the ones provided by OETestCase
            setattr(testCaseClass, 'setUpClassMethod',
                    getattr(testCaseClass, 'setUpClass'))
            setattr(testCaseClass, 'tearDownClassMethod',
                    getattr(testCaseClass, 'tearDownClass'))
            setattr(testCaseClass, 'setUpClass',
                    testCaseClass._oeSetUpClass)
            setattr(testCaseClass, 'tearDownClass',
                    testCaseClass._oeTearDownClass)

            # In order to support decorators initialization
            # monkey patch the default setUp/tearDown to use
            # a setUpDecorators/tearDownDecorators that methods
            # will call setUp/tearDown original methods.
            setattr(testCaseClass, 'setUpMethod',
                    getattr(testCaseClass, 'setUp'))
            setattr(testCaseClass, 'tearDownMethod',
                    getattr(testCaseClass, 'tearDown'))
            setattr(testCaseClass, 'setUp', testCaseClass._oeSetUp)
            setattr(testCaseClass, 'tearDown', testCaseClass._oeTearDown)

            setattr(testCaseClass, '__oeqa_loader', True)

        case = testCaseClass(tcName)
        if isinstance(case, OETestCase):
            setattr(case, 'decorators', [])

        return case

    def loadTestsFromTestCase(self, testCaseClass):
        """
        Returns a suite of all tests cases contained in testCaseClass.
        """
        if issubclass(testCaseClass, unittest.suite.TestSuite):
            raise TypeError("Test cases should not be derived from TestSuite." \
                                " Maybe you meant to derive %s from TestCase?" \
                                % testCaseClass.__name__)
        if not issubclass(testCaseClass, unittest.case.TestCase):
            raise TypeError("Test %s is not derived from %s" % \
                    (testCaseClass.__name__, unittest.case.TestCase.__name__))

        testCaseNames = self.getTestCaseNames(testCaseClass)
        if not testCaseNames and hasattr(testCaseClass, 'runTest'):
            testCaseNames = ['runTest']

        suite = []
        for tcName in testCaseNames:
            case = self._getTestCase(testCaseClass, tcName)
            # Filter by case id
            if not (self.tests and not 'auto' in self.tests
                    and not getCaseID(case) in self.tests):
                self._handleTestCaseDecorators(case)

                # Filter by decorators
                if not self._filterTest(case):
                    self._registerTestCase(case)
                    suite.append(case)

        return self.suiteClass(suite)

    def _required_modules_validation(self):
        """
        Search in Test context registry if a required
        test is found, raise an exception when not found.
        """

        for module in self.modules_required:
            found = False

            # The module name is split to only compare the
            # first part of a test case id.
            comp_len = len(module.split('.'))
            for case in self.tc._registry['cases']:
                case_comp = '.'.join(case.split('.')[0:comp_len])
                if module == case_comp:
                    found = True
                    break

            if not found:
                raise OEQATestNotFound("Not found %s in loaded test cases" % \
                        module)

    def discover(self):
        """Discover tests from all module paths and order them via decorators."""
        big_suite = self.suiteClass()
        for path in self.module_paths:
            _find_duplicated_modules(big_suite, path)
            suite = super(OETestLoader, self).discover(path,
                    pattern='*.py', top_level_dir=path)
            big_suite.addTests(suite)

        cases = None
        discover_classes = [clss for clss in decoratorClasses
                            if issubclass(clss, OETestDiscover)]
        for clss in discover_classes:
            cases = clss.discover(self.tc._registry)

        if self.modules_required:
            self._required_modules_validation()

        return self.suiteClass(cases) if cases else big_suite

    def _filterModule(self, module):
        """Return (load_module, load_underscore) selection flags for *module*."""
        if module.__name__ in sys.builtin_module_names:
            msg = 'Tried to import %s test module but is a built-in'
            raise ImportError(msg % module.__name__)

        # XXX; If the module has more than one namespace only use
        # the first to support run the whole module specifying the
        # <module_name>.[test_class].[test_name]
        module_name_small = module.__name__.split('.')[0]
        module_name = module.__name__

        # Normal test modules are loaded if no modules were specified,
        # if module is in the specified module list or if 'auto' is in
        # module list.
        # Underscore modules are loaded only if specified in module list.
        load_module = True if not module_name.startswith('_') \
            and (not self.modules \
                 or module_name in self.modules \
                 or module_name_small in self.modules \
                 or 'auto' in self.modules) \
            else False

        load_underscore = True if module_name.startswith('_') \
            and (module_name in self.modules or \
                 module_name_small in self.modules) \
            else False

        if any(c.isupper() for c in module.__name__):
            raise SystemExit("Module '%s' contains uppercase characters and this isn't supported. Please fix the module name." % module.__name__)

        return (load_module, load_underscore)


    # XXX After Python 3.5, remove backward compatibility hacks for
    # use_load_tests deprecation via *args and **kws. See issue 16662.
    if sys.version_info >= (3,5):
        def loadTestsFromModule(self, module, *args, pattern=None, **kws):
            """
            Returns a suite of all tests cases contained in module.
            """
            load_module, load_underscore = self._filterModule(module)

            if load_module or load_underscore:
                return super(OETestLoader, self).loadTestsFromModule(
                        module, *args, pattern=pattern, **kws)
            else:
                return self.suiteClass()
    else:
        def loadTestsFromModule(self, module, use_load_tests=True):
            """
            Returns a suite of all tests cases contained in module.
            """
            load_module, load_underscore = self._filterModule(module)

            if load_module or load_underscore:
                return super(OETestLoader, self).loadTestsFromModule(
                        module, use_load_tests)
            else:
                return self.suiteClass()
|
||||
@@ -0,0 +1,357 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import time
|
||||
import unittest
|
||||
import logging
|
||||
import re
|
||||
import json
|
||||
import sys
|
||||
|
||||
from unittest import TextTestResult as _TestResult
|
||||
from unittest import TextTestRunner as _TestRunner
|
||||
|
||||
class OEStreamLogger(object):
    """File-like stream adapter that forwards unittest output to a logger.

    Messages containing '...' (unittest's progress lines) are buffered and
    flushed to the logger as one record once a continuation arrives.
    """

    def __init__(self, logger):
        self.logger = logger
        self.buffer = ""

    def write(self, msg):
        is_content = len(msg) > 1 and msg[0] != '\n'
        if not is_content:
            self.logger.log(logging.INFO, msg)
            return
        if '...' in msg:
            # Start (or extend) a buffered progress line.
            self.buffer += msg
        elif self.buffer:
            # Continuation of a buffered line: emit it as one record.
            self.buffer += msg
            self.logger.log(logging.INFO, self.buffer)
            self.buffer = ""

    def flush(self):
        for handler in self.logger.handlers:
            handler.flush()
|
||||
|
||||
class OETestResult(_TestResult):
    """Test result collector that also records successes, timings and extra data."""

    def __init__(self, tc, *args, **kwargs):
        super(OETestResult, self).__init__(*args, **kwargs)

        # (test, None) tuples, mirroring unittest's failures/errors lists.
        self.successes = []
        self.starttime = {}
        self.endtime = {}
        self.progressinfo = {}
        self.extraresults = {}

        # Inject into tc so that TestDepends decorator can see results
        tc.results = self

        self.tc = tc

        # stdout and stderr for each test case
        self.logged_output = {}

    def startTest(self, test):
        # May have been set by concurrencytest
        if test.id() not in self.starttime:
            self.starttime[test.id()] = time.time()
        super(OETestResult, self).startTest(test)

    def stopTest(self, test):
        self.endtime[test.id()] = time.time()
        if self.buffer:
            # unittest redirected stdout/stderr to StringIO; capture them now.
            self.logged_output[test.id()] = (
                    sys.stdout.getvalue(), sys.stderr.getvalue())
        super(OETestResult, self).stopTest(test)
        if test.id() in self.progressinfo:
            self.tc.logger.info(self.progressinfo[test.id()])

        # Print the errors/failures early to aid/speed debugging, its a pain
        # to wait until selftest finishes to see them.
        for t in ['failures', 'errors', 'skipped', 'expectedFailures']:
            for (scase, msg) in getattr(self, t):
                if test.id() == scase.id():
                    self.tc.logger.info(str(msg))
                    break

    def logSummary(self, component, context_msg=''):
        """Log an overall pass/fail summary line for the whole run."""
        elapsed_time = self.tc._run_end_time - self.tc._run_start_time
        self.tc.logger.info("SUMMARY:")
        self.tc.logger.info("%s (%s) - Ran %d test%s in %.3fs" % (component,
            context_msg, self.testsRun, self.testsRun != 1 and "s" or "",
            elapsed_time))

        if self.wasSuccessful():
            msg = "%s - OK - All required tests passed" % component
        else:
            msg = "%s - FAIL - Required tests failed" % component
        msg += " (successes=%d, skipped=%d, failures=%d, errors=%d)" % (len(self.successes), len(self.skipped), len(self.failures), len(self.errors))
        self.tc.logger.info(msg)

    def _getTestResultDetails(self, case):
        """Return (status string, log message) for *case*, or ('UNKNOWN', None)."""
        result_types = {'failures': 'FAILED', 'errors': 'ERROR', 'skipped': 'SKIPPED',
                'expectedFailures': 'EXPECTEDFAIL', 'successes': 'PASSED',
                'unexpectedSuccesses' : 'PASSED'}

        for rtype in result_types:
            found = False
            for resultclass in getattr(self, rtype):
                # unexpectedSuccesses are just lists, not lists of tuples
                if isinstance(resultclass, tuple):
                    scase, msg = resultclass
                else:
                    scase, msg = resultclass, None
                if case.id() == scase.id():
                    found = True
                    break
                scase_str = str(scase.id())

                # When fails at module or class level the class name is passed as string
                # so figure out to see if match
                m = re.search(r"^setUpModule \((?P<module_name>.*)\).*$", scase_str)
                if m:
                    if case.__class__.__module__ == m.group('module_name'):
                        found = True
                        break

                m = re.search(r"^setUpClass \((?P<class_name>.*)\).*$", scase_str)
                if m:
                    class_name = "%s.%s" % (case.__class__.__module__,
                            case.__class__.__name__)

                    if class_name == m.group('class_name'):
                        found = True
                        break

            if found:
                return result_types[rtype], msg

        return 'UNKNOWN', None

    def extractExtraResults(self, test, details = None):
        """Merge per-test extraresults (from *details* or the test) into self."""
        extraresults = None
        if details is not None and "extraresults" in details:
            extraresults = details.get("extraresults", {})
        elif hasattr(test, "extraresults"):
            extraresults = test.extraresults

        if extraresults is not None:
            for k, v in extraresults.items():
                # handle updating already existing entries (e.g. ptestresults.sections)
                if k in self.extraresults:
                    self.extraresults[k].update(v)
                else:
                    self.extraresults[k] = v

    def addError(self, test, *args, details = None):
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addError(test, *args)

    def addFailure(self, test, *args, details = None):
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addFailure(test, *args)

    def addSuccess(self, test, details = None):
        # Added so we can keep track of successes too
        self.successes.append((test, None))
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addSuccess(test)

    def addExpectedFailure(self, test, *args, details = None):
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addExpectedFailure(test, *args)

    def addUnexpectedSuccess(self, test, details = None):
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addUnexpectedSuccess(test)

    def logDetails(self, json_file_dir=None, configuration=None, result_id=None,
            dump_streams=False):
        """Log per-case results and optionally dump them to a JSON file."""
        self.tc.logger.info("RESULTS:")

        result = self.extraresults
        logs = {}
        if hasattr(self.tc, "extraresults"):
            result.update(self.tc.extraresults)

        for case_name in self.tc._registry['cases']:
            case = self.tc._registry['cases'][case_name]

            (status, log) = self._getTestResultDetails(case)

            t = ""
            duration = 0
            if case.id() in self.starttime and case.id() in self.endtime:
                duration = self.endtime[case.id()] - self.starttime[case.id()]
                t = " (" + "{0:.2f}".format(duration) + "s)"

            if status not in logs:
                logs[status] = []
            logs[status].append("RESULTS - %s: %s%s" % (case.id(), status, t))
            report = {'status': status}
            if log:
                report['log'] = log
            if duration:
                report['duration'] = duration

            alltags = []
            # pull tags from the case class
            if hasattr(case, "__oeqa_testtags"):
                alltags.extend(getattr(case, "__oeqa_testtags"))
            # pull tags from the method itself
            test_name = case._testMethodName
            if hasattr(case, test_name):
                method = getattr(case, test_name)
                if hasattr(method, "__oeqa_testtags"):
                    alltags.extend(getattr(method, "__oeqa_testtags"))
            if alltags:
                report['oetags'] = alltags

            if dump_streams and case.id() in self.logged_output:
                (stdout, stderr) = self.logged_output[case.id()]
                report['stdout'] = stdout
                report['stderr'] = stderr
            result[case.id()] = report

        for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
            if i not in logs:
                continue
            for l in logs[i]:
                self.tc.logger.info(l)

        if json_file_dir:
            tresultjsonhelper = OETestResultJSONHelper()
            tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)

    def wasSuccessful(self):
        # Override as unexpected successes aren't failures for us
        return (len(self.failures) == len(self.errors) == 0)

    def hasAnyFailingTest(self):
        # Account for expected failures
        return not self.wasSuccessful() or len(self.expectedFailures)
|
||||
|
||||
class OEListTestsResult(object):
    """Dummy result object returned when only listing tests; never fails."""

    def wasSuccessful(self):
        return True
|
||||
|
||||
class OETestRunner(_TestRunner):
    """Text test runner wired to the OEQA test context logger."""

    # Stream class used to forward unittest output to the tc logger.
    streamLoggerClass = OEStreamLogger

    def __init__(self, tc, *args, **kwargs):
        kwargs['stream'] = self.streamLoggerClass(tc.logger)
        super(OETestRunner, self).__init__(*args, **kwargs)
        self.tc = tc
        self.resultclass = OETestResult

    def _makeResult(self):
        # Pass the test context through to the result object.
        return self.resultclass(self.tc, self.stream, self.descriptions,
                self.verbosity)

    def _walk_suite(self, suite, func):
        """Apply *func* to every test case in *suite*, counting visited cases."""
        for obj in suite:
            if isinstance(obj, unittest.suite.TestSuite):
                if len(obj._tests):
                    self._walk_suite(obj, func)
            elif isinstance(obj, unittest.case.TestCase):
                func(self.tc.logger, obj)
                self._walked_cases = self._walked_cases + 1

    def _list_tests_name(self, suite):
        """Log every test case id, with its tags when present."""
        self._walked_cases = 0

        def _list_cases(logger, case):
            oetags = []
            if hasattr(case, '__oeqa_testtags'):
                oetags = getattr(case, '__oeqa_testtags')
            if oetags:
                logger.info("%s (%s)" % (case.id(), ",".join(oetags)))
            else:
                logger.info("%s" % (case.id()))

        self.tc.logger.info("Listing all available tests:")
        self._walked_cases = 0
        self.tc.logger.info("test (tags)")
        self.tc.logger.info("-" * 80)
        self._walk_suite(suite, _list_cases)
        self.tc.logger.info("-" * 80)
        self.tc.logger.info("Total found:\t%s" % self._walked_cases)

    def _list_tests_class(self, suite):
        """Log tests grouped by module and class."""
        self._walked_cases = 0

        # Tracks the last seen module/class so group headers print only once.
        curr = {}
        def _list_classes(logger, case):
            if not 'module' in curr or curr['module'] != case.__module__:
                curr['module'] = case.__module__
                logger.info(curr['module'])

            if not 'class' in curr or curr['class'] != \
                    case.__class__.__name__:
                curr['class'] = case.__class__.__name__
                logger.info(" -- %s" % curr['class'])

            logger.info(" -- -- %s" % case._testMethodName)

        self.tc.logger.info("Listing all available test classes:")
        self._walk_suite(suite, _list_classes)

    def _list_tests_module(self, suite):
        """Log the distinct modules containing tests (underscore ones as hidden)."""
        self._walked_cases = 0

        listed = []
        def _list_modules(logger, case):
            if not case.__module__ in listed:
                if case.__module__.startswith('_'):
                    logger.info("%s (hidden)" % case.__module__)
                else:
                    logger.info(case.__module__)
                listed.append(case.__module__)

        self.tc.logger.info("Listing all available test modules:")
        self._walk_suite(suite, _list_modules)

    def list_tests(self, suite, display_type):
        """List tests by 'name', 'class' or 'module' and return a dummy result."""
        if display_type == 'name':
            self._list_tests_name(suite)
        elif display_type == 'class':
            self._list_tests_class(suite)
        elif display_type == 'module':
            self._list_tests_module(suite)

        return OEListTestsResult()
|
||||
|
||||
class OETestResultJSONHelper(object):
    """Persist test results into a shared testresults.json file."""

    testresult_filename = 'testresults.json'

    def _get_existing_testresults_if_available(self, write_dir):
        """Load and return prior results from *write_dir*, or {} if none exist."""
        result_path = os.path.join(write_dir, self.testresult_filename)
        if not os.path.exists(result_path):
            return {}
        with open(result_path, "r") as f:
            return json.load(f)

    def _write_file(self, write_dir, file_name, file_content):
        """Write *file_content* to *file_name* inside *write_dir*."""
        with open(os.path.join(write_dir, file_name), 'w') as out:
            out.write(file_content)

    def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
        """Merge one run's results into testresults.json under *result_id*.

        When running under bitbake, the file is protected with a lock file
        so concurrent writers do not corrupt it; otherwise the directory is
        simply created if missing.
        """
        try:
            import bb
            has_bb = True
            bb.utils.mkdirhier(write_dir)
            lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
        except ImportError:
            has_bb = False
            os.makedirs(write_dir, exist_ok=True)
        test_results = self._get_existing_testresults_if_available(write_dir)
        test_results[result_id] = {'configuration': configuration, 'result': test_result}
        self._write_file(write_dir, self.testresult_filename,
                         json.dumps(test_results, sort_keys=True, indent=4))
        if has_bb:
            bb.utils.unlockfile(lf)
|
||||
@@ -0,0 +1,36 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from abc import abstractmethod
|
||||
|
||||
class OETarget(object):
    """Abstract interface every OEQA test target must implement."""

    def __init__(self, logger, *args, **kwargs):
        self.logger = logger

    @abstractmethod
    def start(self):
        """Bring the target up."""

    @abstractmethod
    def stop(self):
        """Shut the target down."""

    @abstractmethod
    def run(self, cmd, timeout=None):
        """Run *cmd* on the target, optionally bounded by *timeout* seconds."""

    @abstractmethod
    def copyTo(self, localSrc, remoteDst):
        """Copy a local file to the target."""

    @abstractmethod
    def copyFrom(self, remoteSrc, localDst):
        """Copy a file from the target to the local host."""

    @abstractmethod
    def copyDirTo(self, localSrc, remoteDst):
        """Copy a local directory tree to the target."""
|
||||
@@ -0,0 +1,104 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
import signal
|
||||
import time
|
||||
import glob
|
||||
import subprocess
|
||||
from collections import defaultdict
|
||||
|
||||
from .ssh import OESSHTarget
|
||||
from oeqa.utils.qemurunner import QemuRunner
|
||||
from oeqa.utils.dump import MonitorDumper
|
||||
from oeqa.utils.dump import TargetDumper
|
||||
|
||||
# Image filesystem types this qemu target module knows how to boot.
supported_fstypes = ['ext3', 'ext4', 'cpio.gz', 'wic']
|
||||
|
||||
class OEQemuTarget(OESSHTarget):
|
||||
    def __init__(self, logger, server_ip, timeout=300, user='root',
            port=None, machine='', rootfs='', kernel='', kvm=False, slirp=False,
            dump_dir='', display='', bootlog='',
            tmpdir='', dir_image='', boottime=60, serial_ports=2,
            boot_patterns = defaultdict(str), ovmf=False, tmpfsdir=None, **kwargs):
        """Create a qemu-backed SSH test target.

        Stores the qemu boot configuration, builds the QemuRunner that will
        launch the VM, and sets up the monitor (qmp) and target dumpers used
        to collect debug data on failure.

        NOTE(review): boot_patterns uses a mutable default argument
        (defaultdict(str)) shared across calls — safe only if callers never
        mutate it; confirm.
        """

        # The target ip is unknown until the VM boots, hence None here.
        super(OEQemuTarget, self).__init__(logger, None, server_ip, timeout,
                user, port)

        self.server_ip = server_ip
        self.server_port = 0
        self.machine = machine
        self.rootfs = rootfs
        self.kernel = kernel
        self.kvm = kvm
        self.ovmf = ovmf
        self.use_slirp = slirp
        self.boot_patterns = boot_patterns
        self.dump_dir = dump_dir
        self.bootlog = bootlog

        self.runner = QemuRunner(machine=machine, rootfs=rootfs, tmpdir=tmpdir,
                deploy_dir_image=dir_image, display=display,
                logfile=bootlog, boottime=boottime,
                use_kvm=kvm, use_slirp=slirp, dump_dir=dump_dir, logger=logger,
                serial_ports=serial_ports, boot_patterns = boot_patterns,
                use_ovmf=ovmf, tmpfsdir=tmpfsdir)
        # Optional QMP monitor command dump, driven by testimage_dump_monitor.
        dump_monitor_cmds = kwargs.get("testimage_dump_monitor")
        self.monitor_dumper = MonitorDumper(dump_monitor_cmds, dump_dir, self.runner)
        if self.monitor_dumper:
            self.monitor_dumper.create_dir("qmp")

        # Optional in-target command dump, driven by testimage_dump_target.
        dump_target_cmds = kwargs.get("testimage_dump_target")
        self.target_dumper = TargetDumper(dump_target_cmds, dump_dir, self.runner)
        self.target_dumper.create_dir("qemu")
|
||||
|
||||
def start(self, params=None, extra_bootparams=None, runqemuparams=''):
|
||||
if self.use_slirp and not self.server_ip:
|
||||
self.logger.error("Could not start qemu with slirp without server ip - provide 'TEST_SERVER_IP'")
|
||||
raise RuntimeError("FAILED to start qemu - check the task log and the boot log")
|
||||
if self.runner.start(params, extra_bootparams=extra_bootparams, runqemuparams=runqemuparams):
|
||||
self.ip = self.runner.ip
|
||||
if self.use_slirp:
|
||||
target_ip_port = self.runner.ip.split(':')
|
||||
if len(target_ip_port) == 2:
|
||||
target_ip = target_ip_port[0]
|
||||
port = target_ip_port[1]
|
||||
self.ip = target_ip
|
||||
self.ssh = self.ssh + ['-p', port]
|
||||
self.scp = self.scp + ['-P', port]
|
||||
else:
|
||||
self.logger.error("Could not get host machine port to connect qemu with slirp, ssh will not be "
|
||||
"able to connect to qemu with slirp")
|
||||
if self.runner.server_ip:
|
||||
self.server_ip = self.runner.server_ip
|
||||
else:
|
||||
self.stop()
|
||||
# Display the first 20 lines of top and
|
||||
# last 20 lines of the bootlog when the
|
||||
# target is not being booted up.
|
||||
topfile = glob.glob(self.dump_dir + "/*_qemu/host_*_top")
|
||||
msg = "\n\n===== start: snippet =====\n\n"
|
||||
for f in topfile:
|
||||
msg += "file: %s\n\n" % f
|
||||
with open(f) as tf:
|
||||
for x in range(20):
|
||||
msg += next(tf)
|
||||
msg += "\n\n===== end: snippet =====\n\n"
|
||||
blcmd = ["tail", "-20", self.bootlog]
|
||||
msg += "===== start: snippet =====\n\n"
|
||||
try:
|
||||
out = subprocess.check_output(blcmd, stderr=subprocess.STDOUT, timeout=1).decode('utf-8')
|
||||
msg += "file: %s\n\n" % self.bootlog
|
||||
msg += out
|
||||
except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
|
||||
msg += "Error running command: %s\n%s\n" % (blcmd, err)
|
||||
msg += "\n\n===== end: snippet =====\n"
|
||||
|
||||
raise RuntimeError("FAILED to start qemu - check the task log and the boot log %s" % (msg))
|
||||
|
||||
def stop(self):
|
||||
self.runner.stop()
|
||||
@@ -0,0 +1,319 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import time
|
||||
import select
|
||||
import logging
|
||||
import subprocess
|
||||
import codecs
|
||||
|
||||
from . import OETarget
|
||||
|
||||
class OESSHTarget(OETarget):
    """Test target reachable over SSH.

    Builds ssh/scp command prefixes from the constructor arguments and
    provides run/copy/delete primitives on top of them.  start()/stop()
    are no-ops here; subclasses that manage the machine lifecycle (e.g.
    OEQemuTarget) override them.
    """

    def __init__(self, logger, ip, server_ip, timeout=300, user='root',
                 port=None, server_port=0, **kwargs):
        # Without a caller-supplied logger, log to ./remoteTarget.log.
        if not logger:
            logger = logging.getLogger('target')
            logger.setLevel(logging.INFO)
            filePath = os.path.join(os.getcwd(), 'remoteTarget.log')
            fileHandler = logging.FileHandler(filePath, 'w', 'utf-8')
            formatter = logging.Formatter(
                        '%(asctime)s.%(msecs)03d %(levelname)s: %(message)s',
                        '%H:%M:%S')
            fileHandler.setFormatter(formatter)
            logger.addHandler(fileHandler)

        super(OESSHTarget, self).__init__(logger)
        self.ip = ip
        self.server_ip = server_ip
        self.server_port = server_port
        self.timeout = timeout
        self.user = user
        # Keep connections alive, but never prompt for or record host keys:
        # test targets are typically throwaway machines whose keys change
        # on every boot.
        ssh_options = [
                '-o', 'ServerAliveCountMax=2',
                '-o', 'ServerAliveInterval=30',
                '-o', 'UserKnownHostsFile=/dev/null',
                '-o', 'StrictHostKeyChecking=no',
                '-o', 'LogLevel=ERROR'
                ]
        scp_options = [
                '-r'
                ]
        # Command prefixes; the target address and remote command/paths are
        # appended per call.
        self.ssh = ['ssh', '-l', self.user] + ssh_options
        self.scp = ['scp'] + ssh_options + scp_options
        if port:
            self.ssh = self.ssh + ['-p', port]
            self.scp = self.scp + ['-P', port]
        # Dumpers are attached later by the owning target, if at all.
        self._monitor_dumper = None
        self.target_dumper = None

    def start(self, **kwargs):
        """No-op: a plain SSH target is assumed to already be running."""
        pass

    def stop(self, **kwargs):
        """No-op: a plain SSH target is not managed by this class."""
        pass

    @property
    def monitor_dumper(self):
        # Optional MonitorDumper used to collect diagnostics on failures.
        return self._monitor_dumper

    @monitor_dumper.setter
    def monitor_dumper(self, dumper):
        self._monitor_dumper = dumper
        # Take an initial dump as soon as a dumper is attached.
        self.monitor_dumper.dump_monitor()

    def _run(self, command, timeout=None, ignore_status=True):
        """
        Runs command in target using SSHCall.

        Returns (exit status, combined stdout/stderr).  Raises
        AssertionError on non-zero status unless ignore_status is True.
        """
        self.logger.debug("[Running]$ %s" % " ".join(command))

        starttime = time.time()
        status, output = SSHCall(command, self.logger, timeout)
        self.logger.debug("[Command returned '%d' after %.2f seconds]"
                          "" % (status, time.time() - starttime))

        if status and not ignore_status:
            raise AssertionError("Command '%s' returned non-zero exit "
                                 "status %d:\n%s" % (command, status, output))

        return (status, output)

    def run(self, command, timeout=None, ignore_status=True):
        """
        Runs command in target.

        command: Command to run on target.
        timeout: <value>: Kill command after <val> seconds.
                 None: Kill command default value seconds.
                 0: No timeout, runs until return.
        """
        # A minimal PATH is exported so sbin tools resolve even for
        # non-login shells on the target.
        targetCmd = 'export PATH=/usr/sbin:/sbin:/usr/bin:/bin; %s' % command
        sshCmd = self.ssh + [self.ip, targetCmd]

        # Note: timeout == 0 falls through the first (falsy) test and hits
        # the elif, giving "no timeout"; None falls to the default.
        if timeout:
            processTimeout = timeout
        elif timeout==0:
            processTimeout = None
        else:
            processTimeout = self.timeout

        status, output = self._run(sshCmd, processTimeout, ignore_status)
        self.logger.debug('Command: %s\nStatus: %d Output:  %s\n' % (command, status, output))

        # ssh itself exits with 255 on connection failure; collect
        # diagnostics from the attached dumpers in that case.
        if (status == 255) and (('No route to host') in output):
            if self.monitor_dumper:
                self.monitor_dumper.dump_monitor()
        if status == 255:
            if self.target_dumper:
                self.target_dumper.dump_target()
            if self.monitor_dumper:
                self.monitor_dumper.dump_monitor()
        return (status, output)

    def copyTo(self, localSrc, remoteDst):
        """
        Copy file to target.

        If local file is symlink, recreate symlink in target.
        """
        if os.path.islink(localSrc):
            # Recreate the link remotely instead of copying its referent.
            link = os.readlink(localSrc)
            dstDir, dstBase = os.path.split(remoteDst)
            sshCmd = 'cd %s; ln -s %s %s' % (dstDir, link, dstBase)
            return self.run(sshCmd)

        else:
            remotePath = '%s@%s:%s' % (self.user, self.ip, remoteDst)
            scpCmd = self.scp + [localSrc, remotePath]
            return self._run(scpCmd, ignore_status=False)

    def copyFrom(self, remoteSrc, localDst, warn_on_failure=False):
        """
        Copy file from target.

        With warn_on_failure a failed copy is logged as a warning instead
        of raising.
        """
        remotePath = '%s@%s:%s' % (self.user, self.ip, remoteSrc)
        scpCmd = self.scp + [remotePath, localDst]
        (status, output) = self._run(scpCmd, ignore_status=warn_on_failure)
        if warn_on_failure and status:
            self.logger.warning("Copy returned non-zero exit status %d:\n%s" % (status, output))
        return (status, output)

    def copyDirTo(self, localSrc, remoteDst):
        """
        Copy recursively localSrc directory to remoteDst in target.
        """

        for root, dirs, files in os.walk(localSrc):
            # Create directories in the target as needed
            for d in dirs:
                tmpDir = os.path.join(root, d).replace(localSrc, "")
                newDir = os.path.join(remoteDst, tmpDir.lstrip("/"))
                cmd = "mkdir -p %s" % newDir
                self.run(cmd)

            # Copy files into the target
            for f in files:
                tmpFile = os.path.join(root, f).replace(localSrc, "")
                dstFile = os.path.join(remoteDst, tmpFile.lstrip("/"))
                srcFile = os.path.join(root, f)
                self.copyTo(srcFile, dstFile)

    def deleteFiles(self, remotePath, files):
        """
        Deletes files in target's remotePath.
        """

        cmd = "rm"
        if not isinstance(files, list):
            files = [files]

        # NOTE(review): with an empty 'files' list this runs a bare "rm"
        # on the target, which errors out — confirm callers never pass [].
        for f in files:
            cmd = "%s %s" % (cmd, os.path.join(remotePath, f))

        self.run(cmd)


    def deleteDir(self, remotePath):
        """
        Deletes target's remotePath directory (only succeeds if empty,
        since rmdir is used).
        """

        cmd = "rmdir %s" % remotePath
        self.run(cmd)


    def deleteDirStructure(self, localPath, remotePath):
        """
        Delete recursively localPath structure directory in target's remotePath.

        This function is very usefult to delete a package that is installed in
        the DUT and the host running the test has such package extracted in tmp
        directory.

        Example:
            pwd: /home/user/tmp
            tree:   .
                    └── work
                        ├── dir1
                        │   └── file1
                        └── dir2

        localpath = "/home/user/tmp" and remotepath = "/home/user"

        With the above variables this function will try to delete the
        directory in the DUT in this order:
            /home/user/work/dir1/file1
            /home/user/work/dir1        (if dir is empty)
            /home/user/work/dir2        (if dir is empty)
            /home/user/work             (if dir is empty)
        """

        # Bottom-up walk so files are removed before their directories.
        for root, dirs, files in os.walk(localPath, topdown=False):
            # Delete files first
            tmpDir = os.path.join(root).replace(localPath, "")
            remoteDir = os.path.join(remotePath, tmpDir.lstrip("/"))
            self.deleteFiles(remoteDir, files)

            # Remove dirs if empty
            for d in dirs:
                tmpDir = os.path.join(root, d).replace(localPath, "")
                remoteDir = os.path.join(remotePath, tmpDir.lstrip("/"))
                self.deleteDir(remoteDir)
|
||||
|
||||
def SSHCall(command, logger, timeout=None, **opts):
    """Run *command* (an argv list for subprocess) and return
    (returncode, output).

    With a timeout, stdout is read incrementally and the timeout is
    restarted whenever new data arrives; the process is killed when it
    stays silent for *timeout* seconds.  Without a timeout the call blocks
    until the process exits.  Extra **opts are merged into the
    subprocess.Popen options.
    """

    def run():
        nonlocal output
        nonlocal process
        output_raw = b''
        killed_note = ''
        starttime = time.time()
        process = subprocess.Popen(command, **options)
        if timeout:
            endtime = starttime + timeout
            eof = False
            os.set_blocking(process.stdout.fileno(), False)
            while time.time() < endtime and not eof:
                try:
                    logger.debug('Waiting for process output: time: %s, endtime: %s' % (time.time(), endtime))
                    if select.select([process.stdout], [], [], 5)[0] != []:
                        # wait a bit for more data, tries to avoid reading single characters
                        time.sleep(0.2)
                        data = process.stdout.read()
                        if not data:
                            eof = True
                        else:
                            output_raw += data
                            # ignore errors to capture as much as possible
                            logger.debug('Partial data from SSH call:\n%s' % data.decode('utf-8', errors='ignore'))
                            # restart the inactivity timeout on every read
                            endtime = time.time() + timeout
                except InterruptedError:
                    logger.debug('InterruptedError')
                    continue

            process.stdout.close()

            # process hasn't returned yet
            if not eof:
                process.terminate()
                time.sleep(5)
                try:
                    process.kill()
                except OSError:
                    logger.debug('OSError when killing process')
                elapsed = time.time() - starttime
                killed_note = ("\nProcess killed - no output for %d seconds. Total"
                               " running time: %d seconds." % (timeout, elapsed))
                logger.debug('Received data from SSH call:\n%s ' % killed_note)
        else:
            output_raw = process.communicate()[0]

        # Decode first, then append the kill notice: the previous code
        # appended the notice to 'output' and immediately overwrote it
        # with the decoded raw data, losing the notice.
        output = output_raw.decode('utf-8', errors='ignore') + killed_note
        logger.debug('Data from SSH call:\n%s' % output.rstrip())

        # timeout or not, make sure process exits and is not hanging
        if process.returncode is None:
            try:
                process.wait(timeout=5)
            except subprocess.TimeoutExpired:
                # was a bare 'TimeoutExpired', a NameError at runtime
                try:
                    process.kill()
                except OSError:
                    logger.debug('OSError')

    options = {
        "stdout": subprocess.PIPE,
        "stderr": subprocess.STDOUT,
        "stdin": None,
        "shell": False,
        "bufsize": -1,
        "start_new_session": True,
    }
    options.update(opts)
    output = ''
    process = None

    # Unset DISPLAY which means we won't trigger SSH_ASKPASS
    env = os.environ.copy()
    if "DISPLAY" in env:
        del env['DISPLAY']
    options['env'] = env

    try:
        run()
    except:
        # Need to guard against a SystemExit or other exception ocurring
        # whilst running and ensure we don't leave a process behind.
        # 'process' may still be None if Popen itself failed.
        if process and process.poll() is None:
            process.kill()
        logger.debug('Something went wrong, killing SSH process')
        raise

    return (process.returncode, output.rstrip())
|
||||
@@ -0,0 +1,23 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.core.case import OETestCase
|
||||
from oeqa.core.decorator import OETestTag
|
||||
from oeqa.core.decorator.data import OETestDataDepends
|
||||
|
||||
class DataTest(OETestCase):
    """Fixture exercising test-data (td) variable handling in the loader."""

    # Variables this case declares as required test data.
    data_vars = ['IMAGE', 'ARCH']

    @OETestDataDepends(['MACHINE',])
    @OETestTag('dataTestOk')
    def testDataOk(self):
        """Passes only when td carries exactly the expected values."""
        expected = (('IMAGE', 'core-image-minimal'),
                    ('ARCH', 'x86'),
                    ('MACHINE', 'qemuarm'))
        for var, value in expected:
            self.assertEqual(self.td.get(var), value)

    @OETestTag('dataTestFail')
    def testDataFail(self):
        """Intentionally empty; selected via its tag by the meta tests."""
        pass
|
||||
@@ -0,0 +1,41 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.core.case import OETestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
|
||||
class DependsTest(OETestCase):
    """Dependency fixture driven by the OETestDepends meta tests.

    testDependsFirst..Fifth form a small DAG (third and fourth are
    independent children of second); the Circular1..3 cases form a cycle
    the loader is expected to reject.
    """

    def testDependsFirst(self):
        """Root of the dependency chain."""
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsFirst'])
    def testDependsSecond(self):
        """Depends directly on testDependsFirst."""
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsSecond'])
    def testDependsThird(self):
        """One of two independent children of testDependsSecond."""
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsSecond'])
    def testDependsFourth(self):
        """One of two independent children of testDependsSecond."""
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsThird', 'testDependsFourth'])
    def testDependsFifth(self):
        """Joins both branches of the DAG."""
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsCircular3'])
    def testDependsCircular1(self):
        """Part of a deliberate 1 -> 3 -> 2 -> 1 dependency cycle."""
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsCircular1'])
    def testDependsCircular2(self):
        """Part of a deliberate dependency cycle."""
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsCircular2'])
    def testDependsCircular3(self):
        """Part of a deliberate dependency cycle."""
        self.assertTrue(True, msg='How is this possible?')
|
||||
@@ -0,0 +1,12 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.core.case import OETestCase
|
||||
|
||||
class AnotherTest(OETestCase):
    """Trivial always-passing case; used by the loader meta tests to
    verify importing cases from an additional directory."""

    def testAnother(self):
        self.assertTrue(True, msg='How is this possible?')
|
||||
@@ -0,0 +1,38 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.core.case import OETestCase
|
||||
from oeqa.core.decorator import OETestTag
|
||||
|
||||
class TagTest(OETestCase):
    """Tag fixture for the OETestTag meta tests: one good tag, one other
    tag, one multi-tag case and one untagged case."""

    @OETestTag('goodTag')
    def testTagGood(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestTag('otherTag')
    def testTagOther(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestTag('otherTag', 'multiTag')
    def testTagOtherMulti(self):
        self.assertTrue(True, msg='How is this possible?')

    def testTagNone(self):
        """Deliberately untagged."""
        self.assertTrue(True, msg='How is this possible?')
|
||||
|
||||
@OETestTag('classTag')
class TagClassTest(OETestCase):
    """Like TagTest, but tagged at class level so every method inherits
    'classTag' in addition to its own tags."""

    @OETestTag('otherTag')
    def testTagOther(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestTag('otherTag', 'multiTag')
    def testTagOtherMulti(self):
        self.assertTrue(True, msg='How is this possible?')

    def testTagNone(self):
        """Carries only the class-level tag."""
        self.assertTrue(True, msg='How is this possible?')
|
||||
|
||||
@@ -0,0 +1,34 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from time import sleep
|
||||
|
||||
from oeqa.core.case import OETestCase
|
||||
from oeqa.core.decorator.oetimeout import OETimeout
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
|
||||
class TimeoutTest(OETestCase):
    """Timing fixture driven by the OETimeout meta tests; the sleeps and
    timeout values are the fixture's contract."""

    @OETimeout(1)
    def testTimeoutPass(self):
        """Finishes immediately, well inside its 1 s timeout."""
        self.assertTrue(True, msg='How is this possible?')

    @OETimeout(1)
    def testTimeoutFail(self):
        """Sleeps past its 1 s timeout, so the decorator must fail it."""
        sleep(2)
        self.assertTrue(True, msg='How is this possible?')


    def testTimeoutSkip(self):
        self.skipTest("This test needs to be skipped, so that testTimeoutDepends()'s OETestDepends kicks in")

    @OETestDepends(["timeout.TimeoutTest.testTimeoutSkip"])
    @OETimeout(3)
    def testTimeoutDepends(self):
        """Skipped via its dependency; its 3 s alarm must be cancelled."""
        self.assertTrue(False, msg='How is this possible?')

    def testTimeoutUnrelated(self):
        """Sleeps longer than any earlier timeout; only passes if no stale
        alarm from a cancelled case fires."""
        sleep(6)
|
||||
@@ -0,0 +1,38 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
import unittest
|
||||
import logging
|
||||
import os
|
||||
|
||||
# Module-wide "oeqa" logger shared by TestBase and the selftest scripts:
# INFO level, messages echoed to the console with an "OEQATest: " prefix.
logger = logging.getLogger("oeqa")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('OEQATest: %(message)s')
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
|
||||
|
||||
def setup_sys_path():
    """Prepend the oeqa library root (three levels above this file) to
    sys.path so 'import oeqa...' works when running the selftests
    straight from the source tree.  Safe to call repeatedly."""
    directory = os.path.dirname(os.path.abspath(__file__))
    oeqa_lib = os.path.realpath(os.path.join(directory, '../../../'))
    # 'x not in y' instead of the non-idiomatic 'not x in y'.
    if oeqa_lib not in sys.path:
        sys.path.insert(0, oeqa_lib)
|
||||
|
||||
class TestBase(unittest.TestCase):
    """Common base for the oeqa selftests: provides the shared module
    logger and a loader helper building an OETestContext over the local
    'cases' directory."""

    def setUp(self):
        self.logger = logger
        directory = os.path.dirname(os.path.abspath(__file__))
        self.cases_path = os.path.join(directory, 'cases')

    def _testLoader(self, d=None, modules=None, tests=None, **kwargs):
        """Return an OETestContext loaded from self.cases_path.

        d: test-data dict; modules/tests: optional filters.  Defaults are
        created per call — the previous d={}, modules=[], tests=[]
        signature shared one mutable object across every invocation.
        """
        from oeqa.core.context import OETestContext
        if d is None:
            d = {}
        if modules is None:
            modules = []
        if tests is None:
            tests = []
        tc = OETestContext(d, self.logger)
        tc.loadTests(self.cases_path, modules=modules, tests=tests,
                     **kwargs)
        return tc
|
||||
Executable
+55
@@ -0,0 +1,55 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import unittest
|
||||
import logging
|
||||
import os
|
||||
|
||||
from common import setup_sys_path, TestBase
|
||||
setup_sys_path()
|
||||
|
||||
from oeqa.core.exception import OEQAMissingVariable
|
||||
from oeqa.core.utils.test import getCaseMethod, getSuiteCasesNames
|
||||
|
||||
class TestData(TestBase):
    """Meta tests for test-data (td) variable handling."""

    modules = ['data']

    def test_data_fail_missing_variable(self):
        """Running the 'data' cases without test data must produce an
        OEQAMissingVariable error."""
        expectedException = "oeqa.core.exception.OEQAMissingVariable"

        tc = self._testLoader(modules=self.modules)
        results = tc.runTests()
        self.assertFalse(results.wasSuccessful())
        # Check every error, not just the last: the old loop reset its
        # flag each iteration and raised NameError on an empty list.
        self.assertTrue(any(expectedException in data
                            for _, data in results.errors))

    def test_data_fail_wrong_variable(self):
        """Wrong td values must surface as ordinary assertion failures."""
        expectedError = 'AssertionError'
        d = {'IMAGE' : 'core-image-weston', 'ARCH' : 'arm'}

        tc = self._testLoader(d=d, modules=self.modules)
        results = tc.runTests()
        self.assertFalse(results.wasSuccessful())
        self.assertTrue(any(expectedError in data
                            for _, data in results.failures))

    def test_data_ok(self):
        """Correct td values make the whole 'data' module pass."""
        d = {'IMAGE' : 'core-image-minimal', 'ARCH' : 'x86', 'MACHINE' : 'qemuarm'}

        tc = self._testLoader(d=d, modules=self.modules)
        self.assertEqual(True, tc.runTests().wasSuccessful())
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
+143
@@ -0,0 +1,143 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import signal
|
||||
import unittest
|
||||
|
||||
from common import setup_sys_path, TestBase
|
||||
setup_sys_path()
|
||||
|
||||
from oeqa.core.exception import OEQADependency
|
||||
from oeqa.core.utils.test import getCaseMethod, getSuiteCasesNames, getSuiteCasesIDs
|
||||
|
||||
class TestTagDecorator(TestBase):
    """Meta tests for OETestTag and the loader's tags_filter argument."""

    def _runTest(self, modules, filterfn, expect):
        # Load with the given filter and compare the resulting case IDs.
        tc = self._testLoader(modules = modules, tags_filter = filterfn)
        test_loaded = set(getSuiteCasesIDs(tc.suites))
        self.assertEqual(expect, test_loaded)

    def test_oetag(self):
        """Exercise tags_filter with a range of include/exclude predicates."""
        tagtest_all = {'oetag.TagTest.testTagGood',
                       'oetag.TagTest.testTagOther',
                       'oetag.TagTest.testTagOtherMulti',
                       'oetag.TagTest.testTagNone'}
        classtest_all = {'oetag.TagClassTest.testTagOther',
                         'oetag.TagClassTest.testTagOtherMulti',
                         'oetag.TagClassTest.testTagNone'}

        # get all cases without any filtering
        self._runTest(['oetag'], None, tagtest_all | classtest_all)

        # exclude any case with tags
        self._runTest(['oetag'], lambda tags: tags,
                      {'oetag.TagTest.testTagNone'})

        # exclude any case with otherTag
        self._runTest(['oetag'], lambda tags: "otherTag" in tags,
                      {'oetag.TagTest.testTagGood',
                       'oetag.TagTest.testTagNone',
                       'oetag.TagClassTest.testTagNone'})

        # exclude any case with classTag
        self._runTest(['oetag'], lambda tags: "classTag" in tags, tagtest_all)

        # include any case with classTag
        self._runTest(['oetag'], lambda tags: "classTag" not in tags, classtest_all)

        # include any case with classTag or no tags
        self._runTest(['oetag'], lambda tags: tags and "classTag" not in tags,
                      {'oetag.TagTest.testTagNone'} | classtest_all)
|
||||
|
||||
class TestDependsDecorator(TestBase):
    """Meta tests for the OETestDepends decorator."""

    modules = ['depends']

    def test_depends_order(self):
        """The loader must order cases so dependencies run first; third and
        fourth are independent, so either relative order is acceptable."""
        tests = ['depends.DependsTest.testDependsFirst',
                 'depends.DependsTest.testDependsSecond',
                 'depends.DependsTest.testDependsThird',
                 'depends.DependsTest.testDependsFourth',
                 'depends.DependsTest.testDependsFifth']
        tests2 = list(tests)
        tests2[2], tests2[3] = tests[3], tests[2]
        tc = self._testLoader(modules=self.modules, tests=tests)
        test_loaded = getSuiteCasesIDs(tc.suites)
        # Direct membership test instead of 'True if ... else False'.
        result = test_loaded in (tests, tests2)
        msg = 'Failed to order tests using OETestDepends decorator.\nTest order:'\
              ' %s.\nExpected: %s\nOr: %s' % (test_loaded, tests, tests2)
        self.assertTrue(result, msg=msg)

    def test_depends_fail_missing_dependency(self):
        """Loading a case without its dependency raises OEQADependency."""
        expect = "TestCase depends.DependsTest.testDependsSecond depends on "\
                 "depends.DependsTest.testDependsFirst and isn't available"
        tests = ['depends.DependsTest.testDependsSecond']
        try:
            # Must throw OEQADependency because missing 'testDependsFirst'
            tc = self._testLoader(modules=self.modules, tests=tests)
            self.fail('Expected OEQADependency exception')
        except OEQADependency as e:
            msg = 'Expected OEQADependency exception missing testDependsFirst test'
            self.assertIn(expect, str(e), msg=msg)

    def test_depends_fail_circular_dependency(self):
        """Loading a dependency cycle raises OEQADependency."""
        expect = 'have a circular dependency'
        tests = ['depends.DependsTest.testDependsCircular1',
                 'depends.DependsTest.testDependsCircular2',
                 'depends.DependsTest.testDependsCircular3']
        try:
            # Must throw OEQADependency because circular dependency
            tc = self._testLoader(modules=self.modules, tests=tests)
            self.fail('Expected OEQADependency exception')
        except OEQADependency as e:
            msg = 'Expected OEQADependency exception having a circular dependency'
            self.assertIn(expect, str(e), msg=msg)
|
||||
|
||||
class TestTimeoutDecorator(TestBase):
    """Meta tests for the SIGALRM-based OETimeout decorator."""

    modules = ['timeout']

    def _load_and_run(self, tests):
        # Load the named cases from the 'timeout' module and run them.
        tc = self._testLoader(modules=self.modules, tests=tests)
        return tc.runTests()

    def test_timeout(self):
        """A case finishing inside its timeout passes; SIGALRM is restored."""
        msg = 'Failed to run test using OETestTimeout'
        alarm_signal = signal.getsignal(signal.SIGALRM)
        result = self._load_and_run(['timeout.TimeoutTest.testTimeoutPass'])
        self.assertTrue(result.wasSuccessful(), msg=msg)
        msg = "OETestTimeout didn't restore SIGALRM"
        self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)

    def test_timeout_fail(self):
        """A case exceeding its timeout fails; SIGALRM is restored."""
        msg = "OETestTimeout test didn't timeout as expected"
        alarm_signal = signal.getsignal(signal.SIGALRM)
        result = self._load_and_run(['timeout.TimeoutTest.testTimeoutFail'])
        self.assertFalse(result.wasSuccessful(), msg=msg)
        msg = "OETestTimeout didn't restore SIGALRM"
        self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)

    def test_timeout_cancel(self):
        """A skipped timeout case must not leave a pending alarm that kills
        a later, unrelated slow test."""
        tests = ['timeout.TimeoutTest.testTimeoutSkip', 'timeout.TimeoutTest.testTimeoutDepends', 'timeout.TimeoutTest.testTimeoutUnrelated']
        msg = 'Unrelated test failed to complete'
        self.assertTrue(self._load_and_run(tests).wasSuccessful(), msg=msg)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
Executable
+63
@@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from common import setup_sys_path, TestBase
|
||||
setup_sys_path()
|
||||
|
||||
from oeqa.core.exception import OEQADependency
|
||||
from oeqa.core.utils.test import getSuiteModules, getSuiteCasesIDs
|
||||
|
||||
class TestLoader(TestBase):
    """Meta tests for the oeqa test loader (module/case filtering and
    loading from multiple case directories)."""

    @unittest.skip("invalid directory is missing oetag.py")
    def test_fail_duplicated_module(self):
        """Two case directories providing the same module must be rejected."""
        cases_path = self.cases_path
        invalid_path = os.path.join(cases_path, 'loader', 'invalid')
        self.cases_path = [self.cases_path, invalid_path]
        expect = 'Duplicated oetag module found in'
        msg = 'Expected ImportError exception for having duplicated module'
        try:
            # Must throw ImportError because duplicated module
            tc = self._testLoader()
            self.fail(msg)
        except ImportError as e:
            self.assertIn(expect, str(e), msg=msg)
        finally:
            self.cases_path = cases_path

    def test_filter_modules(self):
        """modules= restricts loading to the named modules."""
        expected_modules = {'oetag'}
        tc = self._testLoader(modules=expected_modules)
        modules = getSuiteModules(tc.suites)
        msg = 'Expected just %s modules' % ', '.join(expected_modules)
        self.assertEqual(modules, expected_modules, msg=msg)

    def test_filter_cases(self):
        """tests= restricts loading to the named cases."""
        modules = ['oetag', 'data']
        expected_cases = {'data.DataTest.testDataOk',
                          'oetag.TagTest.testTagGood'}
        tc = self._testLoader(modules=modules, tests=expected_cases)
        cases = set(getSuiteCasesIDs(tc.suites))
        msg = 'Expected just %s cases' % ', '.join(expected_cases)
        self.assertEqual(cases, expected_cases, msg=msg)

    def test_import_from_paths(self):
        """Cases can be loaded from more than one directory at once."""
        cases_path = self.cases_path
        cases2_path = os.path.join(cases_path, 'loader', 'valid')
        expected_modules = {'another'}
        self.cases_path = [self.cases_path, cases2_path]
        try:
            tc = self._testLoader(modules=expected_modules)
            modules = getSuiteModules(tc.suites)
        finally:
            # Restore even on failure so later tests see the original path
            # (previously a failing load left the list in place).
            self.cases_path = cases_path
        msg = 'Expected modules from two different paths'
        self.assertEqual(modules, expected_modules, msg=msg)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
Executable
+40
@@ -0,0 +1,40 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import unittest
|
||||
import logging
|
||||
import tempfile
|
||||
|
||||
from common import setup_sys_path, TestBase
|
||||
setup_sys_path()
|
||||
|
||||
from oeqa.core.runner import OEStreamLogger
|
||||
|
||||
class TestRunner(TestBase):
    """Meta tests for oeqa.core.runner helpers."""

    def test_stream_logger(self):
        """OEStreamLogger.write() must pass lines through to the logging
        stream unchanged, including very large single writes."""
        lines = ['init', 'bigline_' * 65535, 'morebigline_' * 65535 * 4, 'end']

        # 'with' closes the temp file even when an assertion fails (the
        # previous explicit fp.close() leaked it on failure).
        with tempfile.TemporaryFile(mode='w+') as fp:
            logging.basicConfig(format='%(message)s', stream=fp)
            logger = logging.getLogger()
            logger.setLevel(logging.INFO)

            oeSL = OEStreamLogger(logger)

            for line in lines:
                oeSL.write(line)

            fp.seek(0)
            for i, fp_line in enumerate(fp.readlines()):
                fp_line = fp_line.strip()
                self.assertEqual(lines[i], fp_line)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
@@ -0,0 +1,335 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
#
|
||||
# Modified for use in OE by Richard Purdie, 2018
|
||||
#
|
||||
# Modified by: Corey Goldberg, 2013
|
||||
# License: GPLv2+
|
||||
#
|
||||
# Original code from:
|
||||
# Bazaar (bzrlib.tests.__init__.py, v2.6, copied Jun 01 2013)
|
||||
# Copyright (C) 2005-2011 Canonical Ltd
|
||||
# License: GPLv2+
|
||||
|
||||
import os
|
||||
import sys
|
||||
import traceback
|
||||
import unittest
|
||||
import subprocess
|
||||
import testtools
|
||||
import threading
|
||||
import time
|
||||
import io
|
||||
import json
|
||||
import subunit
|
||||
|
||||
from queue import Queue
|
||||
from itertools import cycle
|
||||
from subunit import ProtocolTestCase, TestProtocolClient
|
||||
from subunit.test_results import AutoTimingTestResultDecorator
|
||||
from testtools import ThreadsafeForwardingResult, iterate_tests
|
||||
from testtools.content import Content
|
||||
from testtools.content_type import ContentType
|
||||
from oeqa.utils.commands import get_test_layer
|
||||
|
||||
import bb.utils
|
||||
import oe.path
|
||||
|
||||
# Public API of this module.  The previous spelling "_all__" was a typo
# that silently declared nothing for 'from ... import *'.
__all__ = [
    'ConcurrentTestSuite',
    'fork_for_tests',
    'partition_tests',
]
|
||||
|
||||
#
|
||||
# Patch the version from testtools to allow access to _test_start and allow
|
||||
# computation of timing information and threading progress
|
||||
#
|
||||
class BBThreadsafeForwardingResult(ThreadsafeForwardingResult):
    """ThreadsafeForwardingResult extended to record per-test start times
    and per-thread progress, so the overall run can report progress as
    parallel test processes complete.

    Relies on testtools internals (_test_start, _stderr_buffer,
    _stdout_buffer); keep in sync with the testtools version in use.
    """

    def __init__(self, target, semaphore, threadnum, totalinprocess, totaltests, output, finalresult):
        super(BBThreadsafeForwardingResult, self).__init__(target, semaphore)
        # Index of the worker thread this result belongs to.
        self.threadnum = threadnum
        # Number of tests assigned to this worker / in the whole run.
        self.totalinprocess = totalinprocess
        self.totaltests = totaltests
        self.buffer = True
        # Captured subprocess output, replayed into the final result below.
        self.outputbuf = output
        self.finalresult = finalresult
        self.finalresult.buffer = True
        self.target = target

    def _add_result_with_semaphore(self, method, test, *args, **kwargs):
        # Serialize bookkeeping on the shared result structures, then let
        # the parent forward the actual result.
        self.semaphore.acquire()
        try:
            if self._test_start:
                self.result.starttime[test.id()] = self._test_start.timestamp()
                self.result.threadprogress[self.threadnum].append(test.id())
                totalprogress = sum(len(x) for x in self.result.threadprogress.values())
                # "thread: done-in-thread/thread-total done-overall/overall (secs) (failed) (id)"
                self.result.progressinfo[test.id()] = "%s: %s/%s %s/%s (%ss) (%s failed) (%s)" % (
                    self.threadnum,
                    len(self.result.threadprogress[self.threadnum]),
                    self.totalinprocess,
                    totalprogress,
                    self.totaltests,
                    "{0:.2f}".format(time.time()-self._test_start.timestamp()),
                    self.target.failed_tests,
                    test.id())
        finally:
            self.semaphore.release()
        # Replay captured subprocess output through the final result's
        # buffered stderr so it appears with the forwarded result.
        self.finalresult._stderr_buffer = io.StringIO(initial_value=self.outputbuf.getvalue().decode("utf-8"))
        self.finalresult._stdout_buffer = io.StringIO()
        super(BBThreadsafeForwardingResult, self)._add_result_with_semaphore(method, test, *args, **kwargs)
|
||||
|
||||
class ProxyTestResult:
    """A very basic TestResult proxy: forwards add* calls to a wrapped
    result object while keeping a count of failed tests.
    """

    def __init__(self, target):
        self.result = target
        self.failed_tests = 0

    def _addResult(self, method, test, *args, exception = False, **kwargs):
        return method(test, *args, **kwargs)

    def _addFailed(self, method, test, err, **kwargs):
        # Shared path for the two outcome kinds that count as failures.
        self.failed_tests += 1
        self._addResult(method, test, err, exception = True, **kwargs)

    def addError(self, test, err = None, **kwargs):
        self._addFailed(self.result.addError, test, err, **kwargs)

    def addFailure(self, test, err = None, **kwargs):
        self._addFailed(self.result.addFailure, test, err, **kwargs)

    def addSuccess(self, test, **kwargs):
        self._addResult(self.result.addSuccess, test, **kwargs)

    def addExpectedFailure(self, test, err = None, **kwargs):
        self._addResult(self.result.addExpectedFailure, test, err, exception = True, **kwargs)

    def addUnexpectedSuccess(self, test, **kwargs):
        self._addResult(self.result.addUnexpectedSuccess, test, **kwargs)

    def wasSuccessful(self):
        return not self.failed_tests

    def __getattr__(self, attr):
        # Everything not overridden above is delegated to the wrapped result.
        return getattr(self.result, attr)
|
||||
|
||||
class ExtraResultsDecoderTestResult(ProxyTestResult):
    """Proxy which decodes the JSON-encoded 'extraresults' detail (as
    produced by ExtraResultsEncoderTestResult) back into a Python object
    before forwarding the result call.
    """
    def _addResult(self, method, test, *args, exception = False, **kwargs):
        if "details" in kwargs and "extraresults" in kwargs["details"]:
            if isinstance(kwargs["details"]["extraresults"], Content):
                # Copy before mutating so the caller's dicts stay untouched.
                kwargs = kwargs.copy()
                kwargs["details"] = kwargs["details"].copy()
                extraresults = kwargs["details"]["extraresults"]
                # Content delivers its payload as an iterable of byte chunks.
                data = bytearray()
                for b in extraresults.iter_bytes():
                    data += b
                extraresults = json.loads(data.decode())
                kwargs["details"]["extraresults"] = extraresults
        return method(test, *args, **kwargs)
|
||||
|
||||
class ExtraResultsEncoderTestResult(ProxyTestResult):
    """Proxy which serialises a test's 'extraresults' attribute into a
    JSON Content detail so it survives transport over the subunit stream.
    """
    def _addResult(self, method, test, *args, exception = False, **kwargs):
        if hasattr(test, "extraresults"):
            # Lazy callable: Content reads the payload on demand.
            extras = lambda : [json.dumps(test.extraresults).encode()]
            kwargs = kwargs.copy()
            if "details" not in kwargs:
                kwargs["details"] = {}
            else:
                kwargs["details"] = kwargs["details"].copy()
            kwargs["details"]["extraresults"] = Content(ContentType("application", "json", {'charset': 'utf8'}), extras)
        # if using details, need to encode any exceptions into the details obj,
        # testtools does not handle "err" and "details" together.
        if "details" in kwargs and exception and (len(args) >= 1 and args[0] is not None):
            kwargs["details"]["traceback"] = testtools.content.TracebackContent(args[0], test)
            args = []
        return method(test, *args, **kwargs)
|
||||
|
||||
#
|
||||
# We have to patch subunit since it doesn't understand how to handle addError
|
||||
# outside of a running test case. This can happen if classSetUp() fails
|
||||
# for a class of tests. This unfortunately has horrible internal knowledge.
|
||||
#
|
||||
def outSideTestaddError(self, offset, line):
    """An 'error:' directive has been read."""
    # NOTE: this reaches into subunit parser internals (_current_test,
    # _state, _reading_error_details) to synthesise a test case for an
    # error reported outside any running test (e.g. a setUpClass failure).
    test_name = line[offset:-1].decode('utf8')
    self.parser._current_test = subunit.RemotedTestCase(test_name)
    self.parser.current_test_description = test_name
    self.parser._state = self.parser._reading_error_details
    self.parser._reading_error_details.set_simple()
    self.parser.subunitLineReceived(line)

# Monkeypatch subunit's "outside a test" state so such errors are reported
# instead of being rejected.
subunit._OutSideTest.addError = outSideTestaddError
|
||||
|
||||
# Like outSideTestaddError above, we need an equivalent for skips
|
||||
# happening at the setUpClass() level, otherwise we will see "UNKNOWN"
|
||||
# as a result for concurrent tests
|
||||
#
|
||||
def outSideTestaddSkip(self, offset, line):
    """A 'skip:' directive has been read."""
    # Same internal-state trick as outSideTestaddError, but for skips
    # raised at setUpClass() level, which would otherwise show as UNKNOWN.
    test_name = line[offset:-1].decode('utf8')
    self.parser._current_test = subunit.RemotedTestCase(test_name)
    self.parser.current_test_description = test_name
    self.parser._state = self.parser._reading_skip_details
    self.parser._reading_skip_details.set_simple()
    self.parser.subunitLineReceived(line)

# Monkeypatch subunit's "outside a test" state to accept skips as well.
subunit._OutSideTest.addSkip = outSideTestaddSkip
|
||||
|
||||
#
|
||||
# A dummy structure to add to io.StringIO so that the .buffer object
|
||||
# is available and accepts writes. This allows unittest with buffer=True
|
||||
# to interact ok with subunit which wants to access sys.stdout.buffer.
|
||||
#
|
||||
class dummybuf(object):
    """Stand-in for a text stream's .buffer attribute.

    Accepts bytes writes and forwards them, decoded as UTF-8, to the
    wrapped text stream. Lets unittest with buffer=True interact with
    subunit, which writes to sys.stdout.buffer.
    """

    def __init__(self, parent):
        self.p = parent

    def write(self, data):
        # Decode the raw bytes and hand the text to the wrapped stream.
        text = data.decode("utf-8")
        self.p.write(text)
|
||||
|
||||
#
|
||||
# Taken from testtools.ConcurrentTestSuite but modified for OE use
|
||||
#
|
||||
class ConcurrentTestSuite(unittest.TestSuite):
    """TestSuite which runs its contained tests in forked child processes
    (see fork_for_tests()), forwarding each child's subunit stream back
    into a single shared result object.

    setupfunc is called in each child to set up a per-child build
    directory; removefunc cleans such a directory up afterwards.
    """

    def __init__(self, suite, processes, setupfunc, removefunc):
        super(ConcurrentTestSuite, self).__init__([suite])
        self.processes = processes
        self.setupfunc = setupfunc
        self.removefunc = removefunc

    def run(self, result):
        # Each testserver entry is (ProtocolTestCase, test count, output buf).
        testservers, totaltests = fork_for_tests(self.processes, self)
        try:
            threads = {}
            queue = Queue()
            semaphore = threading.Semaphore(1)
            result.threadprogress = {}
            for i, (testserver, testnum, output) in enumerate(testservers):
                result.threadprogress[i] = []
                process_result = BBThreadsafeForwardingResult(
                    ExtraResultsDecoderTestResult(result),
                    semaphore, i, testnum, totaltests, output, result)
                # One reader thread per child, draining its subunit stream.
                reader_thread = threading.Thread(
                    target=self._run_test, args=(testserver, process_result, queue))
                threads[testserver] = reader_thread, process_result
                reader_thread.start()
            # Reap reader threads as their children finish.
            while threads:
                finished_test = queue.get()
                threads[finished_test][0].join()
                del threads[finished_test]
        except:
            for thread, process_result in threads.values():
                process_result.stop()
            raise
        finally:
            # Always close the pipe streams to the children.
            for testserver in testservers:
                testserver[0]._stream.close()

    def _run_test(self, testserver, process_result, queue):
        # Runs in a reader thread; forwards one child's results.
        try:
            try:
                testserver.run(process_result)
            except Exception:
                # The run logic itself failed
                case = testtools.ErrorHolder(
                    "broken-runner",
                    error=sys.exc_info())
                case.run(process_result)
        finally:
            # Always signal completion so run() can join this thread.
            queue.put(testserver)
|
||||
|
||||
def fork_for_tests(concurrency_num, suite):
    """Fork concurrency_num child processes, each running a partition of
    the tests in suite and streaming results back over a pipe in subunit
    protocol.

    Returns (testservers, totaltests) where testservers is a list of
    (ProtocolTestCase, number of tests in that child, BytesIO capturing
    the child's stdout/stderr).
    """
    testservers = []
    # FIX: selftestdir was only assigned when BUILDDIR was set, but it is
    # unconditionally passed to suite.setupfunc() below, which would raise
    # NameError in the child when BUILDDIR is absent from the environment.
    selftestdir = None
    if 'BUILDDIR' in os.environ:
        selftestdir = get_test_layer()

    test_blocks = partition_tests(suite, concurrency_num)
    # Clear the tests from the original suite so it doesn't keep them alive
    suite._tests[:] = []
    totaltests = sum(len(x) for x in test_blocks)
    for process_tests in test_blocks:
        numtests = len(process_tests)
        process_suite = unittest.TestSuite(process_tests)
        # Also clear each split list so new suite has only reference
        process_tests[:] = []
        c2pread, c2pwrite = os.pipe()
        # Clear buffers before fork to avoid duplicate output
        sys.stdout.flush()
        sys.stderr.flush()
        pid = os.fork()
        if pid == 0:
            # Child: run one partition and stream results to the parent.
            ourpid = os.getpid()
            try:
                newbuilddir = None
                stream = os.fdopen(c2pwrite, 'wb', 1)
                os.close(c2pread)

                (builddir, newbuilddir) = suite.setupfunc("-st-" + str(ourpid), selftestdir, process_suite)

                # Leave stderr and stdout open so we can see test noise
                # Close stdin so that the child goes away if it decides to
                # read from stdin (otherwise its a roulette to see what
                # child actually gets keystrokes for pdb etc).
                newsi = os.open(os.devnull, os.O_RDWR)
                os.dup2(newsi, sys.stdin.fileno())

                # Send stdout/stderr over the stream
                os.dup2(c2pwrite, sys.stdout.fileno())
                os.dup2(c2pwrite, sys.stderr.fileno())

                subunit_client = TestProtocolClient(stream)
                subunit_result = AutoTimingTestResultDecorator(subunit_client)
                unittest_result = process_suite.run(ExtraResultsEncoderTestResult(subunit_result))
                # Guard against grandchildren that escaped from a test.
                if ourpid != os.getpid():
                    os._exit(0)
                if newbuilddir and unittest_result.wasSuccessful():
                    suite.removefunc(newbuilddir)
            except:
                # Don't do anything with process children
                if ourpid != os.getpid():
                    os._exit(1)
                # Try and report traceback on stream, but exit with error
                # even if stream couldn't be created or something else
                # goes wrong. The traceback is formatted to a string and
                # written in one go to avoid interleaving lines from
                # multiple failing children.
                try:
                    stream.write(traceback.format_exc().encode('utf-8'))
                except:
                    sys.stderr.write(traceback.format_exc())
                finally:
                    if newbuilddir:
                        suite.removefunc(newbuilddir)
                    stream.flush()
                    os._exit(1)
            stream.flush()
            os._exit(0)
        else:
            # Parent: wrap the read end of the pipe in a subunit test case.
            os.close(c2pwrite)
            stream = os.fdopen(c2pread, 'rb', 1)
            # Collect stdout/stderr into an io buffer
            output = io.BytesIO()
            testserver = ProtocolTestCase(stream, passthrough=output)
            testservers.append((testserver, numtests, output))
    return testservers, totaltests
|
||||
|
||||
def partition_tests(suite, count):
    """Split the tests in suite into at most count partitions.

    Tests from the same class always land in the same partition, while
    classes from one module may be spread across partitions to aid
    parallelisation. Empty partitions are dropped.
    """
    # Group tests by "module.ClassName".
    groups = {}
    for case in iterate_tests(suite):
        key = "%s.%s" % (case.__module__, case.__class__.__name__)
        groups.setdefault(key, []).append(case)

    # Deal the class groups round-robin onto the requested partitions.
    buckets = [[] for _ in range(count)]
    for bucket, key in zip(cycle(buckets), groups):
        bucket.extend(groups[key])

    # No point in empty threads so drop them.
    return [bucket for bucket in buckets if bucket]
|
||||
|
||||
@@ -0,0 +1,22 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
def findFile(file_name, directory):
    """
    Search for a file named file_name anywhere under directory and
    return its complete path, or None if no such file exists.
    """
    matches = (os.path.join(dirpath, file_name)
               for dirpath, _dirnames, filenames in os.walk(directory)
               if file_name in filenames)
    # os.walk is top-down by default, so this is the shallowest match.
    return next(matches, None)
|
||||
|
||||
def remove_safe(path):
    """
    Remove path, ignoring the case where it does not exist.

    The original exists-then-remove pattern was racy (TOCTOU): the file
    could disappear between the check and the removal. EAFP avoids that;
    any error other than a missing file still propagates.
    """
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
|
||||
|
||||
@@ -0,0 +1,89 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import inspect
|
||||
import unittest
|
||||
|
||||
def getSuiteCases(suite):
    """
    Return the individual test cases contained in a test suite.

    Recurses into nested TestSuite objects; a bare TestCase yields a
    one-element list; anything else yields an empty list.
    """
    if isinstance(suite, unittest.TestCase):
        return [suite]
    if isinstance(suite, unittest.suite.TestSuite):
        cases = []
        for entry in suite:
            cases.extend(getSuiteCases(entry))
        return cases
    return []
|
||||
|
||||
def getSuiteModules(suite):
    """
    Return the set of module names covered by the tests in suite.
    """
    return {getCaseModule(case) for case in getSuiteCases(suite)}
|
||||
|
||||
def getSuiteCasesInfo(suite, func):
    """
    Return per-test-case info from suite, where func extracts the info
    from each case.
    """
    return [func(case) for case in getSuiteCases(suite)]
|
||||
|
||||
def getSuiteCasesNames(suite):
    """
    Return the bare test method names of all cases in suite.

    Thin wrapper over getSuiteCasesInfo() using getCaseMethod.
    """
    return getSuiteCasesInfo(suite, getCaseMethod)
|
||||
|
||||
def getSuiteCasesIDs(suite):
    """
    Return the full test ids of all cases in suite.

    Thin wrapper over getSuiteCasesInfo() using getCaseID.
    """
    return getSuiteCasesInfo(suite, getCaseID)
|
||||
|
||||
def getSuiteCasesFiles(suite):
    """
    Return the source file paths of all cases in suite.

    Thin wrapper over getSuiteCasesInfo() using getCaseFile.
    """
    return getSuiteCasesInfo(suite, getCaseFile)
|
||||
|
||||
def getCaseModule(test_case):
    """
    Return the name of the module that defines test_case's class.
    """
    return type(test_case).__module__
|
||||
|
||||
def getCaseClass(test_case):
    """
    Return the class name of test_case.
    """
    return type(test_case).__name__
|
||||
|
||||
def getCaseID(test_case):
    """
    Return the complete id of test_case (module.Class.method).
    """
    return test_case.id()
|
||||
|
||||
def getCaseFile(test_case):
    """
    Return the path of the source file defining test_case's class.
    """
    return inspect.getsourcefile(test_case.__class__)
|
||||
|
||||
def getCaseMethod(test_case):
    """
    Return the bare test method name (last dotted component of the id).
    """
    return getCaseID(test_case).rpartition('.')[2]
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,26 @@
|
||||
#include <stdio.h>
|
||||
#include <math.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
/* Widen a 64-bit integer to a double. */
double convert(long long l)
{
	return (double)l;
}
|
||||
|
||||
/* Small floating-point sanity test: exercises long long -> double
 * conversion and floorf(). Exits non-zero on any mismatch.
 * NOTE(review): floorf() is the float variant applied to a double
 * argument — presumably intentional here to exercise that exact libm
 * entry point; confirm before "fixing" to floor(). */
int main(int argc, char * argv[]) {

	long long l = 10;
	double f;
	double check = 10.0;

	/* Integer-to-double conversion must be exact for small values. */
	f = convert(l);
	printf("convert: %lld => %f\n", l, f);
	if ( f != check ) exit(1);

	/* floorf(1234.67) must be exactly 1234.0. */
	f = 1234.67;
	check = 1234.0;
	printf("floorf(%f) = %f\n", f, floorf(f));
	if ( floorf(f) != check) exit(1);

	return 0;
}
|
||||
@@ -0,0 +1,3 @@
|
||||
#include <limits>

// Minimal translation unit: compile-only check that the C++ standard
// library header <limits> is usable; does nothing at runtime.
int main() {}
|
||||
@@ -0,0 +1,2 @@
|
||||
// Minimal Rust program used as a compile/run smoke test; does nothing.
fn main() {
}
|
||||
@@ -0,0 +1,40 @@
|
||||
{
|
||||
"runtime_core-image-minimal:qemuarm_20181225195701": {
|
||||
"configuration": {
|
||||
"DISTRO": "poky",
|
||||
"HOST_DISTRO": "ubuntu-16.04",
|
||||
"IMAGE_BASENAME": "core-image-minimal",
|
||||
"IMAGE_PKGTYPE": "rpm",
|
||||
"LAYERS": {
|
||||
"meta": {
|
||||
"branch": "master",
|
||||
"commit": "801745d918e83f976c706f29669779f5b292ade3",
|
||||
"commit_count": 52782
|
||||
},
|
||||
"meta-poky": {
|
||||
"branch": "master",
|
||||
"commit": "801745d918e83f976c706f29669779f5b292ade3",
|
||||
"commit_count": 52782
|
||||
},
|
||||
"meta-yocto-bsp": {
|
||||
"branch": "master",
|
||||
"commit": "801745d918e83f976c706f29669779f5b292ade3",
|
||||
"commit_count": 52782
|
||||
}
|
||||
},
|
||||
"MACHINE": "qemuarm",
|
||||
"STARTTIME": "20181225195701",
|
||||
"TEST_TYPE": "runtime"
|
||||
},
|
||||
"result": {
|
||||
"apt.AptRepoTest.test_apt_install_from_repo": {
|
||||
"log": "Test requires apt to be installed",
|
||||
"status": "PASSED"
|
||||
},
|
||||
"buildcpio.BuildCpioTest.test_cpio": {
|
||||
"log": "Test requires autoconf to be installed",
|
||||
"status": "ERROR"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,64 @@
|
||||
########
|
||||
diff --git a/glxgears_check.sh b/glxgears_check.sh
|
||||
index 17622b8..c4d3b97 100755
|
||||
--- a/glxgears_check.sh
|
||||
+++ b/glxgears_check.sh
|
||||
@@ -31,7 +31,7 @@ else
|
||||
|
||||
sleep 6
|
||||
|
||||
- XPID=$( ps ax | awk '{print $1, $5}' | grep glxgears | awk '{print $1}')
|
||||
+ XPID=$( ps | awk '{print $1, $5}' | grep glxgears | awk '{print $1}')
|
||||
if [ ! -z "$XPID" ]; then
|
||||
kill -9 $XPID >/dev/null 2>&1
|
||||
echo "glxgears can run, PASS!"
|
||||
diff --git a/x_close.sh b/x_close.sh
|
||||
index e287be1..3429f1a 100755
|
||||
--- a/x_close.sh
|
||||
+++ b/x_close.sh
|
||||
@@ -22,7 +22,7 @@
|
||||
#
|
||||
function close_proc(){
|
||||
echo "kill process Xorg"
|
||||
-XPID=$( ps ax | awk '{print $1, $5}' | egrep "X$|Xorg$" | awk '{print $1}')
|
||||
+XPID=$( ps | awk '{print $1, $6}' | egrep "X$|Xorg$" | awk '{print $1}')
|
||||
if [ ! -z "$XPID" ]; then
|
||||
kill $XPID
|
||||
sleep 4
|
||||
diff --git a/x_start.sh b/x_start.sh
|
||||
index 9cf6eab..2305796 100755
|
||||
--- a/x_start.sh
|
||||
+++ b/x_start.sh
|
||||
@@ -24,7 +24,7 @@
|
||||
X_ERROR=0
|
||||
|
||||
#test whether X has started
|
||||
-PXID=$(ps ax |awk '{print $1,$5}' |egrep "Xorg$|X$" |grep -v grep | awk '{print $1}')
|
||||
+PXID=$(ps |awk '{print $1,$6}' |egrep "Xorg$|X$" |grep -v grep | awk '{print $1}')
|
||||
if [ ! -z "$PXID" ]; then
|
||||
echo "[WARNING] Xorg has started!"
|
||||
XORG_STATUS="started"
|
||||
@@ -35,9 +35,11 @@ else
|
||||
#start up the x server
|
||||
echo "Start up the X server for test in display $DISPLAY................"
|
||||
|
||||
- $XORG_DIR/bin/X >/dev/null 2>&1 &
|
||||
+ #$XORG_DIR/bin/X >/dev/null 2>&1 &
|
||||
+ #sleep 8
|
||||
+ #xterm &
|
||||
+ /etc/init.d/xserver-nodm start &
|
||||
sleep 8
|
||||
- xterm &
|
||||
fi
|
||||
XLOG_FILE=/var/log/Xorg.0.log
|
||||
[ -f $XORG_DIR/var/log/Xorg.0.log ] && XLOG_FILE=$XORG_DIR/var/log/Xorg.0.log
|
||||
@@ -54,7 +56,7 @@ fi
|
||||
X_ERROR=1
|
||||
fi
|
||||
|
||||
- XPID=$( ps ax | awk '{print $1, $5}' | egrep "X$|Xorg$" |grep -v grep| awk '{print $1}')
|
||||
+ XPID=$( ps | awk '{print $1, $6}' | egrep "X$|Xorg$" |grep -v grep| awk '{print $1}')
|
||||
if [ -z "$XPID" ]; then
|
||||
echo "Start up X server FAIL!"
|
||||
echo
|
||||
########
|
||||
@@ -0,0 +1,626 @@
|
||||
[
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.boot_and_install_from_USB",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "plugin usb which contains live image burned",
|
||||
"expected_results": "User can choose install system from usb stick onto harddisk from boot menu or command line option \n"
|
||||
},
|
||||
"2": {
|
||||
"action": "configure device BIOS to firstly boot from USB if necessary",
|
||||
"expected_results": "Installed system can boot up"
|
||||
},
|
||||
"3": {
|
||||
"action": "boot the device and select option \"Install\" from boot menu",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "proceed through default install process",
|
||||
"expected_results": ""
|
||||
},
|
||||
"5": {
|
||||
"action": "Remove USB, and reboot into new installed system. \nNote: If installation was successfully completed and received this message \"\"(sdx): Volume was not properly unmounted...Please run fsck.\"\" ignore it because this was allowed according to bug 9652.",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "boot_and_install_from_USB"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.live_boot_from_USB",
|
||||
"author": [
|
||||
{
|
||||
"email": "juan.fernandox.ramos.frayle@intel.com",
|
||||
"name": "juan.fernandox.ramos.frayle@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Plugin usb which contains live image burned.",
|
||||
"expected_results": "User can choose boot from live image on usb stick from boot menu or command line option"
|
||||
},
|
||||
"2": {
|
||||
"action": "Configure device BIOS to firstly boot from USB if necessary.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Reboot the device and boot from USB stick.",
|
||||
"expected_results": "Live image can boot up with usb stick"
|
||||
}
|
||||
},
|
||||
"summary": "live_boot_from_USB"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.switch_among_multi_applications_and_desktop",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "launch several applications(like contacts, file manager, notes, etc)",
|
||||
"expected_results": "user could switch among multi applications and desktop"
|
||||
},
|
||||
"2": {
|
||||
"action": "launch terminal",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "switch among multi applications and desktop",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "close applications \nNote: The case is for sato image only. ",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "switch_among_multi_applications_and_desktop"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.connman_offline_mode_in_connman-gnome",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Launch connman-properties after system booting \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "choose \"offline mode\" and check the connection of all network interfaces ",
|
||||
"expected_results": "All connection should be off after clicking \"offline mode\" . "
|
||||
}
|
||||
},
|
||||
"summary": "connman_offline_mode_in_connman-gnome"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.check_CPU_utilization_after_standby",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Start up system",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "run \"top\" command and check if there is any process eating CPU time",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "make system into standby and resume it",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "run \"top\" command and check if there is any difference with the data before standby",
|
||||
"expected_results": "There should be no big difference before/after standby with \"top\" . "
|
||||
}
|
||||
},
|
||||
"summary": "check_CPU_utilization_after_standby"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.Add_multiple_files_in_media_player",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Launch media player",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Add multiple files(5 files) in media player at same time (ogg or wav)",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Verify the sound.",
|
||||
"expected_results": "Media player should be OK with this action, it reproduce files correctly."
|
||||
}
|
||||
},
|
||||
"summary": "Add_multiple_files_in_media_player"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.video_-_libva_check_(ogg_video_play)",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "check if libva is installed on system (or libogg)",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "copy sample ogg file to system",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "launch media player can play the ogg file",
|
||||
"expected_results": "ogg file can be played without problem when libva is used (or libogg) "
|
||||
}
|
||||
},
|
||||
"summary": "video_-_libva_check_(ogg_video_play)"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.media_player_-_play_video_(ogv)",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "copy sample ogv file to system",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "launch media player and make sure it can play the ogv file",
|
||||
"expected_results": "ogv file can be played without problem"
|
||||
}
|
||||
},
|
||||
"summary": "media_player_-_play_video_(ogv)"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.media_player_-_stop/play_button_(ogv)",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "copy sample ogv file to system",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "launch media player can play the ogv file",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "click \"stop\" button to stop playing",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "click \"start\" button to resume playing",
|
||||
"expected_results": "ogv file can be start/stop without problem"
|
||||
}
|
||||
},
|
||||
"summary": "media_player_-_stop/play_button_(ogv)"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.audio_-_play_(ogg)_with_HDMI",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "copy sample ogg file to system",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "connect system with a monitor with HDMI",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "launch media player and play the ogg file",
|
||||
"expected_results": "ogg file can be played without problem with HDMI"
|
||||
}
|
||||
},
|
||||
"summary": "audio_-_play_(ogg)_with_HDMI"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.audio_-_play_(wav)_with_HDMI",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "copy sample wav file to system",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "connect system with a monitor with HDMI",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "launch media player and play the wav file",
|
||||
"expected_results": "wav file can be played without problem, with HDMI"
|
||||
}
|
||||
},
|
||||
"summary": "audio_-_play_(wav)_with_HDMI"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.Graphics_-_ABAT",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Download ABAT test suite from internal git repository, git clone git://tinderbox.sh.intel.com/git/abat",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Apply following patch to make it work on yocto environment",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Run \"./abat.sh\" to run ABAT test refer to abat.patch",
|
||||
"expected_results": "All ABAT test should pass. \nNote : If below 3 fails appears ignore them. \n- start up X server fail.. due is already up \n- module [intel_agp] \n- module [i915]"
|
||||
}
|
||||
},
|
||||
"summary": "Graphics_-_ABAT"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.Graphics_-_x11perf_-_2D",
|
||||
"author": [
|
||||
{
|
||||
"email": "alexandru.c.georgescu@intel.com",
|
||||
"name": "alexandru.c.georgescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Run \"x11perf -aa10text\" and \"x11perf -rgb10text\"",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Get the FPS result and compare it with upstream graphics data on Sandybridge",
|
||||
"expected_results": "There should not be big regression between Yocto and upstream linux . "
|
||||
}
|
||||
},
|
||||
"summary": "Graphics_-_x11perf_-_2D"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.Check_if_SATA_disk_can_work_correctly",
|
||||
"author": [
|
||||
{
|
||||
"email": "yi.zhao@windriver.com",
|
||||
"name": "yi.zhao@windriver.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Run fdisk command to create partition on SATA disk. ",
|
||||
"expected_results": "The SATA device can mount, umount, read and write. "
|
||||
},
|
||||
"2": {
|
||||
"action": "Mount/Umount \n mke2fs /dev/sda1 \n mount -t ext2 /dev/sda1 /mnt/disk \n umount /mnt/disk",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Read/Write (filesystem) \n touch /mnt/disk/test.txt \n echo abcd > /mnt/disk/test.txt \n cat /mnt/disk/test.txt",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Read/Write (raw) \n dd if=/dev/sda1 of=/tmp/test bs=1k count=1k \n This command will read 1MB from /dev/sda1 to /tmp/test",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "Check_if_SATA_disk_can_work_correctly"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.Install_and_boot_from_USB-drive_to_HDD-drive",
|
||||
"author": [
|
||||
{
|
||||
"email": "david.israelx.rodriguez.castellanos@intel.com",
|
||||
"name": "david.israelx.rodriguez.castellanos@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Get a HDD drive.",
|
||||
"expected_results": "User can choose install system from USB stick on HDD drive from boot menu or command line option \n"
|
||||
},
|
||||
"2": {
|
||||
"action": "Plugin USB which contains live image burned (USB1).",
|
||||
"expected_results": "Installed system can boot up."
|
||||
},
|
||||
"3": {
|
||||
"action": "Configure device BIOS to firstly boot from USB if necessary",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Boot the device and select option \"Install\" from boot menu.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"5": {
|
||||
"action": "Make sure that the divice in which image is going to be installed is the HDD drive.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"6": {
|
||||
"action": "Proceed through default install process.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"7": {
|
||||
"action": "Remove USB1, and reboot into new installed system.",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "Install_and_boot_from_USB-drive_to_HDD-drive"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.Install_and_boot_from_USB-drive_to_SD-drive",
|
||||
"author": [
|
||||
{
|
||||
"email": "david.israelx.rodriguez.castellanos@intel.com",
|
||||
"name": "david.israelx.rodriguez.castellanos@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Get a SD-drive with enough free space to install an image.",
|
||||
"expected_results": "User can choose install system from USB stick on SD-drive from boot menu or command line option. \n"
|
||||
},
|
||||
"2": {
|
||||
"action": "Plugin USB which contains live image burned (USB1).",
|
||||
"expected_results": "Installed system can boot up."
|
||||
},
|
||||
"3": {
|
||||
"action": "Configure device BIOS to firstly boot from USB if necessary",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Boot the device and select option \"Install\" from boot menu.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"5": {
|
||||
"action": "Make sure that the device in which image is going to be installed is the SD-drive.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"6": {
|
||||
"action": "Proceed through default install process.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"7": {
|
||||
"action": "Remove USB1, and reboot into new installed system.",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "Install_and_boot_from_USB-drive_to_SD-drive"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.Test_boot_on_serial_communication_SD",
|
||||
"author": [
|
||||
{
|
||||
"email": "juan.fernandox.ramos.frayle@intel.com",
|
||||
"name": "juan.fernandox.ramos.frayle@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "1.- Create a yocto project image in a SD card \nexample \n2 - Configure a connection like shown in the link above: \nhttps://wiki.yoctoproject.org/wiki/MinnowMax_board_Serial_video_connection_guide \n3 - Verify the Minnow Max board is connected to the host \n4 - Boot the system to desktop \n5 - Open a Terminal and check the IP \nIn Terminal type $ifconfig\"",
|
||||
"expected_results": "Verify you can create a live image \n"
|
||||
}
|
||||
},
|
||||
"summary": "Test_boot_on_serial_communication_SD"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.Test_boot_on_serial_communication_HDD",
|
||||
"author": [
|
||||
{
|
||||
"email": "juan.fernandox.ramos.frayle@intel.com",
|
||||
"name": "juan.fernandox.ramos.frayle@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "1 - Create a yocto project image in a HDD \nexample \n2 - Configure a connection like shown in the link above: \nhttps://wiki.yoctoproject.org/wiki/MinnowMax_board_Serial_video_connection_guide \n3 - Verify the Minnow Max board is connected to the host \n4 - Boot the system to desktop \n5 - Open a Terminal and check the IP \nIn Terminal type $ifconfig\"",
|
||||
"expected_results": "Verify you can create a live image \n"
|
||||
}
|
||||
},
|
||||
"summary": "Test_boot_on_serial_communication_HDD"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.Test_boot_on_serial_communication_USB",
|
||||
"author": [
|
||||
{
|
||||
"email": "juan.fernandox.ramos.frayle@intel.com",
|
||||
"name": "juan.fernandox.ramos.frayle@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "1.- Create a yocto project image in a USB \nexample <dd if= core-image-sato-sdk.hddimg of =/dev/sdb>",
|
||||
"expected_results": "Verify you can create a live image \n"
|
||||
},
|
||||
"2": {
|
||||
"action": "Configure a connection like shown in the link above: \nhttps://wiki.yoctoproject.org/wiki/MinnowMax_board_Serial_video_connection_guide\n\n",
|
||||
"expected_results": "Video signal is present and not delayed \n"
|
||||
},
|
||||
"3": {
|
||||
"action": " Verify the Minnow Max board is connected to the host",
|
||||
"expected_results": "Verify the system boot ok and no errors are present \n"
|
||||
},
|
||||
"4": {
|
||||
"action": " Boot the system to desktop",
|
||||
"expected_results": " Check that a valid IP is retrieved"
|
||||
},
|
||||
"5": {
|
||||
"action": " Open a Terminal and check the IP \nIn Terminal type $ifconfig\" ",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "Test_boot_on_serial_communication_USB"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.Test_Seek_bar_and_volume_control",
|
||||
"author": [
|
||||
{
|
||||
"email": "juan.fernandox.ramos.frayle@intel.com",
|
||||
"name": "juan.fernandox.ramos.frayle@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Run media player and load a media file ",
|
||||
"expected_results": "Media player correctly open audio/video file \n"
|
||||
},
|
||||
"2": {
|
||||
"action": "Verify that seek and volume control are present ",
|
||||
"expected_results": "Seek bar and volume control are present \n"
|
||||
},
|
||||
"3": {
|
||||
"action": "Verify that selecting the speaker icon opens the volume control",
|
||||
"expected_results": "Volume control bar must appear \n"
|
||||
},
|
||||
"4": {
|
||||
"action": "Verify you can increase and decrease volume level with the volume control",
|
||||
"expected_results": "Volume level must be increased and decreased \n"
|
||||
},
|
||||
"5": {
|
||||
"action": "Observe that slider on the seek bar moves along with the video/audio play",
|
||||
"expected_results": "Video/audio file can be played and slider moves along with the video/audio play \n"
|
||||
},
|
||||
"6": {
|
||||
"action": "Verify you can navigate the video with the slider back and forward",
|
||||
"expected_results": "The slider can move back and forward in the seek bar \n"
|
||||
},
|
||||
"7": {
|
||||
"action": "Verify that seek and volume control are functional in full screen mode",
|
||||
"expected_results": "Press the full screen mode icon, seek bar and volume control must work fine \n"
|
||||
},
|
||||
"8": {
|
||||
"action": "Verify that pressing << or >> while playing a file makes the slide goes slow/backwards or faster",
|
||||
"expected_results": "Verify << and >> works correctly"
|
||||
}
|
||||
},
|
||||
"summary": "Test_Seek_bar_and_volume_control"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.Check_if_watchdog_can_reset_the_target_system",
|
||||
"author": [
|
||||
{
|
||||
"email": "yi.zhao@windriver.com",
|
||||
"name": "yi.zhao@windriver.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "1.Check if watchdog device exist in /dev/ directory. Run command echo 1 > /dev/watchdog and wait for 60s. Then, the target will reboot.",
|
||||
"expected_results": "The watchdog device exist in /dev/ directory and can reboot the target.\n"
|
||||
}
|
||||
},
|
||||
"summary": "Check_if_watchdog_can_reset_the_target_system"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "bsps-hw.bsps-hw.System_can_boot_up_via_NFS",
|
||||
"author": [
|
||||
{
|
||||
"email": "yi.zhao@windriver.com",
|
||||
"name": "yi.zhao@windriver.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Connect the board's first serial port to your workstation and then start up your favourite serial terminal so that you will be able to interact with the serial console. If you don't have a favourite, picocom is suggested: $ picocom /dev/ttyS0 -b 115200 ",
|
||||
"expected_results": "The system can boot up without problem\n"
|
||||
},
|
||||
"2": {
|
||||
"action": "Power up or reset the board and press a key on the terminal when prompted to get to the U-Boot command line ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Set up the environment in U-Boot: => setenv ipaddr => setenv serverip ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Download the kernel and boot: => tftp tftp $loadaddr vmlinux => bootoctlinux $loadaddr coremask=0x3 root=/dev/nfs rw nfsroot=: ip=::::edgerouter:eth0:off mtdparts=phys_mapped_flash:512k(boot0),512k(boot1),64k@3072k(eeprom)\n",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "System_can_boot_up_via_NFS"
|
||||
}
|
||||
}
|
||||
]
|
||||
@@ -0,0 +1,96 @@
|
||||
[
|
||||
{
|
||||
"test": {
|
||||
"@alias": "build-appliance.build-appliance.Build_core-image-minimal_with_build-appliance-image",
|
||||
"author": [
|
||||
{
|
||||
"email": "corneliux.stoicescu@intel.com",
|
||||
"name": "corneliux.stoicescu@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Build with AUTOREV or download from Autobuilder an image for Yocto Build Appliance. ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Boot the image under VMWare Player. ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Build qemux86 core-image-minimal using bitbake command line in the build-appliance-image ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Launch the image just built using runqemu. ",
|
||||
"expected_results": "core-image-minimal should build and boot. "
|
||||
}
|
||||
},
|
||||
"summary": "Build_core-image-minimal_with_build-appliance-image"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "build-appliance.build-appliance.Build_a_image_without_error_(added_recipe)",
|
||||
"author": [
|
||||
{
|
||||
"email": "sstncr@gmail.com",
|
||||
"name": "sstncr@gmail.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Launch Build Appliance",
|
||||
"expected_results": "User could build a image without error and the added package is in the image"
|
||||
},
|
||||
"2": {
|
||||
"action": "Set \"Machine\" in conf/local.conf, for example, qemuarm",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Install a new package to the image, for example, acpid. Set the following line in conf/local.conf: IMAGE_INSTALL:append = \" acpid\"",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Build a image using bitbake command line, for example, bitbake core-image-minimal",
|
||||
"expected_results": ""
|
||||
},
|
||||
"5": {
|
||||
"action": "After build finished, launch the image and check if the added package built into image",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "Build_a_image_without_error_(added_recipe)"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "build-appliance.build-appliance.Create_core-image-sato-sdk_using_build_appliance",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Build with AUTOREV or download from Autobuilder an image for Yocto Build Appliance. ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Boot the image under VMWare Player. ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Build qemux86 core-image-sato-sdk using bitbake command line in the build-appliance-image ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Launch the image just built using runqemu. ",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "Create_core-image-sato-sdk_using_build_appliance"
|
||||
}
|
||||
}
|
||||
]
|
||||
@@ -0,0 +1,294 @@
|
||||
[
|
||||
{
|
||||
"test": {
|
||||
"@alias": "crops-default.crops-default.sdkext_eSDK_devtool_build_make",
|
||||
"author": [
|
||||
{
|
||||
"email": "francisco.j.pedraza.gonzalez@intel.com",
|
||||
"name": "francisco.j.pedraza.gonzalez@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "IMPORTANT NOTE: The first 5 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Create the following tree of files <crops-esdk-workdir-workspace>/sdkext/files/myapp <crops-esdk-workdir-workspace>/sdkext/files/myapp_cmake \n\n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": " Create the following files within the myapp directory myapp.c and the Makefile. Write the following inside of each file: \n---------------------------------------- \nMakefile should contain \n\nall: myapp \n\nmyapp: myapp.o \n\t$(CC) $(LDFLAGS) $< -o $@ \n\nmyapp.o: myapp.c \n\t$(CC) $(CFLAGS) -c $< -o $@ \n\nclean: \n\trm -rf myapp.o myapp \n\n----------------------------- \nmyapp.c should contain \n\n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n \n\treturn 0; \n} \n------------------------------------ \n\n",
|
||||
"expected_results": "be sure that the indentations on the makefile are tabs not spaces. \n\n"
|
||||
},
|
||||
"5": {
|
||||
"action": " Create the following files within the myapp_cmake directory CMakeLists.txt and myapp.c. Write the following inside each file: \n\n------------------------------------ \nCMakeLists.txt should contain: \n\ncmake_minimum_required (VERSION 2.6) \nproject (myapp) \n# The version number. \nset (myapp_VERSION_MAJOR 1) \nset (myapp_VERSION_MINOR 0) \n\n# add the executable \nadd_executable (myapp myapp.c) \n\ninstall(TARGETS myapp \nRUNTIME DESTINATION bin) \n\n------------------------------------------ \nmyapp.c should contain: \n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n\n\treturn 0; \n} \n------------------------------------------------- \n\n",
|
||||
"expected_results": "Be sure that the indentations on CMakeLists.txt is tabs not spaces."
|
||||
},
|
||||
"6": {
|
||||
"action": " source environment-setup-i586-poky-linux \n\n",
|
||||
"expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
|
||||
},
|
||||
"7": {
|
||||
"action": " run command which devtool \n\n",
|
||||
"expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n "
|
||||
},
|
||||
"8": {
|
||||
"action": "devtool add myapp <directory>(this is myapp dir) \n\n\n",
|
||||
"expected_results": "The directory you should input is the myapp directory. This should automatically create the recipe myapp.bb under <crops-esdk-workdir-workspace>/recipes/myapp/myapp.bb"
|
||||
},
|
||||
"9": {
|
||||
"action": " devtool build myapp \n\n",
|
||||
"expected_results": "This should compile an image"
|
||||
},
|
||||
"10": {
|
||||
"action": " devtool reset myapp ",
|
||||
"expected_results": "This cleans sysroot of the myapp recipe, but it leaves the source tree intact. meaning it does not erase."
|
||||
}
|
||||
},
|
||||
"summary": "sdkext_eSDK_devtool_build_make"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "crops-default.crops-default.sdkext_devtool_build_esdk_package",
|
||||
"author": [
|
||||
{
|
||||
"email": "francisco.j.pedraza.gonzalez@intel.com",
|
||||
"name": "francisco.j.pedraza.gonzalez@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "IMPORTANT NOTE: The first 5 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": " Create the following tree of files <crops-esdk-workdir-workspace>/sdkext/files/myapp/ \n <crops-esdk-workdir-workspace>/sdkext/files/myapp_cmake \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": " Create the following files within the myapp directory myapp.c and the Makefile. Write the following inside of each file: \n---------------------------------------- \nMakefile should contain \n\nall: myapp \n\nmyapp: myapp.o \n\t$(CC) $(LDFLAGS) $< -o $@ \n\nmyapp.o: myapp.c \n\t$(CC) $(CFLAGS) -c $< -o $@ \n\nclean: \n\trm -rf myapp.o myapp \n\n----------------------------- \nmyapp.c should contain \n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n \n\treturn 0; \n} \n------------------------------------ \n\n",
|
||||
"expected_results": "be sure that the indentations on the makefile are tabs not spaces. \n\n"
|
||||
},
|
||||
"5": {
|
||||
"action": " Create the following files within the myapp_cmake directory CMakeLists.txt and myapp.c. Write the following inside each file: \n\n------------------------------------ \nCMakeLists.txt should contain: \n\ncmake_minimum_required (VERSION 2.6) \nproject (myapp) \n# The version number. \nset (myapp_VERSION_MAJOR 1) \nset (myapp_VERSION_MINOR 0) \n\n# add the executable \nadd_executable (myapp myapp.c) \n\ninstall(TARGETS myapp \nRUNTIME DESTINATION bin) \n\n------------------------------------------ \nmyapp.c should contain: \n\n#include<stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n\n\treturn 0; \n} \n------------------------------------------------- \n\n",
|
||||
"expected_results": "Be sure that the indentations on CMakeLists.txt is tabs not spaces. \n\n"
|
||||
},
|
||||
"6": {
|
||||
"action": " source environment-setup-i586-poky-linux \n\n",
|
||||
"expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
|
||||
},
|
||||
"7": {
|
||||
"action": " run command which devtool \n\n",
|
||||
"expected_results": " this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
|
||||
},
|
||||
"8": {
|
||||
"action": " devtool add myapp <directory> (this is myapp dir) \n\n",
|
||||
"expected_results": " The directory you should input is the myapp directory. This should automatically create the recipe myapp.bb under <crops-esdk-workdir-workspace>/recipes/myapp/myapp.bb \n\n"
|
||||
},
|
||||
"9": {
|
||||
"action": " devtool package myapp \n\n",
|
||||
"expected_results": " you should expect a package creation of myapp and it should be under the /tmp/deploy/ \n\n"
|
||||
},
|
||||
"10": {
|
||||
"action": " devtool reset myapp ",
|
||||
"expected_results": "This cleans sysroot of the myapp recipe, but it leaves the source tree intact. meaning it does not erase.\n"
|
||||
}
|
||||
},
|
||||
"summary": "sdkext_devtool_build_esdk_package"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "crops-default.crops-default.sdkext_devtool_build_cmake",
|
||||
"author": [
|
||||
{
|
||||
"email": "francisco.j.pedraza.gonzalez@intel.com",
|
||||
"name": "francisco.j.pedraza.gonzalez@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "IMPORTANT NOTE: The first 5 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": " Create the following tree of files <crops-esdk-workdir-workspace>/sdkext/files/myapp \n <crops-esdk-workdir-workspace>/sdkext/files/myapp_cmake \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": " Create the following files within the myapp directory myapp.c and the Makefile. Write the following inside of each file: \n---------------------------------------- \nMakefile should contain \n\nall: myapp \n\nmyapp: myapp.o \n\t$(CC) $(LDFLAGS) $< -o $@ \n\nmyapp.o: myapp.c \n\t$(CC) $(CFLAGS) -c $< -o $@ \n\nclean: \n\trm -rf myapp.o myapp \n\n----------------------------- \nmyapp.c should contain \n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n \n\treturn 0; \n} \n------------------------------------ \n\n",
|
||||
"expected_results": "be sure that the indentations on the makefile are tabs not spaces. \n\n"
|
||||
},
|
||||
"5": {
|
||||
"action": " Create the following files within the myapp_cmake directory CMakeLists.txt and myapp.c. Write the following inside each file: \n\n------------------------------------ \nCMakeLists.txt should contain: \n\ncmake_minimum_required (VERSION 2.6) \nproject (myapp) \n# The version number. \nset (myapp_VERSION_MAJOR 1) \nset (myapp_VERSION_MINOR 0) \n\n# add the executable \nadd_executable (myapp myapp.c) \n\ninstall(TARGETS myapp \nRUNTIME DESTINATION bin) \n\n------------------------------------------ \nmyapp.c should contain: \n\n#include \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n\n\treturn 0; \n} \n------------------------------------------------- \n\n",
|
||||
"expected_results": "Be sure that the indentations on CMakeLists.txt is tabs not spaces. \n\n"
|
||||
},
|
||||
"6": {
|
||||
"action": " source environment-setup-i586-poky-linux \n\n",
|
||||
"expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
|
||||
},
|
||||
"7": {
|
||||
"action": " run command which devtool \n\n",
|
||||
"expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
|
||||
},
|
||||
"8": {
|
||||
"action": " devtool add myapp <directory> (this is myapp_cmake dir) \n\n",
|
||||
"expected_results": "The directory you should input is the myapp_cmake directory. This should automatically create the recipe myapp.bb under <crops-esdk-workdir-workspace>/recipes/myapp/myapp.bb \n\n"
|
||||
},
|
||||
"9": {
|
||||
"action": " devtool build myapp \n\n",
|
||||
"expected_results": "This should compile an image \n\n"
|
||||
},
|
||||
"10": {
|
||||
"action": " devtool reset myapp ",
|
||||
"expected_results": "This cleans sysroot of the myapp recipe, but it leaves the source tree intact. meaning it does not erase. "
|
||||
}
|
||||
},
|
||||
"summary": "sdkext_devtool_build_cmake"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "crops-default.crops-default.sdkext_extend_autotools_recipe_creation",
|
||||
"author": [
|
||||
{
|
||||
"email": "francisco.j.pedraza.gonzalez@intel.com",
|
||||
"name": "francisco.j.pedraza.gonzalez@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "IMPORTANT NOTE: The first 2 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": " source environment-setup-i586-poky-linux \n\n",
|
||||
"expected_results": " This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
|
||||
},
|
||||
"4": {
|
||||
"action": "run command which devtool \n\n",
|
||||
"expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
|
||||
},
|
||||
"5": {
|
||||
"action": "devtool sdk-install -s libxml2 \n\n",
|
||||
"expected_results": "this should install libxml2 \n\n"
|
||||
},
|
||||
"6": {
|
||||
"action": "devtool add librdfa https://github.com/rdfa/librdfa \n\n",
|
||||
"expected_results": "This should automatically create the recipe librdfa.bb under /recipes/librdfa/librdfa.bb \n\n"
|
||||
},
|
||||
"7": {
|
||||
"action": "devtool build librdfa \n\n",
|
||||
"expected_results": "This should compile \n\n"
|
||||
},
|
||||
"8": {
|
||||
"action": "devtool reset librdfa ",
|
||||
"expected_results": "This cleans sysroot of the librdfa recipe, but it leaves the source tree intact. meaning it does not erase."
|
||||
}
|
||||
},
|
||||
"summary": "sdkext_extend_autotools_recipe_creation"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "crops-default.crops-default.sdkext_devtool_kernelmodule",
|
||||
"author": [
|
||||
{
|
||||
"email": "francisco.j.pedraza.gonzalez@intel.com",
|
||||
"name": "francisco.j.pedraza.gonzalez@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "IMPORTANT NOTE: The first 2 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "source environment-setup-i586-poky-linux \n\n",
|
||||
"expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n \n"
|
||||
},
|
||||
"4": {
|
||||
"action": "run command which devtool \n\n",
|
||||
"expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
|
||||
},
|
||||
"5": {
|
||||
"action": "devtool add kernel-module-hello-world https://git.yoctoproject.org/git/kernel-module-hello-world \n\n",
|
||||
"expected_results": "This should automatically create the recipe kernel-module-hello-world.bb under <crops-esdk-workdir-workspace>/recipes/kernel-module-hello-world/kernel-module-hello-world.bb "
|
||||
},
|
||||
"6": {
|
||||
"action": "devtool build kernel-module-hello-world \n\n",
|
||||
"expected_results": "This should compile an image \n\n"
|
||||
},
|
||||
"7": {
|
||||
"action": "devtool reset kernel-module-hello-world ",
|
||||
"expected_results": "This cleans sysroot of the kernel-module-hello-world recipe, but it leaves the source tree intact. meaning it does not erase."
|
||||
}
|
||||
},
|
||||
"summary": "sdkext_devtool_kernelmodule"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "crops-default.crops-default.sdkext_recipes_for_nodejs",
|
||||
"author": [
|
||||
{
|
||||
"email": "francisco.j.pedraza.gonzalez@intel.com",
|
||||
"name": "francisco.j.pedraza.gonzalez@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "IMPORTANT NOTE: The first 2 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\nlet's say variable npm = npm://registry.npmjs.org;name=winston;version=2.2.0 \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "source environment-setup-i586-poky-linux \n\n",
|
||||
"expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
|
||||
},
|
||||
"4": {
|
||||
"action": "run command which devtool \n\n",
|
||||
"expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
|
||||
},
|
||||
"5": {
|
||||
"action": " 4a) git clone git://git.openembedded.org/meta-openembedded in layers/build directory \n \n4b) Add meta-openembedded/meta-oe in bblayer.conf as mentioned below: ${SDKBASEMETAPATH}/layers/build/meta-openembedded/meta-oe \\ \n\n4c) devtool add \"npm://registry.npmjs.org;name=npm;version=2.2.0\" \n\n",
|
||||
"expected_results": " This should automatically create the recipe npm.bb under /recipes/npm/npm.bb \n\n"
|
||||
},
|
||||
"6": {
|
||||
"action": "devtool build npm \n\n",
|
||||
"expected_results": "This should compile an image \n\n"
|
||||
},
|
||||
"7": {
|
||||
"action": " devtool reset npm",
|
||||
"expected_results": "This cleans sysroot of the npm recipe, but it leaves the source tree intact. meaning it does not erase."
|
||||
}
|
||||
},
|
||||
"summary": "sdkext_recipes_for_nodejs"
|
||||
}
|
||||
}
|
||||
]
|
||||
@@ -0,0 +1,322 @@
|
||||
[
|
||||
{
|
||||
"test": {
|
||||
"@alias": "eclipse-plugin.eclipse-plugin.support_SSH_connection_to_Target",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "In Eclipse, swich to Remote System Explorer to create a connention baseed on SSH, input the remote target IP address as the Host name, make sure disable the proxy in Window->Preferences->General->Network Connection, set Direct as Active Provider field. ",
|
||||
"expected_results": "the connection based on SSH could be set up."
|
||||
},
|
||||
"2": {
|
||||
"action": "Configure connection from Eclipse: Run->Run Configurations->C/C++ Remote Application\\ ->New Connection->General->SSH Only ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Then right click to connect, input the user ID and password. ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "expand the connection, it will show the Sftp Files etc. \nNOTE. Might need to change dropbear to openssh and add the packagegroup-core-eclipse-debug recipe",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "support_SSH_connection_to_Target"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "eclipse-plugin.eclipse-plugin.Launch_QEMU_from_Eclipse",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Set the Yocto ADT's toolchain root location, sysroot location and kernel, in the menu Window -> Preferences -> Yocto ADT. \n \n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "wget https://downloads.yoctoproject.org/releases/yocto/yocto-$VERSION/machines/qemu/qemux86/ (ex:core-image-sato-sdk-qemux86-date-rootfs-tar-bz2) \nsource /opt/poky/version/environment-setup-i585-poky-linux \n\nExtract qemu with runqemu-extract-sdk /home/user/file(ex.core-image-sato-sdk-qemux86.bz2) \n/home/user/qemux86-sato-sdk \n\n",
|
||||
"expected_results": " Qemu can be lauched normally."
|
||||
},
|
||||
"3": {
|
||||
"action": "(a)Point to the Toolchain: \n \nIf you are using a stand-alone pre-built toolchain, you should be pointing to the /opt/poky/{test-version} directory as Toolchain Root Location. This is the default location for toolchains installed by the ADT Installer or by hand. If ADT is installed in other location, use that location as Toolchain location.\nIf you are using a system-derived toolchain, the path you provide for the Toolchain Root Location field is the Yocto Project's build directory. \n \n E.g:/home/user/yocto/poky/build \n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "(b)Specify the Sysroot Location: \nSysroot Location is the location where the root filesystem for the target hardware is created on the development system by the ADT Installer (SYSROOT in step 2 of the case ADT installer Installation). \n \n Local : e.g: /home/user/qemux86-sato-sdk \nUsing ADT : e.g :/home/user/test-yocto/qemux86 \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"5": {
|
||||
"action": "(c)Select the Target Architecture: \n \nThe target architecture is the type of hardware you are going to use or emulate. Use the pull-down Target Architecture menu to make your selection. \n \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"6": {
|
||||
"action": "(d) QEMU: \nSelect this option if you will be using the QEMU emulator. Specify the Kernel matching the QEMU architecture you are using. \n wget https://downloads.yoctoproject.org/releases/yocto/yocto-$VERSION/machines/qemu/qemux86/bzImage-qemux86.bin \n e.g: /home/$USER/yocto/adt-installer/download_image/bzImage-qemux86.bin \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"7": {
|
||||
"action": "(e) select OK to save the settings. \n\n\n1: In the Eclipse toolbar, expose the Run -> External Tools menu. Your image should appear as a selectable menu item. \n2: Select your image in the navigation pane to launch the emulator in a new window. \n3: If needed, enter your host root password in the shell window at the prompt. This sets up a Tap 0 connection needed for running in user-space NFS mode. \n",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "Launch_QEMU_from_Eclipse"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "eclipse-plugin.eclipse-plugin.Relocatable_SDK_-_C_-_Build_Hello_World_ANSI_C_Autotools_Project",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Launch a QEMU of target environment.(Reference to case \"ADT - Launch qemu by eclipse\") ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Select File -> New -> Project.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Double click C/C++.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Click C or C++ Project to create the project.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"5": {
|
||||
"action": "Expand Yocto ADT Project.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"6": {
|
||||
"action": "Select Hello World ANSI C Autotools Project.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"7": {
|
||||
"action": "Put a name in the Project name. Do not use hyphens as part of the name. \n \n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"8": {
|
||||
"action": "Click Next.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"9": {
|
||||
"action": "Add information in the Author and Copyright notice fields. \n1",
|
||||
"expected_results": ""
|
||||
},
|
||||
"10": {
|
||||
"action": "Click Finish. \n1",
|
||||
"expected_results": ""
|
||||
},
|
||||
"11": {
|
||||
"action": "If the \"open perspective\" prompt appears, click \"Yes\" so that you open the C/C++ perspective. \n1",
|
||||
"expected_results": ""
|
||||
},
|
||||
"12": {
|
||||
"action": "In the Project Explorer window, right click the project -> Reconfigure project. \n1",
|
||||
"expected_results": ""
|
||||
},
|
||||
"13": {
|
||||
"action": "In the Project Explorer window, right click the project -> Build project. \n1",
|
||||
"expected_results": "Under the Project files, a new folder appears called Binaries. This indicates that the compilation have been successful and the project binary have been created. \n"
|
||||
},
|
||||
"14": {
|
||||
"action": "Right click it again and Run as -> Run Configurations. \n\t\t\tUnder Run Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \n\t\t\tin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application. e.g.: /home/root/myapplication \n\t\t\tIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button. \n1",
|
||||
"expected_results": "step 14 to step 16 -> Build succeed and the console outputs Hello world, you can also check the output on target."
|
||||
},
|
||||
"15": {
|
||||
"action": "After all settings are done, select the Run button on the bottom right corner \n\n1",
|
||||
"expected_results": ""
|
||||
},
|
||||
"16": {
|
||||
"action": "Repeat the steps 14-15, but instead of using Run Configurations use Debug Configurations: \nRight click it again and Debug as -> Debug Configurations \nUnder Debug Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \nin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application.\ne.g.: /home/root/myapplication \nIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button \n1",
|
||||
"expected_results": ""
|
||||
},
|
||||
"17": {
|
||||
"action": "After all settings are done, select the Debug button on the bottom right corner",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "Relocatable_SDK_-_C_-_Build_Hello_World_ANSI_C_Autotools_Project"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "eclipse-plugin.eclipse-plugin.Relocatable_SDK_-_C++_-_Build_Hello_World_C++_Autotools_project",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Launch a QEMU of target environment.(Reference to case \"ADT - Launch qemu by eclipse\") ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Select File -> New -> Project. ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Double click C/C++. ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Click C or C++ Project to create the project. ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"5": {
|
||||
"action": "Expand Yocto ADT Project. ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"6": {
|
||||
"action": "Select Hello World ANSI C++ Autotools Project. ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"7": {
|
||||
"action": "Put a name in the Project name. Do not use hyphens as part of the name. \n \n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"8": {
|
||||
"action": "Click Next.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"9": {
|
||||
"action": "Add information in the Author and Copyright notice fields.",
|
||||
"expected_results": ""
|
||||
},
|
||||
"10": {
|
||||
"action": "Click Finish. \n1",
|
||||
"expected_results": ""
|
||||
},
|
||||
"11": {
|
||||
"action": "If the \"open perspective\" prompt appears, click \"Yes\" so that you open the C/C++ perspective. \n1",
|
||||
"expected_results": ""
|
||||
},
|
||||
"12": {
|
||||
"action": "In the Project Explorer window, right click the project -> Reconfigure project. \n1",
|
||||
"expected_results": ""
|
||||
},
|
||||
"13": {
|
||||
"action": "In the Project Explorer window, right click the project -> Build project. \n\n1",
|
||||
"expected_results": "under the Project files, a new folder appears called Binaries. This indicates that the compilation have been successful and the project binary have been created. \n"
|
||||
},
|
||||
"14": {
|
||||
"action": "Right click it again and Run as -> Run Configurations. \n\t\t\tUnder Run Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \n\t\t\tin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application. e.g.: /home/root/myapplication \n\t\t\tIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button. \n1",
|
||||
"expected_results": "step 14 to step 16 -> Build succeed and the console outputs Hello world, you can also check the output on target."
|
||||
},
|
||||
"15": {
|
||||
"action": "After all settings are done, select the Run button on the bottom right corner \n\n1",
|
||||
"expected_results": ""
|
||||
},
|
||||
"16": {
|
||||
"action": "Repeat the steps 14-15, but instead of using Run Configurations use Debug Configurations: \n\t\tRight click it again and Debug as -> Debug Configurations \n\t\tUnder Debug Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \n\t\tin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application. \n\t\te.g.: /home/root/myapplication \n\t\tIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button \n1",
|
||||
"expected_results": ""
|
||||
},
|
||||
"17": {
|
||||
"action": "After all settings are done, select the Debug button on the bottom right corner",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "Relocatable_SDK_-_C++_-_Build_Hello_World_C++_Autotools_project"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "eclipse-plugin.eclipse-plugin.Build_Eclipse_Plugin_from_source",
|
||||
"author": [
|
||||
{
|
||||
"email": "laurentiu.serban@intel.com",
|
||||
"name": "laurentiu.serban@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Clone eclipse-poky source. \n \n - git clone git://git.yoctoproject.org/eclipse-poky \n\n",
|
||||
"expected_results": "Eclipse plugin is successfully installed \n\nDocumentation is there. For example if you have release yocto-2.0.1 you will found on https://downloads.yoctoproject.org/releases/yocto/yocto-2.0.1/eclipse-plugin/mars/ archive with documentation like org.yocto.doc-development-$date.zip \n \n"
|
||||
},
|
||||
"2": {
|
||||
"action": "Checkout correct tag. \n\n - git checkout <eclipse-version>/<yocto-version> \n\n",
|
||||
"expected_results": "After plugin is build you must have 4 archive in foder scripts from eclipse-poky: \n - org.yocto.bc - mars-master-$date.zip \n - org.yocto.doc - mars-master-$date.zip --> documentation \n - org.yocto.sdk - mars-master-$date.zip \n - org.yocto.sdk - mars-master-$date.-archive.zip --> plugin "
|
||||
},
|
||||
"3": {
|
||||
"action": "Move to scripts/ folder. \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Run ./setup.sh \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"5": {
|
||||
"action": "When the script finishes, it prompts a command to issue to build the plugin. It should look similar to the following: \n\n$ ECLIPSE_HOME=/eclipse-poky/scripts/eclipse ./build.sh /&1 | tee -a build.log \n\nHere, the three arguments to the build script are tag name, branch for documentation and release name. \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"6": {
|
||||
"action": "On an eclipse without the Yocto Plugin, select \"Install New Software\" from Help pull-down menu \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"7": {
|
||||
"action": "Select Add and from the dialog choose Archive... Look for the *archive.zip file that was built previously with the build.sh script. Click OK. \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"8": {
|
||||
"action": "Select all components and proceed with Installation of plugin. Restarting eclipse might be required.\n",
|
||||
"expected_results": ""
|
||||
}
|
||||
},
|
||||
"summary": "Build_Eclipse_Plugin_from_source"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "eclipse-plugin.eclipse-plugin.Eclipse_Poky_installation_and_setup",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Install SDK \n\ta)Download https://autobuilder.yocto.io/pub/releases//toolchain/x86_64/poky-glibc-x86_64-core-\timage-sato-i586-toolchain-.sh \n\tb)Run the SDK installer and accept the default installation directory ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Install \"Eclipse IDE for C/C++ Developers\" Oxygen release (4.7.0) \n\ta) Go to https://www.eclipse.org/downloads/packages/all, click \"Oxygen R\" \n\tb) Click to download the build for your OS \n\tc) Click \"Download\" button to download from a mirror \n\td) Run \"tar xf\" to extract the downloaded archive ",
|
||||
"expected_result": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "Install \"Eclipse IDE for C/C++ Developers\" Oxygen release (4.7.0) (Continue) \n\te) Run \"eclipse/eclipse\" to start Eclipse \n\tf) Optional step for host machine within Intel network: In Eclipse workbench window, go to \"Window\" menu -> \"Preferences...\". \n\tg) In \"Preferences\" dialog, go to \"General\" -> \"Network Connections\", set \"Active Provider\" to \"Manual\". In \"Proxy \tentries\" table, select HTTP and click \"Edit\" and enter host \"proxy-chain.intel.com\" port 911, click OK. Repeat for HTTPS with port 912 \nClick OK to close \"Preferences\" dialog. \n\th) Go to \"File\" menu -> \"Restart\" to restart Eclipse for proxy settings to take effect. ",
|
||||
"expected_result": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Install Eclipse Poky plugins \n\ta) Download https://autobuilder.yocto.io/pub/releases/<yocto-version>/eclipse-plugin/<eclipse-version>/org.yocto.sdk-development-<date>-archive.zip \n\tb) In Eclipse workbench window, go to \"Help\" menu -> \"Install New Software...\" \n\tc) In \"Install\" dialog, click \"Add...\" button \n\td) In \"Add Repository\" dialog, enter \"Eclipse Poky\" for (repository) Name, click \"Archive...\" ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"5": {
|
||||
"action": "Install Eclipse Poky plugins (continue) \n\te) In \"Repository archive\" browse dialog, select the downloaded Eclipse Poky repository archive \n\tf) Back in \"Add Repository\" dialog, click \"OK\" \n\tg) Back in \"Install\" dialog, make sure \"Work with:\" is set to \"Eclipse Poky\" repository, tick \"Yocto Project \tDocumentation Plug-in\" and \"Yocto Project SDK Plug-in\", click \"Next >\" and verify plugins/features name/version, \tclick \"Next >\" and accept license agreement, click \"Finish\" \n\th) If \"Security Warning\" dialog appears, click \"OK\" to install unsigned content. \n\ti) In \"Software Updates\" dialog, click \"Yes\" to restart Eclipse to complete Eclipse Poky plugins installation. ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"6": {
|
||||
"action": "Setup Eclipse Poky to use SDK \n\ta) In Eclipse workbench window, go to \"Window\" menu -> \"Preferences\". \n\tb) In \"Preferences\" window, go to \"Yocto Project SDK\", in \"Cross Compiler Options\" frame, select \"Standalone pre-\tbuilt toolchain\". ",
|
||||
"expected_results": "Eclipse Poky plugins installed and running successfully, e.g. observe that \"Yocto Project Tools\" menu is available on Eclipse workbench window."
|
||||
}
|
||||
},
|
||||
"summary": "Eclipse_Poky_installation_and_setup"
|
||||
}
|
||||
}
|
||||
]
|
||||
@@ -0,0 +1,200 @@
|
||||
[
|
||||
{
|
||||
"test": {
|
||||
"@alias": "kernel-dev.kernel-dev.Kernel_dev_defconfig",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_7 ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_7",
|
||||
"expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_7"
|
||||
}
|
||||
},
|
||||
"summary": "Kernel_dev_defconfig"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "kernel-dev.kernel-dev.Kernel_dev_defconfig+fragments",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_8 ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_8",
|
||||
"expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_8"
|
||||
}
|
||||
},
|
||||
"summary": "Kernel_dev_defconfig+fragments"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "kernel-dev.kernel-dev.Kernel_dev_Applying_patches",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps",
|
||||
"expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results"
|
||||
}
|
||||
},
|
||||
"summary": "Kernel_dev_Applying_patches"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "kernel-dev.kernel-dev.Kernel_dev_linux-yocto-local-source",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_2 ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_2",
|
||||
"expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_2"
|
||||
}
|
||||
},
|
||||
"summary": "Kernel_dev_linux-yocto-local-source"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "kernel-dev.kernel-dev.Kernel_dev_linux-yocto-custom-local-source",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_3 ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_3",
|
||||
"expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_3"
|
||||
}
|
||||
},
|
||||
"summary": "Kernel_dev_linux-yocto-custom-local-source"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "kernel-dev.kernel-dev.Kernel_dev_recipe-space_meta",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_5 ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_5",
|
||||
"expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_5"
|
||||
}
|
||||
},
|
||||
"summary": "Kernel_dev_recipe-space_meta"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "kernel-dev.kernel-dev.Kernel_dev_External_source",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_6 ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_6",
|
||||
"expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_6"
|
||||
}
|
||||
},
|
||||
"summary": "Kernel_dev_External_source"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "kernel-dev.kernel-dev.Kernel_dev_building_external_modules(hello-mod)",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_10 ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup_10",
|
||||
"expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_10"
|
||||
}
|
||||
},
|
||||
"summary": "Kernel_dev_building_external_modules(hello-mod)"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "kernel-dev.kernel-dev.Kernel_dev_local_parallel_meta",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wikioproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_4 ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_4",
|
||||
"expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_4"
|
||||
}
|
||||
},
|
||||
"summary": "Kernel_dev_local_parallel_meta"
|
||||
}
|
||||
}
|
||||
]
|
||||
@@ -0,0 +1,158 @@
|
||||
[
|
||||
{
|
||||
"test": {
|
||||
"@alias": "oe-core.bitbake.Test_bitbake_menuconfig",
|
||||
"author": [
|
||||
{
|
||||
"email": "jose.perez.carranza@intel.com",
|
||||
"name": "jose.perez.carranza@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "clone poky \n \n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "cd poky \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "source oe-init-build-env && cd build \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "set below in local.conf \n\n \tMACHINE = \"qemux86\" \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"5": {
|
||||
"action": "bitbake linux-yocto -c kernel_configme -f \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"6": {
|
||||
"action": "bitbake linux-yocto -c menuconfig \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"7": {
|
||||
"action": "Once menuconfig launches, use the interface to navigate through the selections and \n enable option \"64-bit kernel\" \n\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"8": {
|
||||
"action": "Save changes and set name of the file as \"test.config\" ",
|
||||
"expected_results": "Open file: \n \npoky/build//tmp/work/qemux86-poky-linux/linux-yocto/4.X.X+*/linux-qemux86-standard-build/test.config \n \n \n\nand verify that changes are present in the file as follows: \n \nCONFIG_64BIT=y \n \nCONFIG_X86_64=y"
|
||||
}
|
||||
},
|
||||
"summary": "Test_bitbake_menuconfig"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "oe-core.bitbake.test_bitbake_devshell",
|
||||
"author": [
|
||||
{
|
||||
"email": "jose.perez.carranza@intel.com",
|
||||
"name": "jose.perez.carranza@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "clone poky ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "cd poky ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "source oe-init-build-env && cd build ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "bitbake matchbox-desktop ",
|
||||
"expected_results": "Package was build correctly "
|
||||
},
|
||||
"5": {
|
||||
"action": "bitbake matchbox-desktop -c devshell ",
|
||||
"expected_results": "A terminal with a shell prompt within the OpenEmbedded build environment is opened "
|
||||
},
|
||||
"6": {
|
||||
"action": "Verify that \"matchbox-desktop\" binary file is not created under\"src\" directory ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"7": {
|
||||
"action": "Run command:./configure ${CONFIGUREOPTS} && make ",
|
||||
"expected_results": "Verify that \"matchbox-desktop\" binary file was created successfully under \"src/\" directory "
|
||||
},
|
||||
"8": {
|
||||
"action": "Exit fromthe devshell terminal,exit ",
|
||||
"expected_results": "Terminal back to the build directory"
|
||||
}
|
||||
},
|
||||
"summary": "test_bitbake_devshell"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "oe-core.bitbake.test_dependency_explorer_is_launched",
|
||||
"author": [
|
||||
{
|
||||
"email": "jose.perez.carranza@intel.com",
|
||||
"name": "jose.perez.carranza@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "clone poky ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "cd poky ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "source oe-init-build-env ",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "bitbake -u taskexp -g core-image-full-cmdline \n\nNOTE: To execute the last command of this test, it's necessary that the machine is executing an X11 server, or if that's not the case (for example, if running the test on a headless server), it is required to enable ssh X11 forwarding on both, the server and the client, and have the X11 server running on the client. \n\nThe instructions to enable X11 forwarding vary between distributions. But for example, these are the steps to enable it between a server running openSUSE Leap 42.1 and a client with Fedora 24: \nA. On the server, make sure /home//.ssh/config contains the line: \n ForwardX11 yes \nB. On the server, make sure xauth is installed by running: \n which xauth \nC. On the client, connect to the server, enabling X11 forwarding, for example by using: \n ssh -X user@server \nNOTE 2: depexp was renamed to taskexp on 2.3 M4",
|
||||
"expected_results": "Verify that a \"dependency explorer\" is opened and file \n dependencies are listed "
|
||||
}
|
||||
},
|
||||
"summary": "test_dependency_explorer_is_launched"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test": {
|
||||
"@alias": "oe-core.bitbake.test_bitbake_sane_error_for_invalid_layer",
|
||||
"author": [
|
||||
{
|
||||
"email": "jose.perez.carranza@intel.com",
|
||||
"name": "jose.perez.carranza@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "clone poky \n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"2": {
|
||||
"action": "cd poky \n \n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"3": {
|
||||
"action": "source oe-init-build-env && cd build \n \n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"4": {
|
||||
"action": "Add a invalid layer to conf/bblayers.conf \"<poky dir>/my-invalid-layer\" \n\t\n",
|
||||
"expected_results": ""
|
||||
},
|
||||
"5": {
|
||||
"action": "bitbake core-image-minimal",
|
||||
"expected_results": "Below error should be displayed:\n\"ERROR: Layer directory does not exist! Please check BBLAYERS in <poky dir>/<build dir>/conf/bblayers.conf\""
|
||||
}
|
||||
},
|
||||
"summary": "test_bitbake_sane_error_for_invalid_layer"
|
||||
}
|
||||
}
|
||||
]
|
||||
@@ -0,0 +1,32 @@
|
||||
[
|
||||
{
|
||||
"test": {
|
||||
"@alias": "sdk.sdk_runqemu.test_install_cross_toolchain_can_run_multiple_qemu_for_x86",
|
||||
"author": [
|
||||
{
|
||||
"email": "ee.peng.yeoh@intel.com",
|
||||
"name": "ee.peng.yeoh@intel.com"
|
||||
}
|
||||
],
|
||||
"execution": {
|
||||
"1": {
|
||||
"action": "Prepare kernel, rootfs tar.bz2 image, and qemu configuration \n \ta. Download kernel, rootfs tar.bz2 image and qemu configuration from public autobuilder webpage \n \tb. Goto https://autobuilder.yocto.io/pub/releases/<target_release>/machines/qemu/qemux86/ \n \tc. Download \n \t \ti. rootfs tar.bz2: core-image-sato-sdk-qemux86.tar.bz2 \n \t\tii. kernel: bzImage-qemux86.bin \n \t\tiii. qemu configuration: core-image-sato-sdk-qemux86.qemuboot.conf ",
|
||||
"expected_results": "Download completes successfully."
|
||||
},
|
||||
"2": {
|
||||
"action": "Download & install toolchain tarball matching your host from public autobuilder \n \ta. Goto https://autobuilder.yocto.io/pub/releases/<target_release>/toolchain/x86_64/ \n \tb. Download poky-glibc-x86_64-core-image-sato-<type-arch>-toolchain-<release-version>.sh \n \tc. Run command: poky-glibc-x86_64-core-image-sato-<type-arch>-toolchain-<release-version>.sh \n \td. After installation toolchain Run source command : source /toolchain-installed-path/environment-setup-<architecture name>-poky-linux",
|
||||
"expected_results": "Toolchain gets installed successfully."
|
||||
},
|
||||
"3": {
|
||||
"action": "Extract rootfs twice into two images \n \ta. Run 2 commands below: \n runqemu-extract-sdk core-image-sato-sdk-qemux86.tar.bz2 qemux86_rootfs_image1 \n runqemu-extract-sdk core-image-sato-sdk-qemux86.tar.bz2 qemux86_rootfs_image2",
|
||||
"expected_results": "Both images build successfully."
|
||||
},
|
||||
"4": {
|
||||
"action": " From the 2 terminals, start qemu to boot up both two images \n \ta. Run 2 commands below: \n runqemu <kernel-name> core-image-sato-sdk-qemux86.qemuboot.conf qemux86_rootfs_image1 \n runqemu <kernel-name> core-image-sato-sdk-qemux86.qemuboot.conf qemux86_rootfs_image2 ",
|
||||
"expected_results": "Expect both qemu to boot up successfully."
|
||||
}
|
||||
},
|
||||
"summary": "test_install_cross_toolchain_can_run_multiple_qemu_for:x86"
|
||||
}
|
||||
}
|
||||
]
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,598 @@
|
||||
#
|
||||
# Copyright (C) 2013 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Main unittest module used by testimage.bbclass
|
||||
# This provides the oeRuntimeTest base class which is inherited by all tests in meta/lib/oeqa/runtime.
|
||||
|
||||
# It also has some helper functions and it's responsible for actually starting the tests
|
||||
|
||||
import os, re, sys
|
||||
import unittest
|
||||
import inspect
|
||||
import subprocess
|
||||
import signal
|
||||
import shutil
|
||||
import functools
|
||||
try:
|
||||
import bb
|
||||
except ImportError:
|
||||
pass
|
||||
import logging
|
||||
|
||||
import oeqa.runtime
|
||||
# Exported test doesn't require sdkext
|
||||
try:
|
||||
import oeqa.sdkext
|
||||
except ImportError:
|
||||
pass
|
||||
from oeqa.utils.decorators import LogResults, gettag
|
||||
|
||||
logger = logging.getLogger("BitBake")
|
||||
|
||||
def getVar(obj):
    """Return a mapping whose missing keys are resolved from *obj*'s test tags.

    Used as the locals mapping when evaluating a tag expression, so any
    name in the expression is looked up as a tag on the test case.
    """
    class TagLookupDict(dict):
        # Unknown keys fall back to the tag value attached to the testcase
        def __getitem__(self, key):
            return gettag(obj, key)

    return TagLookupDict()
|
||||
|
||||
def checkTags(tc, tagexp):
    """Evaluate the boolean tag expression *tagexp* against test case *tc*.

    NOTE(review): uses eval() on the expression string; this is build
    configuration (TEST_SUITES_TAGS), not untrusted external input.
    """
    return eval(tagexp, None, getVar(tc))
|
||||
|
||||
def filterByTagExp(testsuite, tagexp):
    """Recursively filter *testsuite*, keeping cases whose tags satisfy *tagexp*.

    A falsy expression disables filtering and returns the suite unchanged.
    """
    if not tagexp:
        return testsuite
    kept = []
    for item in testsuite:
        if isinstance(item, unittest.BaseTestSuite):
            # Nested suite: filter its contents recursively
            kept.append(filterByTagExp(item, tagexp))
        elif checkTags(item, tagexp):
            kept.append(item)
    return testsuite.__class__(kept)
|
||||
|
||||
@LogResults
class oeTest(unittest.TestCase):
    """Base class for all oeqa test cases.

    Class attribute ``tc`` (the active TestContext) is injected by
    TestContext.loadTests() before any test runs.
    """

    longMessage = True

    @classmethod
    def hasPackage(cls, pkg):
        """
        True if the full package name exists in the manifest, False otherwise.
        """
        # Fix: classmethods previously named their class argument 'self'
        return pkg in oeTest.tc.pkgmanifest

    @classmethod
    def hasPackageMatch(cls, match):
        """
        True if match exists in the manifest as a regular expression substring,
        False otherwise.
        """
        for s in oeTest.tc.pkgmanifest:
            if re.match(match, s):
                return True
        return False

    @classmethod
    def hasFeature(cls, feature):
        """Return True if *feature* is in IMAGE_FEATURES or DISTRO_FEATURES."""
        return (feature in oeTest.tc.imagefeatures or
                feature in oeTest.tc.distrofeatures)
|
||||
|
||||
class oeRuntimeTest(oeTest):
    """Base class for tests that run against a live target (DUT).

    Relies on the shared TestContext (``self.tc``) for the target handle,
    per-test package install/uninstall and the SIGTERM shutdown flag.
    """

    def __init__(self, methodName='runTest'):
        # Cache the target handle from the class-level test context
        self.target = oeRuntimeTest.tc.target
        super(oeRuntimeTest, self).__init__(methodName)

    def setUp(self):
        """Install needed packages, verify the target is alive, then run setUpLocal()."""
        # Install packages in the DUT
        self.tc.install_uninstall_packages(self.id())

        # Check if test needs to run
        if self.tc.sigterm:
            self.fail("Got SIGTERM")
        elif (type(self.target).__name__ == "QemuTarget"):
            self.assertTrue(self.target.check(), msg = "Qemu not running?")

        self.setUpLocal()

    # a setup method before tests but after the class instantiation
    def setUpLocal(self):
        """Hook for subclasses; runs at the end of setUp(). Default: no-op."""
        pass

    def tearDown(self):
        """Uninstall per-test packages, then run tearDownLocal()."""
        # Uninstall packages in the DUT
        self.tc.install_uninstall_packages(self.id(), False)
        self.tearDownLocal()

    # Method to be run after tearDown and implemented by child classes
    def tearDownLocal(self):
        """Hook for subclasses; runs at the end of tearDown(). Default: no-op."""
        pass
|
||||
|
||||
def getmodule(pos=2):
    """Return the module name of the frame *pos* levels up the call stack."""
    # inspect.stack()[0] is this frame; the immediate caller is index 1.
    caller_frame = inspect.stack()[pos]
    return inspect.getmodulename(caller_frame[1])
|
||||
|
||||
def skipModule(reason, pos=2):
    """Skip the calling test module, unless TEST_SUITES explicitly required it.

    Raises unittest.SkipTest for optional modules; raises a hard error when
    a required module tries to skip itself (the condition is then a bug).
    """
    modname = getmodule(pos)
    if modname not in oeTest.tc.testsrequired:
        raise unittest.SkipTest("%s: %s" % (modname, reason))
    raise Exception("\nTest %s wants to be skipped.\nReason is: %s" \
        "\nTest was required in TEST_SUITES, so either the condition for skipping is wrong" \
        "\nor the image really doesn't have the required feature/package when it should." % (modname, reason))
|
||||
|
||||
def skipModuleIf(cond, reason):
    """Skip the calling module when *cond* is true."""
    if cond:
        skipModule(reason, 3)
|
||||
|
||||
def skipModuleUnless(cond, reason):
    """Skip the calling module when *cond* is false."""
    if not cond:
        skipModule(reason, 3)
|
||||
|
||||
_buffer_logger = ""
|
||||
def custom_verbose(msg, *args, **kwargs):
|
||||
global _buffer_logger
|
||||
if msg[-1] != "\n":
|
||||
_buffer_logger += msg
|
||||
else:
|
||||
_buffer_logger += msg
|
||||
try:
|
||||
bb.plain(_buffer_logger.rstrip("\n"), *args, **kwargs)
|
||||
except NameError:
|
||||
logger.info(_buffer_logger.rstrip("\n"), *args, **kwargs)
|
||||
_buffer_logger = ""
|
||||
|
||||
class TestContext(object):
    """Holds the test configuration and builds the unittest suite to run.

    Subclasses supply _get_test_namespace() and _get_test_suites() to say
    where test modules live and which suites were requested.
    """

    def __init__(self, d, exported=False):
        self.d = d

        self.testsuites = self._get_test_suites()

        if exported:
            # Exported tests are shipped next to this file, outside BBPATH
            path = [os.path.dirname(os.path.abspath(__file__))]
            extrapath = ""
        else:
            path = d.getVar("BBPATH").split(':')
            extrapath = "lib/oeqa"

        self.testslist = self._get_tests_list(path, extrapath)
        self.testsrequired = self._get_test_suites_required()

        self.filesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runtime/files")
        self.corefilesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
        self.imagefeatures = d.getVar("IMAGE_FEATURES").split()
        self.distrofeatures = d.getVar("DISTRO_FEATURES").split()

    # get testcase list from specified file
    # if path is a relative path, then relative to build/conf/
    def _read_testlist(self, fpath, builddir):
        """Return the test names in manifest *fpath* as one space-joined string."""
        if not os.path.isabs(fpath):
            fpath = os.path.join(builddir, "conf", fpath)
        if not os.path.exists(fpath):
            bb.fatal("No such manifest file: ", fpath)
        tcs = []
        for line in open(fpath).readlines():
            line = line.strip()
            # Skip blank lines and comments
            if line and not line.startswith("#"):
                tcs.append(line)
        return " ".join(tcs)

    # return test list by type also filter if TEST_SUITES is specified
    def _get_tests_list(self, bbpath, extrapath):
        """Resolve self.testsuites into fully-qualified oeqa module names."""
        testslist = []

        # Renamed from 'type' to avoid shadowing the builtin
        namespace = self._get_test_namespace()

        # This relies on lib/ under each directory in BBPATH being added to sys.path
        # (as done by default in base.bbclass)
        for testname in self.testsuites:
            if testname != "auto":
                if testname.startswith("oeqa."):
                    # Already fully qualified
                    testslist.append(testname)
                    continue
                found = False
                for p in bbpath:
                    if os.path.exists(os.path.join(p, extrapath, namespace, testname + ".py")):
                        testslist.append("oeqa." + namespace + "." + testname)
                        found = True
                        break
                    elif os.path.exists(os.path.join(p, extrapath, namespace, testname.split(".")[0] + ".py")):
                        testslist.append("oeqa." + namespace + "." + testname)
                        found = True
                        break
                if not found:
                    bb.fatal('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname)

        if "auto" in self.testsuites:
            def add_auto_list(path):
                # Pick up every non-private python module in the directory
                files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
                for f in files:
                    module = 'oeqa.' + namespace + '.' + f[:-3]
                    if module not in testslist:
                        testslist.append(module)

            for p in bbpath:
                testpath = os.path.join(p, 'lib', 'oeqa', namespace)
                bb.debug(2, 'Searching for tests in %s' % testpath)
                if os.path.exists(testpath):
                    add_auto_list(testpath)

        return testslist

    def getTestModules(self):
        """
        Returns all the test modules in the testlist.
        """
        import pkgutil

        modules = []
        for test in self.testslist:
            if re.search(r"\w+\.\w+\.test_\S+", test):
                # Bug fix: this line previously used the undefined name 't'
                # instead of 'test', raising NameError for any fully
                # qualified test id. Keep only the module portion.
                test = '.'.join(test.split('.')[:3])
            module = pkgutil.get_loader(test)
            modules.append(module)

        return modules

    def getModulefromID(self, test_id):
        """
        Returns the test module based on a test id.
        """
        module_name = ".".join(test_id.split(".")[:3])
        modules = self.getTestModules()
        for module in modules:
            if module.name == module_name:
                return module

        return None

    def getTests(self, test):
        '''Return all individual tests executed when running the suite.'''
        # Unfortunately unittest does not have an API for this, so we have
        # to rely on implementation details. This only needs to work
        # for TestSuite containing TestCase.
        method = getattr(test, '_testMethodName', None)
        if method:
            # leaf case: a TestCase
            yield test
        else:
            # Look into TestSuite.
            tests = getattr(test, '_tests', [])
            for t1 in tests:
                for t2 in self.getTests(t1):
                    yield t2

    def loadTests(self):
        """Load the requested suites, filter by tags, order by dependencies."""
        setattr(oeTest, "tc", self)

        testloader = unittest.TestLoader()
        testloader.sortTestMethodsUsing = None
        suites = [testloader.loadTestsFromName(name) for name in self.testslist]
        suites = filterByTagExp(suites, getattr(self, "tagexp", None))

        # Determine dependencies between suites by looking for @skipUnlessPassed
        # method annotations. Suite A depends on suite B if any method in A
        # depends on a method on B.
        for suite in suites:
            suite.dependencies = []
            suite.depth = 0
            for test in self.getTests(suite):
                methodname = getattr(test, '_testMethodName', None)
                if methodname:
                    method = getattr(test, methodname)
                    depends_on = getattr(method, '_depends_on', None)
                    if depends_on:
                        for dep_suite in suites:
                            if depends_on in [getattr(t, '_testMethodName', None) for t in self.getTests(dep_suite)]:
                                if dep_suite not in suite.dependencies and \
                                   dep_suite is not suite:
                                    suite.dependencies.append(dep_suite)
                                break
                        else:
                            logger.warning("Test %s was declared as @skipUnlessPassed('%s') but that test is either not defined or not active. Will run the test anyway." %
                                    (test, depends_on))

        # Use brute-force topological sort to determine ordering. Sort by
        # depth (higher depth = must run later), with original ordering to
        # break ties.
        def set_suite_depth(suite):
            for dep in suite.dependencies:
                new_depth = set_suite_depth(dep) + 1
                if new_depth > suite.depth:
                    suite.depth = new_depth
            return suite.depth

        for index, suite in enumerate(suites):
            set_suite_depth(suite)
            suite.index = index

        # Tuple comparison replaces the previous cmp()/cmp_to_key() dance;
        # ordering is identical: by depth, then original position.
        suites.sort(key=lambda s: (s.depth, s.index))

        self.suite = testloader.suiteClass(suites)

        return self.suite

    def runTests(self):
        """Run the previously loaded suite and return the unittest result."""
        logger.info("Test modules  %s" % self.testslist)
        if hasattr(self, "tagexp") and self.tagexp:
            logger.info("Filter test cases by tags: %s" % self.tagexp)
        logger.info("Found %s tests" % self.suite.countTestCases())
        runner = unittest.TextTestRunner(verbosity=2)
        if 'bb' in sys.modules:
            # Route runner output through bitbake's logging when available
            runner.stream.write = custom_verbose

        return runner.run(self.suite)
|
||||
|
||||
class RuntimeTestContext(TestContext):
    """Test context for runtime (on-target) tests.

    Parses the image package manifest and manages extraction, copying and
    on-target install/uninstall of packages that individual tests need.
    """

    def __init__(self, d, target, exported=False):
        super(RuntimeTestContext, self).__init__(d, exported)

        self.target = target

        # Map of package name -> (version, arch), parsed from the manifest
        self.pkgmanifest = {}
        manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"),
                d.getVar("IMAGE_LINK_NAME") + ".manifest")
        nomanifest = d.getVar("IMAGE_NO_MANIFEST")
        if nomanifest is None or nomanifest != "1":
            try:
                with open(manifest) as f:
                    for line in f:
                        (pkg, arch, version) = line.strip().split()
                        self.pkgmanifest[pkg] = (version, arch)
            except IOError as e:
                bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)

    def _get_test_namespace(self):
        """Runtime tests live under oeqa.runtime."""
        return "runtime"

    def _get_test_suites(self):
        """Return the requested suite names from TEST_SUITES_MANIFEST or TEST_SUITES."""
        testsuites = []

        manifests = (self.d.getVar("TEST_SUITES_MANIFEST") or '').split()
        if manifests:
            for manifest in manifests:
                testsuites.extend(self._read_testlist(manifest,
                                  self.d.getVar("TOPDIR")).split())

        else:
            testsuites = self.d.getVar("TEST_SUITES").split()

        return testsuites

    def _get_test_suites_required(self):
        """Explicitly listed suites (everything in TEST_SUITES except 'auto')."""
        return [t for t in self.d.getVar("TEST_SUITES").split() if t != "auto"]

    def extract_packages(self):
        """
        Find packages that will be needed during runtime.
        """
        modules = self.getTestModules()
        bbpaths = self.d.getVar("BBPATH").split(":")

        # Start from a clean slate; both dirs are recreated by extraction
        shutil.rmtree(self.d.getVar("TEST_EXTRACTED_DIR"))
        shutil.rmtree(self.d.getVar("TEST_PACKAGED_DIR"))
        for module in modules:
            json_file = self._getJsonFile(module)
            if json_file:
                needed_packages = self._getNeededPackages(json_file)
                self._perform_package_extraction(needed_packages)

    def _perform_package_extraction(self, needed_packages):
        """
        Extract packages that will be needed during runtime.
        """
        import oe.path

        extracted_path = self.d.getVar("TEST_EXTRACTED_DIR")
        packaged_path = self.d.getVar("TEST_PACKAGED_DIR")

        for key,value in needed_packages.items():
            packages = ()
            # Each value may describe one package (dict) or several (list)
            if isinstance(value, dict):
                packages = (value, )
            elif isinstance(value, list):
                packages = value
            else:
                bb.fatal("Failed to process needed packages for %s; "
                         "Value must be a dict or list" % key)

            for package in packages:
                pkg = package["pkg"]
                rm = package.get("rm", False)
                extract = package.get("extract", True)
                if extract:
                    dst_dir = os.path.join(extracted_path, pkg)
                else:
                    dst_dir = os.path.join(packaged_path)

                # Extract package and copy it to TEST_EXTRACTED_DIR
                pkg_dir = self._extract_in_tmpdir(pkg)
                if extract:

                    # Same package used for more than one test,
                    # don't need to extract again.
                    if os.path.exists(dst_dir):
                        continue
                    oe.path.copytree(pkg_dir, dst_dir)
                    shutil.rmtree(pkg_dir)

                # Copy package to TEST_PACKAGED_DIR
                else:
                    self._copy_package(pkg)

    def _getJsonFile(self, module):
        """
        Returns the path of the JSON file for a module, empty if it doesn't exist.
        """
        module_file = module.path
        json_file = "%s.json" % module_file.rsplit(".", 1)[0]
        if os.path.isfile(module_file) and os.path.isfile(json_file):
            return json_file
        else:
            return ""

    def _getNeededPackages(self, json_file, test=None):
        """
        Returns a dict with needed packages based on a JSON file.

        If a test is specified it will return the dict just for that test.
        """
        import json

        needed_packages = {}

        with open(json_file) as f:
            test_packages = json.load(f)
        for key,value in test_packages.items():
            needed_packages[key] = value

        if test:
            if test in needed_packages:
                needed_packages = needed_packages[test]
            else:
                needed_packages = {}

        return needed_packages

    def _extract_in_tmpdir(self, pkg):
        """
        Returns path to a temp directory where the package was
        extracted without dependencies.
        """
        from oeqa.utils.package_manager import get_package_manager

        pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
        pm = get_package_manager(self.d, pkg_path)
        extract_dir = pm.extract(pkg)
        # The staged copy is no longer needed once extracted
        shutil.rmtree(pkg_path)

        return extract_dir

    def _copy_package(self, pkg):
        """
        Copy the RPM, DEB or IPK package to dst_dir
        """
        from oeqa.utils.package_manager import get_package_manager

        pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
        dst_dir = self.d.getVar("TEST_PACKAGED_DIR")
        pm = get_package_manager(self.d, pkg_path)
        pkg_info = pm.package_info(pkg)
        file_path = pkg_info[pkg]["filepath"]
        shutil.copy2(file_path, dst_dir)
        shutil.rmtree(pkg_path)

    def install_uninstall_packages(self, test_id, pkg_dir, install):
        """
        Check if the test requires a package and Install/Uninstall it in the DUT
        """
        # assumes test ids of the form oeqa.<ns>.<module>.<class>.<method>,
        # so index 4 is the method name -- TODO confirm for all callers
        test = test_id.split(".")[4]
        module = self.getModulefromID(test_id)
        json = self._getJsonFile(module)
        if json:
            needed_packages = self._getNeededPackages(json, test)
            if needed_packages:
                self._install_uninstall_packages(needed_packages, pkg_dir, install)

    def _install_uninstall_packages(self, needed_packages, pkg_dir, install=True):
        """
        Install/Uninstall packages in the DUT without using a package manager
        """
        if isinstance(needed_packages, dict):
            packages = [needed_packages]
        elif isinstance(needed_packages, list):
            packages = needed_packages

        for package in packages:
            pkg = package["pkg"]
            rm = package.get("rm", False)
            extract = package.get("extract", True)
            src_dir = os.path.join(pkg_dir, pkg)

            # Install package
            if install and extract:
                # Copy the extracted file tree straight onto the target rootfs
                self.target.connection.copy_dir_to(src_dir, "/")

            # Uninstall package
            elif not install and rm:
                self.target.connection.delete_dir_structure(src_dir, "/")
|
||||
|
||||
class ImageTestContext(RuntimeTestContext):
    """Runtime test context used when tests are driven from a bitbake build."""

    def __init__(self, d, target, host_dumper):
        super(ImageTestContext, self).__init__(d, target)

        # Optional tag expression used to filter test cases
        self.tagexp = d.getVar("TEST_SUITES_TAGS")

        self.host_dumper = host_dumper

        # Turn SIGTERM into a flag that oeRuntimeTest.setUp() checks, so a
        # build abort fails the current test instead of killing mid-run
        self.sigterm = False
        self.origsigtermhandler = signal.getsignal(signal.SIGTERM)
        signal.signal(signal.SIGTERM, self._sigterm_exception)

    def _sigterm_exception(self, signum, stackframe):
        """SIGTERM handler: record the shutdown request and stop the target."""
        bb.warn("TestImage received SIGTERM, shutting down...")
        self.sigterm = True
        self.target.stop()

    def install_uninstall_packages(self, test_id, install=True):
        """
        Check if the test requires a package and Install/Uninstall it in the DUT
        """
        # Packages were pre-extracted into TEST_EXTRACTED_DIR by the build
        pkg_dir = self.d.getVar("TEST_EXTRACTED_DIR")
        super(ImageTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
|
||||
|
||||
class ExportTestContext(RuntimeTestContext):
    def __init__(self, d, target, exported=False, parsedArgs=None):
        """
        This class is used when exporting tests and when they are executed
        outside the OE environment.

        parsedArgs can contain the following:
            - tag: Filter test by tag.
        """
        super(ExportTestContext, self).__init__(d, target, exported)

        # Fix: the default was previously a shared mutable dict ({}), which
        # is the classic mutable-default-argument pitfall.
        if parsedArgs is None:
            parsedArgs = {}

        tag = parsedArgs.get("tag", None)
        # 'is not None' instead of '!= None'; an explicit tag wins over config
        self.tagexp = tag if tag is not None else d.getVar("TEST_SUITES_TAGS")

        self.sigterm = None

    def install_uninstall_packages(self, test_id, install=True):
        """
        Check if the test requires a package and Install/Uninstall it in the DUT
        """
        # Exported layout: packages live under the export dir, in the
        # relative TEST_EXPORT_EXTRACTED_DIR subtree
        export_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        extracted_dir = self.d.getVar("TEST_EXPORT_EXTRACTED_DIR")
        pkg_dir = os.path.join(export_dir, extracted_dir)
        super(ExportTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
|
||||
Executable
+153
@@ -0,0 +1,153 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2013 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# This script should be used outside of the build system to run image tests.
|
||||
# It needs a json file as input as exported by the build.
|
||||
# E.g for an already built image:
|
||||
#- export the tests:
|
||||
# TEST_EXPORT_ONLY = "1"
|
||||
# TEST_TARGET = "simpleremote"
|
||||
# TEST_TARGET_IP = "192.168.7.2"
|
||||
# TEST_SERVER_IP = "192.168.7.1"
|
||||
# bitbake core-image-sato -c testimage
|
||||
# Setup your target, e.g for qemu: runqemu core-image-sato
|
||||
# cd build/tmp/testimage/core-image-sato
|
||||
# ./runexported.py testdata.json
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import argparse
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "oeqa")))
|
||||
|
||||
from oeqa.oetest import ExportTestContext
|
||||
from oeqa.utils.commands import runCmd, updateEnv
|
||||
from oeqa.utils.sshcontrol import SSHControl
|
||||
|
||||
# this isn't pretty but we need a fake target object
|
||||
# for running the tests externally as we don't care
|
||||
# about deploy/start we only care about the connection methods (run, copy)
|
||||
class FakeTarget(object):
    """Minimal stand-in for a deploy target when running exported tests.

    Deploy/start are irrelevant outside the build; only the SSH connection
    operations (run/copy) are needed.
    """

    def __init__(self, d):
        # Filled in later from the exported json ("target" section) and
        # exportStart(); see main() in this script
        self.connection = None
        self.ip = None
        self.server_ip = None
        # Timestamp used to make the per-run ssh log name unique
        self.datetime = time.strftime('%Y%m%d%H%M%S',time.gmtime())
        self.testdir = d.getVar("TEST_LOG_DIR")
        self.pn = d.getVar("PN")

    def exportStart(self):
        """Open the SSH connection and repoint the ssh_target_log symlink."""
        self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
        sshloglink = os.path.join(self.testdir, "ssh_target_log")
        # lexists: remove a stale symlink even if its destination is gone
        if os.path.lexists(sshloglink):
            os.remove(sshloglink)
        os.symlink(self.sshlog, sshloglink)
        print("SSH log file: %s" % self.sshlog)
        self.connection = SSHControl(self.ip, logfile=self.sshlog)

    def run(self, cmd, timeout=None):
        """Run *cmd* on the target over SSH; returns (status, output)."""
        return self.connection.run(cmd, timeout)

    def copy_to(self, localpath, remotepath):
        """Copy a local file to the target."""
        return self.connection.copy_to(localpath, remotepath)

    def copy_from(self, remotepath, localpath):
        """Copy a file from the target to the local host."""
        return self.connection.copy_from(remotepath, localpath)
|
||||
|
||||
|
||||
class MyDataDict(dict):
    """Plain dict posing as a bitbake datastore for the exported test context."""

    def getVar(self, key, unused=None):
        # Unset variables read back as "" -- mirrors how tests use d.getVar()
        return self.get(key, "")
|
||||
|
||||
def main():
    """Rebuild the exported test context from testdata.json and run the tests.

    Returns 0; test failures are reported by the unittest runner itself.
    """

    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--target-ip", dest="ip", help="The IP address of the target machine. Use this to \
            overwrite the value determined from TEST_TARGET_IP at build time")
    parser.add_argument("-s", "--server-ip", dest="server_ip", help="The IP address of this machine. Use this to \
            overwrite the value determined from TEST_SERVER_IP at build time.")
    parser.add_argument("-d", "--deploy-dir", dest="deploy_dir", help="Full path to the package feeds, that this \
            the contents of what used to be DEPLOY_DIR on the build machine. If not specified it will use the value \
            specified in the json if that directory actually exists or it will error out.")
    parser.add_argument("-l", "--log-dir", dest="log_dir", help="This sets the path for TEST_LOG_DIR. If not specified \
            the current dir is used. This is used for usually creating a ssh log file and a scp test file.")
    parser.add_argument("-a", "--tag", dest="tag", help="Only run test with specified tag.")
    parser.add_argument("json", help="The json file exported by the build system", default="testdata.json", nargs='?')

    args = parser.parse_args()

    # The json carries both the datastore snapshot ("d") and target info
    with open(args.json, "r") as f:
        loaded = json.load(f)

    # Command-line overrides for the target addresses
    if args.ip:
        loaded["target"]["ip"] = args.ip
    if args.server_ip:
        loaded["target"]["server_ip"] = args.server_ip

    # Rebuild a fake datastore from the exported variable snapshot
    d = MyDataDict()
    for key in loaded["d"].keys():
        d[key] = loaded["d"][key]

    if args.log_dir:
        d["TEST_LOG_DIR"] = args.log_dir
    else:
        d["TEST_LOG_DIR"] = os.path.abspath(os.path.dirname(__file__))
    if args.deploy_dir:
        d["DEPLOY_DIR"] = args.deploy_dir
    else:
        # Warn (not fatal): only some tests need the package feeds
        if not os.path.isdir(d["DEPLOY_DIR"]):
            print("WARNING: The path to DEPLOY_DIR does not exist: %s" % d["DEPLOY_DIR"])

    parsedArgs = {}
    parsedArgs["tag"] = args.tag

    # Unpack and source the exported SDK toolchain, if present
    extract_sdk(d)

    target = FakeTarget(d)
    for key in loaded["target"].keys():
        setattr(target, key, loaded["target"][key])

    target.exportStart()
    tc = ExportTestContext(d, target, True, parsedArgs)
    tc.loadTests()
    tc.runTests()

    return 0
|
||||
|
||||
def extract_sdk(d):
    """
    Extract SDK if needed
    """
    export_dir = os.path.dirname(os.path.realpath(__file__))
    sdk_subdir = d.getVar("TEST_EXPORT_SDK_DIR")
    installer_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME")
    installer_path = os.path.join(export_dir, sdk_subdir, installer_name)
    sysroot_path = os.path.join(export_dir, "sysroot")
    if not os.path.isfile(installer_path):
        # Nothing shipped with the export; nothing to do
        return
    print("Found SDK tarball %s. Extracting..." % installer_path)
    # '-y' answers the installer prompts, '-d' picks the destination
    result = runCmd("%s -y -d %s" % (installer_path, sysroot_path))
    for entry in os.listdir(sysroot_path):
        if entry.startswith("environment-setup"):
            print("Setting up SDK environment...")
            env_file = os.path.join(sysroot_path, entry)
            updateEnv(env_file)
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
ret = main()
|
||||
except Exception:
|
||||
ret = 1
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
sys.exit(ret)
|
||||
@@ -0,0 +1,20 @@
|
||||
#
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.core.case import OETestCase
|
||||
from oeqa.utils.package_manager import install_package, uninstall_package
|
||||
|
||||
class OERuntimeTestCase(OETestCase):
    """Runtime test case that installs/uninstalls per-test packages on the DUT."""

    # target instance set by OERuntimeTestLoader.
    target = None

    def setUp(self):
        """Run the base setup, then push any packages this test declared."""
        super().setUp()
        install_package(self)

    def tearDown(self):
        """Run the base teardown, then remove the test's packages again."""
        super().tearDown()
        uninstall_package(self)
|
||||
@@ -0,0 +1,14 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
|
||||
class QemuTinyTest(OERuntimeTestCase):
    """Sanity boot check for poky-tiny images (serial console only)."""

    def test_boot_tiny(self):
        """Verify via the serial console that a yocto-tiny kernel booted."""
        status, output = self.target.run_serial('uname -a')
        msg = "Cannot detect poky tiny boot!"
        # assertIn reports the actual output on failure, unlike
        # assertTrue("..." in output) which only prints "False is not true"
        self.assertIn("yocto-tiny", output, msg)
|
||||
@@ -0,0 +1,79 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
from oeqa.utils.httpserver import HTTPService
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class AptTest(OERuntimeTestCase):
    """Shared helper for apt-get driven runtime tests."""

    def pkg(self, command, expected = 0):
        """Run 'apt-get <command>' on the target and assert its exit status.

        Returns the command output. 1500s timeout covers slow targets.
        """
        command = 'apt-get %s' % command
        status, output = self.target.run(command, 1500)
        message = os.linesep.join([command, output])
        self.assertEqual(status, expected, message)
        return output
|
||||
|
||||
class AptRepoTest(AptTest):
    """Install packages on the target from an HTTP feed served by the host."""

    @classmethod
    def setUpClass(cls):
        # Serve the build's deb deploy directory over HTTP for the target
        service_repo = os.path.join(cls.tc.td['DEPLOY_DIR_DEB'], '')
        cls.repo_server = HTTPService(service_repo,
                                      '0.0.0.0', port=cls.tc.target.server_port,
                                      logger=cls.tc.logger)
        cls.repo_server.start()

    @classmethod
    def tearDownClass(cls):
        cls.repo_server.stop()

    def setup_source_config_for_package_install(self):
        """Point apt at the host-served feed, allowing unsigned packages."""
        apt_get_source_server = 'http://%s:%s/' % (self.tc.target.server_ip, self.repo_server.port)
        apt_get_sourceslist_dir = '/etc/apt/'
        self.target.run('cd %s; echo deb [ allow-insecure=yes ] %s/all ./ > sources.list' % (apt_get_sourceslist_dir, apt_get_source_server))

    def setup_source_config_for_package_install_signed(self):
        """Rewrite the image's placeholder feed URL to the real signed feed."""
        apt_get_source_server = 'http://%s:%s' % (self.tc.target.server_ip, self.repo_server.port)
        apt_get_sourceslist_dir = '/etc/apt/'
        self.target.run("cd %s; cp sources.list sources.list.bak; sed -i 's|\[trusted=yes\] http://bogus_ip:bogus_port|%s|g' sources.list" % (apt_get_sourceslist_dir, apt_get_source_server))

    def cleanup_source_config_for_package_install(self):
        """Drop the temporary unsigned sources.list."""
        apt_get_sourceslist_dir = '/etc/apt/'
        self.target.run('cd %s; rm sources.list' % (apt_get_sourceslist_dir))

    def cleanup_source_config_for_package_install_signed(self):
        """Restore the original sources.list from the backup."""
        apt_get_sourceslist_dir = '/etc/apt/'
        self.target.run('cd %s; mv sources.list.bak sources.list' % (apt_get_sourceslist_dir))

    def setup_key(self):
        # the key is found on the target /etc/pki/packagefeed-gpg/
        # named PACKAGEFEED-GPG-KEY-poky-branch
        self.target.run('cd %s; apt-key add P*' % ('/etc/pki/packagefeed-gpg'))

    @skipIfNotFeature('package-management',
                      'Test requires package-management to be in IMAGE_FEATURES')
    @skipIfNotDataVar('IMAGE_PKGTYPE', 'deb',
                      'DEB is not the primary package manager')
    @OEHasPackage(['apt'])
    def test_apt_install_from_repo(self):
        """Install and remove a package over the feed, signed or unsigned."""
        if not self.tc.td.get('PACKAGE_FEED_GPG_NAME'):
            # Unsigned feed: must allow unauthenticated installs
            self.setup_source_config_for_package_install()
            self.pkg('update')
            self.pkg('remove --yes run-postinsts-dev')
            self.pkg('install --yes --allow-unauthenticated run-postinsts-dev')
            self.cleanup_source_config_for_package_install()
        else:
            # when we are here a key has been set to sign the package feed and
            # public key and gnupg installed on the image by test_testimage_apt
            self.setup_source_config_for_package_install_signed()
            self.setup_key()
            self.pkg('update')
            self.pkg('install --yes run-postinsts-dev')
            self.pkg('remove --yes run-postinsts-dev')
            self.cleanup_source_config_for_package_install_signed()
|
||||
@@ -0,0 +1,35 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from subprocess import Popen, PIPE
|
||||
import time
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.oetimeout import OETimeout
|
||||
from oeqa.core.decorator.data import skipIfQemu
|
||||
|
||||
class BootTest(OERuntimeTestCase):
    """Reboot the device and verify it comes back onto the network."""

    @OETimeout(120)
    @skipIfQemu()
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_reboot(self):
        """Issue a reboot, then ping from the host until the target answers
        five consecutive times.

        The while loop only exits once five back-to-back pings succeed (any
        failed ping resets the counter), so the @OETimeout(120) decorator is
        what bounds the test if the board never comes back.
        """
        output = ''
        count = 0
        # NOTE(review): 'reboot -h' — '-h' is usually a halt-style flag;
        # confirm this is the intended option for the images under test.
        (status, output) = self.target.run('reboot -h')
        while count < 5:
            time.sleep(5)
            # The ping runs on the build host (subprocess), not on the target.
            cmd = 'ping -c 1 %s' % self.target.ip
            proc = Popen(cmd, shell=True, stdout=PIPE)
            output += proc.communicate()[0].decode('utf-8')
            if proc.poll() == 0:
                count += 1
            else:
                # A lost ping breaks the streak; start counting again.
                count = 0
        msg = ('Expected 5 consecutive, got %d.\n'
               'ping output is:\n%s' % (count,output))
        self.assertEqual(count, 5, msg = msg)
|
||||
@@ -0,0 +1,38 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
from oeqa.runtime.utils.targetbuildproject import TargetBuildProject
|
||||
|
||||
class BuildCpioTest(OERuntimeTestCase):
    """Build GNU cpio from source on the target device."""

    @classmethod
    def setUpClass(cls):
        # Fetch through DL_DIR so repeated runs reuse the tarball.
        cls.project = TargetBuildProject(
            cls.tc.target,
            'https://downloads.yoctoproject.org/mirror/sources/cpio-2.13.tar.gz',
            dl_dir=cls.tc.td['DL_DIR'])

    @classmethod
    def tearDownClass(cls):
        cls.project.clean()

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['gcc'])
    @OEHasPackage(['make'])
    @OEHasPackage(['autoconf'])
    def test_cpio(self):
        """Download, configure, build and install cpio 2.13 on the target."""
        self.project.download_archive()
        self.project.run_configure('--disable-maintainer-mode')
        # This sed is needed until
        # https://git.savannah.gnu.org/cgit/cpio.git/commit/src/global.c?id=641d3f489cf6238bb916368d4ba0d9325a235afb
        # is in a release.
        self.project._run(r'sed -i -e "/char \*program_name/d" %s/src/global.c' % self.project.targetdir)
        self.project.run_make()
        self.project.run_install()
|
||||
@@ -0,0 +1,34 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
from oeqa.runtime.utils.targetbuildproject import TargetBuildProject
|
||||
|
||||
class GalculatorTest(OERuntimeTestCase):
    """Build the galculator GTK+ application from source on the target."""

    @classmethod
    def setUpClass(cls):
        # Fetch through DL_DIR so repeated runs reuse the tarball.
        cls.project = TargetBuildProject(
            cls.tc.target,
            'http://galculator.mnim.org/downloads/galculator-2.1.4.tar.bz2',
            dl_dir=cls.tc.td['DL_DIR'])

    @classmethod
    def tearDownClass(cls):
        cls.project.clean()

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['gcc'])
    @OEHasPackage(['make'])
    @OEHasPackage(['autoconf'])
    @OEHasPackage(['gtk+3'])
    def test_galculator(self):
        """Download, configure and build galculator 2.1.4 on the target."""
        self.project.download_archive()
        self.project.run_configure()
        self.project.run_make()
|
||||
@@ -0,0 +1,36 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
from oeqa.runtime.utils.targetbuildproject import TargetBuildProject
|
||||
|
||||
class BuildLzipTest(OERuntimeTestCase):
    """Build lzip from source on the target device."""

    @classmethod
    def setUpClass(cls):
        # Fetch through DL_DIR so repeated runs reuse the tarball.
        uri = ('http://downloads.yoctoproject.org/mirror/sources'
               '/lzip-1.19.tar.gz')
        cls.project = TargetBuildProject(cls.tc.target, uri,
                                         dl_dir=cls.tc.td['DL_DIR'])

    @classmethod
    def tearDownClass(cls):
        cls.project.clean()

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['gcc'])
    @OEHasPackage(['make'])
    @OEHasPackage(['autoconf'])
    def test_lzip(self):
        """Download, configure, build and install lzip 1.19 on the target."""
        self.project.download_archive()
        self.project.run_configure()
        self.project.run_make()
        self.project.run_install()
|
||||
|
||||
@@ -0,0 +1,33 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class ConnmanTest(OERuntimeTestCase):
    """Sanity checks for the connman network manager on the target."""

    def service_status(self, service):
        """Return systemd status/log text for *service*, or a placeholder
        message when the image is not systemd-based."""
        if 'systemd' not in self.tc.td['DISTRO_FEATURES']:
            return "Unable to get status or logs for %s" % service
        (_, output) = self.target.run('systemctl status -l %s' % service)
        return output

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(["connman"])
    def test_connmand_help(self):
        """connmand must be present and able to print its help text."""
        (status, output) = self.target.run('/usr/sbin/connmand --help')
        self.assertEqual(status, 0,
                         msg='Failed to get connman help. Output: %s' % output)

    @OETestDepends(['connman.ConnmanTest.test_connmand_help'])
    def test_connmand_running(self):
        """A connmand process must be running; log its status if not."""
        # The [c]onnmand trick stops grep matching its own command line.
        cmd = '%s | grep [c]onnmand' % self.tc.target_cmds['ps']
        (status, output) = self.target.run(cmd)
        if status != 0:
            self.logger.info(self.service_status("connman"))
            self.fail("No connmand process running")
|
||||
@@ -0,0 +1,43 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import re
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class DateTest(OERuntimeTestCase):
    """Verify the target's system clock can be read, set, and restored."""

    def setUp(self):
        # systemd-timesyncd would fight the manual 'date -s' below, so stop
        # and disable it for the duration of the test on systemd images.
        if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
            self.logger.debug('Stopping systemd-timesyncd daemon')
            self.target.run('systemctl disable --now --runtime systemd-timesyncd')

    def tearDown(self):
        # Re-enable the NTP synchronization that setUp() turned off.
        if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
            self.logger.debug('Starting systemd-timesyncd daemon')
            self.target.run('systemctl enable --now --runtime systemd-timesyncd')

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['coreutils', 'busybox'])
    def test_date(self):
        """Set the clock to a fixed epoch, check it stuck, then restore it."""
        # Remember the current date/time so it can be restored afterwards.
        (status, output) = self.target.run('date +"%Y-%m-%d %T"')
        msg = 'Failed to get initial date, output: %s' % output
        self.assertEqual(status, 0, msg=msg)
        oldDate = output

        sampleTimestamp = 1488800000
        (status, output) = self.target.run("date -s @%d" % sampleTimestamp)
        self.assertEqual(status, 0, msg='Date set failed, output: %s' % output)

        # Read back the epoch; allow up to 300 s of elapsed time between
        # setting and reading the clock.
        (status, output) = self.target.run('date +"%s"')
        msg = 'The date was not set correctly, output: %s' % output
        self.assertTrue(int(output) - sampleTimestamp < 300, msg=msg)

        (status, output) = self.target.run('date -s "%s"' % oldDate)
        msg = 'Failed to reset date, output: %s' % output
        self.assertEqual(status, 0, msg=msg)
|
||||
@@ -0,0 +1,21 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfDataVar, skipIfInDataVar
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class DfTest(OERuntimeTestCase):
    """Check there is a minimum amount of free space on the root filesystem."""

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['coreutils', 'busybox'])
    @skipIfInDataVar('IMAGE_FEATURES', 'read-only-rootfs', 'Test case df requires a writable rootfs')
    def test_df(self):
        """Root filesystem must report more than 5120 free 1K-blocks."""
        # POSIX df output: second line is '/', fourth field is available blocks.
        (status, output) = self.target.run("df -P / | sed -n '2p' | awk '{print $4}'")
        self.assertTrue(int(output) > 5120,
                        msg='Not enough space on image. Current size is %s' % output)
|
||||
@@ -0,0 +1,173 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
from oeqa.utils.httpserver import HTTPService
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature, skipIfInDataVar, skipIfNotInDataVar
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class DnfTest(OERuntimeTestCase):
    """Base class providing a helper to run dnf commands on the target."""

    def dnf(self, command, expected=0):
        """Run 'dnf <command>' on the target (25 min timeout), assert the
        exit status equals *expected*, and return the command output."""
        full_cmd = 'dnf %s' % command
        status, output = self.target.run(full_cmd, 1500)
        self.assertEqual(status, expected, os.linesep.join([full_cmd, output]))
        return output
|
||||
|
||||
class DnfBasicTest(DnfTest):
    """Basic dnf invocations that need no configured package repository."""

    @skipIfNotFeature('package-management',
                      'Test requires package-management to be in IMAGE_FEATURES')
    @skipIfNotDataVar('IMAGE_PKGTYPE', 'rpm',
                      'RPM is not the primary package manager')
    @OEHasPackage(['dnf'])
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_dnf_help(self):
        """'dnf --help' must succeed; gates all the other dnf tests."""
        self.dnf('--help')

    @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
    def test_dnf_version(self):
        """'dnf --version' must succeed."""
        self.dnf('--version')

    @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
    def test_dnf_info(self):
        """'dnf info dnf' must succeed."""
        self.dnf('info dnf')

    @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
    def test_dnf_search(self):
        """'dnf search dnf' must succeed."""
        self.dnf('search dnf')

    @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
    def test_dnf_history(self):
        """'dnf history' must succeed."""
        self.dnf('history')
|
||||
|
||||
class DnfRepoTest(DnfTest):
    """dnf tests that install from the build's oe-testimage-repo, which is
    served to the target over HTTP for the duration of the class."""

    @classmethod
    def setUpClass(cls):
        # Export WORKDIR/oe-testimage-repo over HTTP for the target to use.
        cls.repo_server = HTTPService(os.path.join(cls.tc.td['WORKDIR'], 'oe-testimage-repo'),
                                      '0.0.0.0', port=cls.tc.target.server_port,
                                      logger=cls.tc.logger)
        cls.repo_server.start()

    @classmethod
    def tearDownClass(cls):
        cls.repo_server.stop()

    def dnf_with_repo(self, command):
        """Run a dnf command with one --repofrompath option per package
        architecture subdirectory of the repo, GPG checking disabled."""
        pkgarchs = os.listdir(os.path.join(self.tc.td['WORKDIR'], 'oe-testimage-repo'))
        deploy_url = 'http://%s:%s/' %(self.target.server_ip, self.repo_server.port)
        cmdlinerepoopts = ["--repofrompath=oe-testimage-repo-%s,%s%s" %(arch, deploy_url, arch) for arch in pkgarchs]

        output = self.dnf(" ".join(cmdlinerepoopts) + " --nogpgcheck " + command)
        return output

    @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
    def test_dnf_makecache(self):
        """Metadata cache for the test repo can be built."""
        self.dnf_with_repo('makecache')

    @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
    def test_dnf_repoinfo(self):
        """Repository information can be queried."""
        self.dnf_with_repo('repoinfo')

    @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
    def test_dnf_install(self):
        """A package can be installed from the repo."""
        self.dnf_with_repo('remove -y dnf-test-*')
        self.dnf_with_repo('install -y dnf-test-dep')

    @OETestDepends(['dnf.DnfRepoTest.test_dnf_install'])
    def test_dnf_install_dependency(self):
        """Installing dnf-test-main must pull in its dnf-test-dep dependency."""
        self.dnf_with_repo('remove -y dnf-test-*')
        self.dnf_with_repo('install -y dnf-test-main')
        output = self.dnf('list --installed dnf-test-*')
        self.assertIn("dnf-test-main.", output)
        self.assertIn("dnf-test-dep.", output)

    @OETestDepends(['dnf.DnfRepoTest.test_dnf_install_dependency'])
    def test_dnf_install_from_disk(self):
        """A downloaded rpm file can be installed from the target's disk."""
        self.dnf_with_repo('remove -y dnf-test-dep')
        self.dnf_with_repo('install -y --downloadonly dnf-test-dep')
        status, output = self.target.run('find /var/cache/dnf -name dnf-test-dep*rpm')
        self.assertEqual(status, 0, output)
        self.dnf_with_repo('install -y %s' % output)

    @OETestDepends(['dnf.DnfRepoTest.test_dnf_install_from_disk'])
    def test_dnf_install_from_http(self):
        """An rpm can be installed directly from an HTTP URL."""
        # Locate the test rpm inside the deployed repo on the build host.
        # Fix: the previous code called bb.utils.which() although 'bb' is not
        # imported in this module (NameError at runtime); with shell=True the
        # shell resolves 'find' via PATH anyway.
        output = subprocess.check_output(
            'find %s -name dnf-test-dep*' % os.path.join(self.tc.td['WORKDIR'], 'oe-testimage-repo'),
            shell=True).decode("utf-8")
        # Keep only '<arch>/<file>.rpm' to build the repo-relative URL.
        rpm_path = output.split("/")[-2] + "/" + output.split("/")[-1]
        url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, rpm_path)
        self.dnf_with_repo('remove -y dnf-test-dep')
        self.dnf_with_repo('install -y %s' % url)

    @OETestDepends(['dnf.DnfRepoTest.test_dnf_install'])
    def test_dnf_reinstall(self):
        """An already-installed package can be reinstalled."""
        self.dnf_with_repo('reinstall -y dnf-test-main')

    @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
    @skipIfInDataVar('DISTRO_FEATURES', 'usrmerge', 'Test run when not enable usrmerge')
    @OEHasPackage('busybox')
    def test_dnf_installroot(self):
        """dnf can populate a chroot via --installroot (split /bin layout)."""
        rootpath = '/home/root/chroot/test'
        #Copy necessary files to avoid errors with not yet installed tools on
        #installroot directory.
        self.target.run('mkdir -p %s/etc' % rootpath, 1500)
        self.target.run('mkdir -p %s/bin %s/sbin %s/usr/bin %s/usr/sbin' % (rootpath, rootpath, rootpath, rootpath), 1500)
        self.target.run('mkdir -p %s/dev' % rootpath, 1500)
        #Handle different architectures lib dirs
        self.target.run('mkdir -p %s/lib' % rootpath, 1500)
        self.target.run('mkdir -p %s/libx32' % rootpath, 1500)
        self.target.run('mkdir -p %s/lib64' % rootpath, 1500)
        self.target.run('cp /lib/libtinfo.so.5 %s/lib' % rootpath, 1500)
        self.target.run('cp /libx32/libtinfo.so.5 %s/libx32' % rootpath, 1500)
        self.target.run('cp /lib64/libtinfo.so.5 %s/lib64' % rootpath, 1500)
        self.target.run('cp -r /etc/rpm %s/etc' % rootpath, 1500)
        self.target.run('cp -r /etc/dnf %s/etc' % rootpath, 1500)
        self.target.run('cp /bin/sh %s/bin' % rootpath, 1500)
        self.target.run('mount -o bind /dev %s/dev/' % rootpath, 1500)
        self.dnf_with_repo('install --installroot=%s -v -y --rpmverbosity=debug busybox' % rootpath)
        status, output = self.target.run('test -e %s/var/cache/dnf' % rootpath, 1500)
        self.assertEqual(0, status, output)
        status, output = self.target.run('test -e %s/bin/busybox' % rootpath, 1500)
        self.assertEqual(0, status, output)

    @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
    @skipIfNotInDataVar('DISTRO_FEATURES', 'usrmerge', 'Test run when enable usrmerge')
    @OEHasPackage('busybox')
    def test_dnf_installroot_usrmerge(self):
        """dnf can populate a chroot via --installroot (usrmerge layout)."""
        rootpath = '/home/root/chroot/test'
        #Copy necessary files to avoid errors with not yet installed tools on
        #installroot directory.
        self.target.run('mkdir -p %s/etc' % rootpath)
        self.target.run('mkdir -p %s/usr/bin %s/usr/sbin' % (rootpath, rootpath))
        # usrmerge: /bin and /sbin are symlinks into /usr.
        self.target.run('ln -sf usr/bin %s/bin' % (rootpath))
        self.target.run('ln -sf usr/sbin %s/sbin' % (rootpath))
        self.target.run('mkdir -p %s/dev' % rootpath)
        #Handle different architectures lib dirs
        self.target.run("for l in /lib*; do mkdir -p %s/usr/$l; ln -s usr/$l %s/$l; done" % (rootpath, rootpath))
        self.target.run('cp -r /etc/rpm %s/etc' % rootpath)
        self.target.run('cp -r /etc/dnf %s/etc' % rootpath)
        self.target.run('cp /bin/busybox %s/bin/sh' % rootpath)
        self.target.run('mount -o bind /dev %s/dev/' % rootpath)
        self.dnf_with_repo('install --installroot=%s -v -y --rpmverbosity=debug busybox' % rootpath)
        status, output = self.target.run('test -e %s/var/cache/dnf' % rootpath)
        self.assertEqual(0, status, output)
        status, output = self.target.run('test -e %s/bin/busybox' % rootpath)
        self.assertEqual(0, status, output)

    @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
    def test_dnf_exclude(self):
        """--exclude must keep the excluded package off the image."""
        self.dnf_with_repo('remove -y dnf-test-*')
        self.dnf_with_repo('install -y --exclude=dnf-test-dep dnf-test-*')
        output = self.dnf('list --installed dnf-test-*')
        self.assertIn("dnf-test-main.", output)
        # Fix: the excluded package is dnf-test-dep; the previous assertion
        # checked "dnf-test-dev." (typo), which could never match and made
        # the exclusion check vacuous.
        self.assertNotIn("dnf-test-dep.", output)
|
||||
@@ -0,0 +1,42 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfQemu
|
||||
|
||||
class Ethernet_Test(OERuntimeTestCase):
    """Network interface tests for real (non-qemu) hardware."""

    def set_ip(self, x):
        """Return dotted-quad address *x* with its last octet replaced by
        the sample host address 150."""
        octets = x.split(".")
        octets[3] = '150'
        return '.'.join(octets)

    @skipIfQemu()
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_set_virtual_ip(self):
        """Bring up an eth0 alias with a derived address and ping it."""
        (status, output) = self.target.run("ifconfig eth0 | grep 'inet ' | awk '{print $2}'")
        self.assertEqual(status, 0, msg='Failed to get ip address. Make sure you have an ethernet connection on your device, output: %s' % output)
        alias_ip = self.set_ip(output)

        (status, output) = self.target.run("ifconfig eth0:1 %s netmask 255.255.255.0 && sleep 2 && ping -c 5 %s && ifconfig eth0:1 down" % (alias_ip, alias_ip))
        self.assertEqual(status, 0, msg='Failed to create virtual ip address, output: %s' % output)

    @skipIfQemu()
    @OETestDepends(['ethernet_ip_connman.Ethernet_Test.test_set_virtual_ip'])
    def test_get_ip_from_dhcp(self):
        """Switch the wired connman service to DHCP and ping the gateway."""
        (status, output) = self.target.run("connmanctl services | grep -E '*AO Wired|*AR Wired' | awk '{print $3}'")
        self.assertEqual(status, 0, msg='No wired interfaces are detected, output: %s' % output)
        wired_service = output

        (status, output) = self.target.run("ip route | grep default | awk '{print $3}'")
        self.assertEqual(status, 0, msg='Failed to retrieve the default gateway, output: %s' % output)
        gateway = output

        (status, output) = self.target.run("connmanctl config %s --ipv4 dhcp && sleep 2 && ping -c 5 %s" % (wired_service, gateway))
        self.assertEqual(status, 0, msg='Failed to get dynamic IP address via DHCP in connmand, output: %s' % output)
|
||||
@@ -0,0 +1,71 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class GccCompileTest(OERuntimeTestCase):
    """On-target compile tests using the image's gcc/g++/make toolchain."""

    @classmethod
    def setUp(cls):
        # NOTE(review): setUp/tearDown are decorated @classmethod although
        # unittest defines them as instance methods; this works (the bound
        # class stands in for the instance) but is unconventional — confirm
        # it is intentional before changing.
        # Copy the sources to be compiled onto the target.
        dst = '/tmp/'
        src = os.path.join(cls.tc.files_dir, 'test.c')
        cls.tc.target.copyTo(src, dst)

        src = os.path.join(cls.tc.runtime_files_dir, 'testmakefile')
        cls.tc.target.copyTo(src, dst)

        src = os.path.join(cls.tc.files_dir, 'test.cpp')
        cls.tc.target.copyTo(src, dst)

    @classmethod
    def tearDown(cls):
        # Remove everything the tests copied over or built.
        files = '/tmp/test.c /tmp/test.o /tmp/test /tmp/testmakefile'
        cls.tc.target.run('rm %s' % files)

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['gcc'])
    def test_gcc_compile(self):
        """Compile test.c with gcc and run the resulting binary."""
        status, output = self.target.run('gcc /tmp/test.c -o /tmp/test -lm')
        msg = 'gcc compile failed, output: %s' % output
        self.assertEqual(status, 0, msg=msg)

        status, output = self.target.run('/tmp/test')
        msg = 'running compiled file failed, output: %s' % output
        self.assertEqual(status, 0, msg=msg)

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['g++'])
    def test_gpp_compile(self):
        """Compile the C source with g++ (C built as C++) and run it."""
        status, output = self.target.run('g++ /tmp/test.c -o /tmp/test -lm')
        msg = 'g++ compile failed, output: %s' % output
        self.assertEqual(status, 0, msg=msg)

        status, output = self.target.run('/tmp/test')
        msg = 'running compiled file failed, output: %s' % output
        self.assertEqual(status, 0, msg=msg)

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['g++'])
    def test_gpp2_compile(self):
        """Compile test.cpp with g++ and run the resulting binary."""
        status, output = self.target.run('g++ /tmp/test.cpp -o /tmp/test -lm')
        msg = 'g++ compile failed, output: %s' % output
        self.assertEqual(status, 0, msg=msg)

        status, output = self.target.run('/tmp/test')
        msg = 'running compiled file failed, output: %s' % output
        self.assertEqual(status, 0, msg=msg)

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['gcc'])
    @OEHasPackage(['make'])
    def test_make(self):
        """Build using the copied makefile via make."""
        status, output = self.target.run('cd /tmp; make -f testmakefile')
        msg = 'running make failed, output %s' % output
        self.assertEqual(status, 0, msg=msg)
|
||||
@@ -0,0 +1,21 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class GObjectIntrospectionTest(OERuntimeTestCase):
    """Check GObject introspection is usable from Python on the target."""

    @OETestDepends(["ssh.SSHTest.test_ssh"])
    @OEHasPackage(["python3-pygobject"])
    def test_python(self):
        """Escape a markup string via GLib through pygobject and verify the
        escaped result comes back."""
        script = """from gi.repository import GLib; print(GLib.markup_escape_text("<testing&testing>"))"""
        status, output = self.target.run("python3 -c '%s'" % script)
        self.assertEqual(status, 0, msg="Python failed (%s)" % (output))
        # Fix: GLib.markup_escape_text() entity-escapes '<', '>' and '&', so
        # the expected output is the escaped form; the previous expectation of
        # the raw input string could never match.
        self.assertEqual(output, "&lt;testing&amp;testing&gt;", msg="Unexpected output (%s)" % output)
|
||||
@@ -0,0 +1,21 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class GoHelloworldTest(OERuntimeTestCase):
    """Smoke-test the go-helloworld example binary on the target."""

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['go-helloworld'])
    def test_gohelloworld(self):
        """Run go-helloworld; verify both the exit status and its greeting."""
        status, output = self.target.run("go-helloworld")
        self.assertEqual(status, 0,
                         msg='Exit status was not 0. Output: %s' % output)
        self.assertEqual(output, "Hello, Go examples!",
                         msg='Incorrect output: %s' % output)
|
||||
@@ -0,0 +1,20 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class GstreamerCliTest(OERuntimeTestCase):
    """Basic command-line checks for GStreamer on the target."""

    @OEHasPackage(['gstreamer1.0'])
    def test_gst_inspect_can_list_all_plugins(self):
        """gst-inspect-1.0 with no arguments must list plugins and exit 0."""
        status, _ = self.target.run('gst-inspect-1.0')
        self.assertEqual(status, 0, 'gst-inspect-1.0 does not appear to be running.')

    @OEHasPackage(['gstreamer1.0'])
    def test_gst_launch_can_create_video_pipeline(self):
        """A trivial fakesrc -> fakesink pipeline must run to completion."""
        status, _ = self.target.run('gst-launch-1.0 -v fakesrc silent=false num-buffers=3 ! fakesink silent=false')
        self.assertEqual(status, 0, 'gst-launch-1.0 does not appear to be running.')
|
||||
@@ -0,0 +1,48 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfNotFeature
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class KernelModuleTest(OERuntimeTestCase):
    """Build and exercise an out-of-tree hello-world kernel module on target."""

    @classmethod
    def setUp(cls):
        # NOTE(review): @classmethod on setUp/tearDown is unconventional for
        # unittest (they are normally instance methods) — confirm intentional.
        # Copy the module source and its makefile onto the target.
        src = os.path.join(cls.tc.runtime_files_dir, 'hellomod.c')
        dst = '/tmp/hellomod.c'
        cls.tc.target.copyTo(src, dst)

        src = os.path.join(cls.tc.runtime_files_dir, 'hellomod_makefile')
        dst = '/tmp/Makefile'
        cls.tc.target.copyTo(src, dst)

    @classmethod
    def tearDown(cls):
        # Clean the copied sources off the target.
        files = '/tmp/Makefile /tmp/hellomod.c'
        cls.tc.target.run('rm %s' % files)

    @skipIfNotFeature('tools-sdk',
                      'Test requires tools-sdk to be in IMAGE_FEATURES')
    @OETestDepends(['gcc.GccCompileTest.test_gcc_compile'])
    @OEHasPackage(['kernel-devsrc'])
    @OEHasPackage(['make'])
    @OEHasPackage(['gcc'])
    def test_kernel_module(self):
        """Prepare the kernel tree, build hellomod, insert it, check dmesg
        for its messages, then remove it.  Every step must exit 0."""
        cmds = [
            'cd /usr/src/kernel && make scripts prepare',
            'cd /tmp && make',
            'cd /tmp && insmod hellomod.ko',
            'lsmod | grep hellomod',
            'dmesg | grep Hello',
            'rmmod hellomod', 'dmesg | grep "Cleaning up hellomod"'
            ]
        for cmd in cmds:
            # 900 s per step: 'make scripts prepare' can be slow on target.
            status, output = self.target.run(cmd, 900)
            self.assertEqual(status, 0, msg='\n'.join([cmd, output]))
|
||||
@@ -0,0 +1,233 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfNotFeature
|
||||
|
||||
# need some kernel fragments
|
||||
# echo "KERNEL_FEATURES:append = \" features\/kernel\-sample\/kernel\-sample.scc\"" >> local.conf
|
||||
class KSample(OERuntimeTestCase):
    """Helpers for exercising the in-tree kernel sample modules
    (samples/ in the kernel source, enabled via kernel-sample.scc)."""

    def cmd_and_check(self, cmd='', match_string=''):
        """Run *cmd* on the target.  Without *match_string* just assert it
        exits 0; with it, additionally assert the string occurs in the
        command's output."""
        status, output = self.target.run(cmd)
        if not match_string:
            # send cmd
            msg = '%s failed, %s' % (cmd, output)
            self.assertEqual(status, 0, msg=msg)
        else:
            # check result
            result = ("%s" % match_string) in output
            msg = output
            self.assertTrue(result, msg)
            self.assertEqual(status, 0, cmd)

    def check_arch(self, archset=''):
        """Skip the current test unless 'uname -m' is listed in *archset*."""
        status, output = self.target.run("uname -m")
        result = ("%s" % output) in archset
        if not result:
            self.skipTest("This case doesn't support %s" % output)

    def check_config(self, config_opt=''):
        """Skip the current test unless *config_opt* is '=y' in
        /proc/config.gz."""
        cmd = "zcat /proc/config.gz | grep %s" % config_opt
        status, output = self.target.run(cmd)
        result = ("%s=y" % config_opt) in output
        if not result:
            self.skipTest("%s is not set" % config_opt)

    def check_module_exist(self, path='', module_name=''):
        """Skip the current test if the sample module is not shipped under
        /lib/modules/<kver>/kernel/samples/<path>."""
        status, output = self.target.run("uname -r")
        cmd = "ls " + "/lib/modules/" + output + "/kernel/samples/" + path + module_name
        status, output = self.target.run(cmd)
        if status != 0:
            error_info = module_name + " doesn't exist"
            self.skipTest(error_info)

    def kfifo_func(self, name=''):
        """Load, verify and unload one of the kfifo sample modules;
        <name>-example.ko is expected to log "test passed" on insertion."""
        module_prename = name + "-example"
        module_name = name + "-example.ko"
        sysmbol_name = name + "_example"

        # make sure if module exists
        self.check_module_exist("kfifo/", module_name)
        # modprobe
        self.cmd_and_check("modprobe %s" % module_prename)
        # lsmod
        self.cmd_and_check("lsmod | grep %s | cut -d\' \' -f1" % sysmbol_name, sysmbol_name)
        # check result
        self.cmd_and_check("dmesg | grep \"test passed\" ", "test passed")
        # rmmod
        self.cmd_and_check("rmmod %s" % module_prename)

    def kprobe_func(self, name=''):
        """Load, verify and unload a kprobe sample module; requires
        CONFIG_KPROBES and expects "Planted" messages in dmesg."""
        # check config
        self.check_config("CONFIG_KPROBES")

        module_prename = name + "_example"
        module_name = name + "_example.ko"
        sysmbol_name = module_prename

        # make sure if module exists
        self.check_module_exist("kprobes/", module_name)
        # modprobe
        self.cmd_and_check("modprobe %s" % module_prename)
        # lsmod
        self.cmd_and_check("lsmod | grep %s | cut -d\' \' -f1" % sysmbol_name, sysmbol_name)
        # check result
        self.cmd_and_check("dmesg | grep Planted | head -n10", "Planted")
        # rmmod
        self.cmd_and_check("rmmod %s" % module_prename)

    def kobject_func(self, name=''):
        """Load, verify and unload a kobject sample module; checks the
        expected 'bar' attribute appears under /sys/kernel/<name>_example."""
        module_prename = name + "_example"
        module_name = name + "-example.ko"
        sysmbol_name = module_prename

        # make sure if module exists
        self.check_module_exist("kobject/", module_name)
        # modprobe
        self.cmd_and_check("modprobe %s" % module_prename)
        # lsmod
        self.cmd_and_check("lsmod | grep %s | cut -d\' \' -f1" % sysmbol_name, sysmbol_name)
        # check result
        self.cmd_and_check("ls /sys/kernel/%s/" % sysmbol_name, "bar")
        # rmmod
        self.cmd_and_check("rmmod %s" % module_prename)
|
||||
|
||||
class KSampleTest(KSample):
|
||||
# kfifo
|
||||
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_kfifo_test(self):
        """Exercise each kfifo sample variant (dma/bytestream/inttype/record)."""
        index = ["dma", "bytestream", "inttype", "record"]
        for i in index:
            self.kfifo_func(i)
|
||||
|
||||
# kprobe
|
||||
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_kprobe_test(self):
        """Exercise the kprobe and kretprobe samples; restricted to
        x86_64 / i686 / ppc via check_arch()."""
        self.check_arch("x86_64 i686 ppc")
        index = ["kprobe", "kretprobe"]
        for i in index:
            self.kprobe_func(i)
|
||||
|
||||
# kobject
|
||||
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_kobject_test(self):
        """Exercise the kobject and kset samples."""
        index = ["kobject", "kset"]
        for i in index:
            self.kobject_func(i)
|
||||
|
||||
#trace
|
||||
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_trace_events(self):
        """Load trace-events-sample, enable its tracepoint, wait for a
        'foo_bar' event to appear in the trace buffer, then clean up."""
        # check config
        self.check_config("CONFIG_TRACING_SUPPORT")
        # make sure if module exists
        self.check_module_exist("trace_events/", "trace-events-sample.ko")
        # modprobe
        self.cmd_and_check("modprobe trace-events-sample")
        # lsmod
        self.cmd_and_check("lsmod | grep trace_events_sample | cut -d\' \' -f1", "trace_events_sample")
        # check dir
        self.cmd_and_check("ls /sys/kernel/debug/tracing/events/ | grep sample-trace", "sample-trace")
        # enable trace
        self.cmd_and_check("echo 1 > /sys/kernel/debug/tracing/events/sample-trace/enable")
        self.cmd_and_check("cat /sys/kernel/debug/tracing/events/sample-trace/enable")
        # check result
        # Poll the trace buffer at 1 s intervals until a foo_bar event shows
        # up; give up (and fail) after more than 5 unsuccessful reads.
        status = 1
        count = 0
        while status != 0:
            time.sleep(1)
            status, output = self.target.run('cat /sys/kernel/debug/tracing/trace | grep hello | head -n1 | cut -d\':\' -f2')
            if " foo_bar" in output:
                break
            count = count + 1
            if count > 5:
                self.assertTrue(False, "Time out when check result")
        # disable trace
        self.cmd_and_check("echo 0 > /sys/kernel/debug/tracing/events/sample-trace/enable")
        # clean up trace
        self.cmd_and_check("echo > /sys/kernel/debug/tracing/trace")
        # rmmod
        self.cmd_and_check("rmmod trace-events-sample")
|
||||
|
||||
@OETestDepends(['ssh.SSHTest.test_ssh'])
|
||||
def test_trace_printk(self):
|
||||
# check config
|
||||
self.check_config("CONFIG_TRACING_SUPPORT")
|
||||
# make sure if module exists
|
||||
self.check_module_exist("trace_printk/", "trace-printk.ko")
|
||||
# modprobe
|
||||
self.cmd_and_check("modprobe trace-printk")
|
||||
# lsmod
|
||||
self.cmd_and_check("lsmod | grep trace_printk | cut -d\' \' -f1", "trace_printk")
|
||||
# check result
|
||||
self.cmd_and_check("cat /sys/kernel/debug/tracing/trace | grep trace_printk_irq_work | head -n1 | cut -d\':\' -f2", " trace_printk_irq_work")
|
||||
# clean up trace
|
||||
self.cmd_and_check("echo > /sys/kernel/debug/tracing/trace")
|
||||
# rmmod
|
||||
self.cmd_and_check("rmmod trace-printk")
|
||||
|
||||
# hw breakpoint
|
||||
@OETestDepends(['ssh.SSHTest.test_ssh'])
|
||||
def test_hw_breakpoint_example(self):
|
||||
# check arch
|
||||
status, output = self.target.run("uname -m")
|
||||
result = ("x86_64" in output) or ("aarch64" in output)
|
||||
if not result:
|
||||
self.skipTest("the arch %s doesn't support hw breakpoint" % output)
|
||||
# check config
|
||||
self.check_config("CONFIG_KALLSYMS_ALL")
|
||||
# make sure if module exists
|
||||
self.check_module_exist("hw_breakpoint/", "data_breakpoint.ko")
|
||||
# modprobe
|
||||
self.cmd_and_check("modprobe data_breakpoint")
|
||||
# lsmod
|
||||
self.cmd_and_check("lsmod | grep data_breakpoint | cut -d\' \' -f1", "data_breakpoint")
|
||||
# check result
|
||||
self.cmd_and_check("cat /var/log/messages | grep sample_hbp_handler", "sample_hbp_handler")
|
||||
# rmmod
|
||||
self.cmd_and_check("rmmod data_breakpoint")
|
||||
|
||||
@OETestDepends(['ssh.SSHTest.test_ssh'])
|
||||
def test_configfs_sample(self):
|
||||
# check config
|
||||
status, ret = self.target.run('zcat /proc/config.gz | grep CONFIG_CONFIGFS_FS')
|
||||
if not ["CONFIG_CONFIGFS_FS=m" in ret or "CONFIG_CONFIGFS_FS=y" in ret]:
|
||||
self.skipTest("CONFIG error")
|
||||
# make sure if module exists
|
||||
self.check_module_exist("configfs/", "configfs_sample.ko")
|
||||
# modprobe
|
||||
self.cmd_and_check("modprobe configfs_sample")
|
||||
# lsmod
|
||||
self.cmd_and_check("lsmod | grep configfs_sample | cut -d\' \' -f1 | head -n1", "configfs_sample")
|
||||
|
||||
status = 1
|
||||
count = 0
|
||||
while status != 0:
|
||||
time.sleep(1)
|
||||
status, ret = self.target.run('cat /sys/kernel/config/01-childless/description')
|
||||
count = count + 1
|
||||
if count > 200:
|
||||
self.skipTest("Time out for check dir")
|
||||
|
||||
# rmmod
|
||||
self.cmd_and_check("rmmod configfs_sample")
|
||||
|
||||
@OETestDepends(['ssh.SSHTest.test_ssh'])
|
||||
def test_cn_test(self):
|
||||
# make sure if module exists
|
||||
self.check_module_exist("connector/", "cn_test.ko")
|
||||
# modprobe
|
||||
self.cmd_and_check("modprobe cn_test")
|
||||
# lsmod
|
||||
self.cmd_and_check("lsmod | grep cn_test | cut -d\' \' -f1", "cn_test")
|
||||
# check result
|
||||
self.cmd_and_check("cat /proc/net/connector | grep cn_test | head -n1 | cut -d\' \' -f1", "cn_test")
|
||||
# rmmod
|
||||
self.cmd_and_check("rmmod cn_test")
|
||||
@@ -0,0 +1,28 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfNotFeature
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class LddTest(OERuntimeTestCase):
    """Sanity-check the ldd utility on the target image."""

    @OEHasPackage(["ldd"])
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_ldd(self):
        """Verify ldd is installed, its RTLDLIST points at a real loader, and it runs."""
        # ldd must be resolvable through PATH.
        status, output = self.target.run('which ldd')
        self.assertEqual(
            status, 0,
            msg='ldd does not exist in PATH: which ldd: %s' % output)

        # Extract RTLDLIST from the ldd script and check at least one listed
        # dynamic loader actually exists on the filesystem.
        rtldlist_cmd = ('for i in $(which ldd | xargs cat | grep "^RTLDLIST"| '
                        'cut -d\'=\' -f2|tr -d \'"\'); '
                        'do test -f $i && echo $i && break; done')
        status, output = self.target.run(rtldlist_cmd)
        self.assertEqual(status, 0, msg="ldd path not correct or RTLDLIST files don't exist.")

        # Finally, ldd itself must run successfully on a trivial binary.
        status, output = self.target.run("ldd /bin/true")
        self.assertEqual(status, 0, msg="ldd failed to execute: %s" % output)
|
||||
@@ -0,0 +1,73 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=289 testcase
|
||||
# Note that the image under test must have logrotate installed
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class LogrotateTest(OERuntimeTestCase):
    """Verify logrotate can rotate both an existing system log (wtmp) and a
    freshly created log file, using a temporary config with an olddir."""

    @classmethod
    def setUpClass(cls):
        # Back up the stock wtmp logrotate config; restored in tearDownClass.
        cls.tc.target.run('cp /etc/logrotate.d/wtmp $HOME/wtmp.oeqabak')

    @classmethod
    def tearDownClass(cls):
        # Restore the original wtmp config and remove everything the tests created.
        cls.tc.target.run('mv -f $HOME/wtmp.oeqabak /etc/logrotate.d/wtmp && rm -rf /var/log//logrotate_dir')
        cls.tc.target.run('rm -rf /var/log/logrotate_testfile && rm -rf /etc/logrotate.d/logrotate_testfile')

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['logrotate'])
    def test_logrotate_wtmp(self):
        """Rotate /var/log/wtmp into /var/log//logrotate_dir via a temp config."""

        # /var/log/wtmp may not always exist initially, so use touch to ensure it is present
        status, output = self.target.run('touch /var/log/wtmp')
        msg = ('Could not create/update /var/log/wtmp with touch')
        self.assertEqual(status, 0, msg = msg)

        # Destination directory for rotated logs (the 'olddir' below).
        status, output = self.target.run('mkdir /var/log//logrotate_dir')
        msg = ('Could not create logrotate_dir. Output: %s' % output)
        self.assertEqual(status, 0, msg = msg)

        # Minimal config: recreate the log after rotation, park rotated copies
        # in logrotate_dir, and reuse the stock wtmp stanza.
        status, output = self.target.run('echo "create \n olddir /var/log//logrotate_dir \n include /etc/logrotate.d/wtmp" > /tmp/logrotate-test.conf')
        msg = ('Could not write to /tmp/logrotate-test.conf')
        self.assertEqual(status, 0, msg = msg)

        # If logrotate fails to rotate the log, view the verbose output of logrotate to see what prevented it
        _, logrotate_output = self.target.run('logrotate -vf /tmp/logrotate-test.conf')
        status, _ = self.target.run('find /var/log//logrotate_dir -type f | grep wtmp.1')
        msg = ("logrotate did not successfully rotate the wtmp log. Output from logrotate -vf: \n%s" % (logrotate_output))
        self.assertEqual(status, 0, msg = msg)

    @OETestDepends(['logrotate.LogrotateTest.test_logrotate_wtmp'])
    def test_logrotate_newlog(self):
        """Create a brand-new log plus stanza and verify it gets rotated too."""

        status, output = self.target.run('echo "oeqa logrotate test file" > /var/log/logrotate_testfile')
        msg = ('Could not create logrotate test file in /var/log')
        self.assertEqual(status, 0, msg = msg)

        # Stanza for the new log (rotate monthly, keep one copy, tolerate absence).
        status, output = self.target.run('echo "/var/log/logrotate_testfile {\n missingok \n monthly \n rotate 1" > /etc/logrotate.d/logrotate_testfile')
        msg = ('Could not write to /etc/logrotate.d/logrotate_testfile')
        self.assertEqual(status, 0, msg = msg)

        status, output = self.target.run('echo "create \n olddir /var/log//logrotate_dir \n include /etc/logrotate.d/logrotate_testfile" > /tmp/logrotate-test2.conf')
        msg = ('Could not write to /tmp/logrotate_test2.conf')
        self.assertEqual(status, 0, msg = msg)

        # Precondition: no rotated copy may exist yet (grep must find nothing).
        status, output = self.target.run('find /var/log//logrotate_dir -type f | grep logrotate_testfile.1')
        msg = ('A rotated log for logrotate_testfile is already present in logrotate_dir')
        self.assertEqual(status, 1, msg = msg)

        # If logrotate fails to rotate the log, view the verbose output of logrotate instead of just listing the files in olddir
        _, logrotate_output = self.target.run('logrotate -vf /tmp/logrotate-test2.conf')
        status, _ = self.target.run('find /var/log//logrotate_dir -type f | grep logrotate_testfile.1')
        msg = ('logrotate did not successfully rotate the logrotate_test log. Output from logrotate -vf: \n%s' % (logrotate_output))
        self.assertEqual(status, 0, msg = msg)
|
||||
|
||||
|
||||
@@ -0,0 +1,124 @@
|
||||
# LTP runtime
|
||||
#
|
||||
# Copyright (c) 2019 MontaVista Software, LLC
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import time
|
||||
import datetime
|
||||
import pprint
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
from oeqa.utils.logparser import LtpParser
|
||||
|
||||
class LtpTestBase(OERuntimeTestCase):
    """Shared setup/teardown for LTP runs: prepares a timestamped log directory,
    result collection in tc.extraresults, and aggregates failures."""

    @classmethod
    def setUpClass(cls):
        cls.ltp_startup()

    @classmethod
    def tearDownClass(cls):
        cls.ltp_finishup()

    @classmethod
    def ltp_startup(cls):
        # Per-group section data and the accumulated failure message.
        cls.sections = {}
        cls.failmsg = ""
        # NOTE(review): os.path/os.makedirs are used below but no 'import os'
        # is visible in this file's import block — confirm os is imported.
        test_log_dir = os.path.join(cls.td.get('WORKDIR', ''), 'testimage')
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')

        # Logs go in a timestamped dir; a stable 'ltp_log' symlink is
        # (re)pointed at it in ltp_finishup.
        cls.ltptest_log_dir_link = os.path.join(test_log_dir, 'ltp_log')
        cls.ltptest_log_dir = '%s.%s' % (cls.ltptest_log_dir_link, timestamp)
        os.makedirs(cls.ltptest_log_dir)

        cls.tc.target.run("mkdir -p /opt/ltp/results")

        # Results are published through the test context's extraresults dict.
        if not hasattr(cls.tc, "extraresults"):
            cls.tc.extraresults = {}
        cls.extras = cls.tc.extraresults
        cls.extras['ltpresult.rawlogs'] = {'log': ""}


    @classmethod
    def ltp_finishup(cls):
        cls.extras['ltpresult.sections'] = cls.sections

        # update symlink to ltp_log
        if os.path.exists(cls.ltptest_log_dir_link):
            os.remove(cls.ltptest_log_dir_link)
        os.symlink(os.path.basename(cls.ltptest_log_dir), cls.ltptest_log_dir_link)

        if cls.failmsg:
            # NOTE(review): TestCase.fail is an instance method; calling it via
            # cls passes failmsg as 'self' — confirm this reports the failure
            # as intended rather than raising an unrelated error.
            cls.fail(cls.failmsg)
|
||||
|
||||
class LtpTest(LtpTestBase):
    """Run the standard LTP test groups on target and collect per-test results."""

    ltp_groups = ["math", "syscalls", "dio", "io", "mm", "ipc", "sched", "nptl", "pty", "containers", "controllers", "filecaps", "cap_bounds", "fcntl-locktests", "connectors", "commands", "net.ipv6_lib", "input","fs_perms_simple"]

    ltp_fs = ["fs", "fsx", "fs_bind"]
    # skip kernel cpuhotplug
    ltp_kernel = ["power_management_tests", "hyperthreading ", "kernel_misc", "hugetlb"]
    ltp_groups += ltp_fs

    def runltp(self, ltp_group):
        """Run one LTP group, save its logs locally and record results in extras."""
        # LTP appends to log files, so ensure we start with a clean log
        self.target.deleteFiles("/opt/ltp/results/", ltp_group)

        # -q quiet console, -l machine-readable result file, -I 1 single iteration.
        cmd = '/opt/ltp/runltp -f %s -q -r /opt/ltp -l /opt/ltp/results/%s -I 1 -d /opt/ltp' % (ltp_group, ltp_group)

        starttime = time.time()
        (status, output) = self.target.run(cmd)
        endtime = time.time()

        # Write the console log to disk for convenience
        with open(os.path.join(self.ltptest_log_dir, "%s-raw.log" % ltp_group), 'w') as f:
            f.write(output)

        # Also put the console log into the test result JSON
        self.extras['ltpresult.rawlogs']['log'] = self.extras['ltpresult.rawlogs']['log'] + output

        # Copy the machine-readable test results locally so we can parse it
        dst = os.path.join(self.ltptest_log_dir, ltp_group)
        remote_src = "/opt/ltp/results/%s" % ltp_group
        (status, output) = self.target.copyFrom(remote_src, dst, True)
        if status:
            # Best-effort: a missing result file is logged, not fatal here;
            # the parser below will surface the consequences.
            msg = 'File could not be copied. Output: %s' % output
            self.target.logger.warning(msg)

        parser = LtpParser()
        results, sections = parser.parse(dst)

        sections['duration'] = int(endtime-starttime)
        self.sections[ltp_group] = sections

        # Record each test's status and remember failures for the final report.
        failed_tests = {}
        for test in results:
            result = results[test]
            testname = ("ltpresult." + ltp_group + "." + test)
            self.extras[testname] = {'status': result}
            if result == 'FAILED':
                failed_tests[ltp_group] = test

        if failed_tests:
            self.failmsg = self.failmsg + "Failed ptests:\n%s" % pprint.pformat(failed_tests)

    # LTP runtime tests
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(["ltp"])
    def test_ltp_help(self):
        # Smoke test: the runltp harness itself must be runnable.
        (status, output) = self.target.run('/opt/ltp/runltp --help')
        msg = 'Failed to get ltp help. Output: %s' % output
        self.assertEqual(status, 0, msg=msg)

    @OETestDepends(['ltp.LtpTest.test_ltp_help'])
    def test_ltp_groups(self):
        # Run every configured group; failures accumulate in self.failmsg.
        for ltp_group in self.ltp_groups:
            self.runltp(ltp_group)

    @OETestDepends(['ltp.LtpTest.test_ltp_groups'])
    def test_ltp_runltp_cve(self):
        # The CVE regression group runs after the main groups complete.
        self.runltp("cve")
|
||||
@@ -0,0 +1,97 @@
|
||||
# LTP compliance runtime
|
||||
#
|
||||
# Copyright (c) 2019 MontaVista Software, LLC
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import time
|
||||
import datetime
|
||||
import pprint
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
from oeqa.utils.logparser import LtpComplianceParser
|
||||
|
||||
class LtpPosixBase(OERuntimeTestCase):
    """Shared setup/teardown for the LTP POSIX compliance run: timestamped log
    directory, result collection in tc.extraresults, failure aggregation."""

    @classmethod
    def setUpClass(cls):
        cls.ltp_startup()

    @classmethod
    def tearDownClass(cls):
        cls.ltp_finishup()

    @classmethod
    def ltp_startup(cls):
        # Per-group section data and the accumulated failure message.
        cls.sections = {}
        cls.failmsg = ""
        # NOTE(review): os.path/os.makedirs are used below but no 'import os'
        # is visible in this file's import block — confirm os is imported.
        test_log_dir = os.path.join(cls.td.get('WORKDIR', ''), 'testimage')
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')

        # Logs go in a timestamped dir; the 'ltpcomp_log' symlink is
        # (re)pointed at it in ltp_finishup.
        cls.ltptest_log_dir_link = os.path.join(test_log_dir, 'ltpcomp_log')
        cls.ltptest_log_dir = '%s.%s' % (cls.ltptest_log_dir_link, timestamp)
        os.makedirs(cls.ltptest_log_dir)

        cls.tc.target.run("mkdir -p /opt/ltp/results")

        # Results are published through the test context's extraresults dict.
        if not hasattr(cls.tc, "extraresults"):
            cls.tc.extraresults = {}
        cls.extras = cls.tc.extraresults
        cls.extras['ltpposixresult.rawlogs'] = {'log': ""}


    @classmethod
    def ltp_finishup(cls):
        cls.extras['ltpposixresult.sections'] = cls.sections

        # update symlink to ltp_log
        if os.path.exists(cls.ltptest_log_dir_link):
            os.remove(cls.ltptest_log_dir_link)

        os.symlink(os.path.basename(cls.ltptest_log_dir), cls.ltptest_log_dir_link)

        if cls.failmsg:
            # NOTE(review): TestCase.fail is an instance method; calling it via
            # cls passes failmsg as 'self' — confirm this reports the failure
            # as intended rather than raising an unrelated error.
            cls.fail(cls.failmsg)
|
||||
|
||||
class LtpPosixTest(LtpPosixBase):
    """Run the LTP Open POSIX test suite option groups on target."""

    posix_groups = ["AIO", "MEM", "MSG", "SEM", "SIG", "THR", "TMR", "TPS"]

    def runltp(self, posix_group):
        """Run one POSIX option group, save its log and record per-test results."""
        # Fix: the original used '2>@1', which the shell parses as redirecting
        # stderr to a file literally named '@1'; '2>&1' merges stderr into the
        # tee'd output so diagnostics land in the results file.
        cmd = "/opt/ltp/bin/run-posix-option-group-test.sh %s 2>&1 | tee /opt/ltp/results/%s" % (posix_group, posix_group)
        starttime = time.time()
        (status, output) = self.target.run(cmd)
        endtime = time.time()

        # Keep a local copy of the console log for the test report.
        with open(os.path.join(self.ltptest_log_dir, "%s" % posix_group), 'w') as f:
            f.write(output)

        self.extras['ltpposixresult.rawlogs']['log'] = self.extras['ltpposixresult.rawlogs']['log'] + output

        parser = LtpComplianceParser()
        results, sections = parser.parse(os.path.join(self.ltptest_log_dir, "%s" % posix_group))

        runtime = int(endtime-starttime)
        sections['duration'] = runtime
        self.sections[posix_group] = sections

        # Record each test's status and remember failures for the final report.
        failed_tests = {}
        for test in results:
            result = results[test]
            testname = ("ltpposixresult." + posix_group + "." + test)
            self.extras[testname] = {'status': result}
            if result == 'FAILED':
                failed_tests[posix_group] = test

        if failed_tests:
            self.failmsg = self.failmsg + "Failed ptests:\n%s" % pprint.pformat(failed_tests)

    # LTP Posix compliance runtime tests

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(["ltp"])
    def test_posix_groups(self):
        # Run every option group; failures accumulate in self.failmsg.
        for posix_group in self.posix_groups:
            self.runltp(posix_group)
|
||||
@@ -0,0 +1,97 @@
|
||||
# LTP Stress runtime
|
||||
#
|
||||
# Copyright (c) 2019 MontaVista Software, LLC
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import time
|
||||
import datetime
|
||||
import pprint
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
from oeqa.core.decorator.data import skipIfQemu
|
||||
from oeqa.utils.logparser import LtpParser
|
||||
|
||||
class LtpStressBase(OERuntimeTestCase):
    """Shared setup/teardown for the LTP stress run: timestamped log directory,
    result collection in tc.extraresults, failure aggregation."""

    @classmethod
    def setUpClass(cls):
        cls.ltp_startup()

    @classmethod
    def tearDownClass(cls):
        cls.ltp_finishup()

    @classmethod
    def ltp_startup(cls):
        # Per-group section data and the accumulated failure message.
        cls.sections = {}
        cls.failmsg = ""
        # NOTE(review): os.path/os.makedirs are used below but no 'import os'
        # is visible in this file's import block — confirm os is imported.
        test_log_dir = os.path.join(cls.td.get('WORKDIR', ''), 'testimage')
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')

        # Logs go in a timestamped dir; the 'ltpstress_log' symlink is
        # (re)pointed at it in ltp_finishup.
        cls.ltptest_log_dir_link = os.path.join(test_log_dir, 'ltpstress_log')
        cls.ltptest_log_dir = '%s.%s' % (cls.ltptest_log_dir_link, timestamp)
        os.makedirs(cls.ltptest_log_dir)

        cls.tc.target.run("mkdir -p /opt/ltp/results")

        # Results are published through the test context's extraresults dict.
        if not hasattr(cls.tc, "extraresults"):
            cls.tc.extraresults = {}
        cls.extras = cls.tc.extraresults
        cls.extras['ltpstressresult.rawlogs'] = {'log': ""}


    @classmethod
    def ltp_finishup(cls):
        cls.extras['ltpstressresult.sections'] = cls.sections

        # update symlink to ltp_log
        if os.path.exists(cls.ltptest_log_dir_link):
            os.remove(cls.ltptest_log_dir_link)

        os.symlink(os.path.basename(cls.ltptest_log_dir), cls.ltptest_log_dir_link)

        if cls.failmsg:
            # NOTE(review): TestCase.fail is an instance method; calling it via
            # cls passes failmsg as 'self' — confirm this reports the failure
            # as intended rather than raising an unrelated error.
            cls.fail(cls.failmsg)
|
||||
|
||||
class LtpStressTest(LtpStressBase):
    """Run the LTP 'crashme' stress group on real hardware (skipped under qemu)."""

    def runltp(self, stress_group):
        """Run one stress group, save its log and record per-test results."""
        # Fix: the original used '2>@1', which the shell parses as redirecting
        # stderr to a file literally named '@1'; '2>&1' merges stderr into the
        # tee'd output so diagnostics land in the results file.
        cmd = '/opt/ltp/runltp -f %s -p -q 2>&1 | tee /opt/ltp/results/%s' % (stress_group, stress_group)
        starttime = time.time()
        (status, output) = self.target.run(cmd)
        endtime = time.time()
        # Keep a local copy of the console log for the test report.
        with open(os.path.join(self.ltptest_log_dir, "%s" % stress_group), 'w') as f:
            f.write(output)

        self.extras['ltpstressresult.rawlogs']['log'] = self.extras['ltpstressresult.rawlogs']['log'] + output

        parser = LtpParser()
        results, sections = parser.parse(os.path.join(self.ltptest_log_dir, "%s" % stress_group))

        runtime = int(endtime-starttime)
        sections['duration'] = runtime
        self.sections[stress_group] = sections

        # Record each test's status and remember failures for the final report.
        failed_tests = {}
        for test in results:
            result = results[test]
            testname = ("ltpstressresult." + stress_group + "." + test)
            self.extras[testname] = {'status': result}
            if result == 'FAILED':
                failed_tests[stress_group] = test

        if failed_tests:
            self.failmsg = self.failmsg + "Failed ptests:\n%s" % pprint.pformat(failed_tests)

    # LTP stress runtime tests
    #
    @skipIfQemu()
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(["ltp"])
    def test_ltp_stress(self):
        # fork12 is removed from the group first: it is known to be
        # problematic in this environment (edits the runtest file in place).
        self.tc.target.run("sed -i -r 's/^fork12.*//' /opt/ltp/runtest/crashme")
        self.runltp('crashme')
|
||||
@@ -0,0 +1,49 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfNotInDataVar
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
import subprocess
|
||||
|
||||
class MultilibTest(OERuntimeTestCase):
    """Verify that a multilib image ships binaries of the expected ELF class."""

    def archtest(self, binary, arch):
        """
        Check that ``binary`` has the ELF class ``arch`` (e.g. ELF32/ELF64).
        """
        # Pull the binary off the target and inspect its ELF header locally.
        dest = "{}/test_binary".format(self.td.get('T', ''))
        self.target.copyFrom(binary, dest)
        output = subprocess.check_output("readelf -h {}".format(dest), shell=True).decode()
        os.remove(dest)

        # The "Class:" line of the readelf header carries ELF32/ELF64.
        classes = [line.split()[1] for line in output.split('\n') if "Class:" in line]
        if not classes:
            self.fail('Cannot parse readelf. Output:\n%s' % output)
        theclass = classes[0]

        self.assertEqual(theclass, arch,
                         msg="%s isn't %s (is %s)" % (binary, arch, theclass))

    @skipIfNotInDataVar('MULTILIBS', 'multilib:lib32',
                        "This isn't a multilib:lib32 image")
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['lib32-libc6'])
    def test_check_multilib_libc(self):
        """
        Check that a multilib image has both 32-bit and 64-bit libc in.
        """
        self.archtest("/lib/libc.so.6", "ELF32")
        self.archtest("/lib64/libc.so.6", "ELF64")

    @OETestDepends(['multilib.MultilibTest.test_check_multilib_libc'])
    @OEHasPackage(['lib32-connman'])
    def test_file_connman(self):
        # The 32-bit connman package must install a 32-bit daemon binary.
        self.archtest("/usr/sbin/connmand", "ELF32")
|
||||
@@ -0,0 +1,138 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfDataVar
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
import time
|
||||
|
||||
class SyslogTest(OERuntimeTestCase):
    """Check that some syslog daemon is running on the image."""

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(["busybox-syslog", "sysklogd", "rsyslog", "syslog-ng"])
    def test_syslog_running(self):
        """The process list must contain one of the known syslog daemons."""
        ps_cmd = self.tc.target_cmds['ps']
        status, output = self.target.run(ps_cmd)
        self.assertEqual(status, 0, msg="Failed to execute %s" % ps_cmd)

        # Accept any of the daemon names the supported packages provide.
        hasdaemon = any(daemon in output
                        for daemon in ("syslogd", "syslog-ng", "svlogd"))
        self.assertTrue(
            hasdaemon,
            msg="No syslog daemon process; %s output:\n%s" % (ps_cmd, output))
|
||||
|
||||
class SyslogTestConfig(OERuntimeTestCase):
    """Exercise syslog restart behaviour and logfile configuration."""

    def verif_not_running(self, pids):
        """Return truthy (1) if any pid in `pids` is still alive, else None."""
        for pid in pids:
            status, err_output = self.target.run('kill -0 %s' %pid)
            # kill -0 succeeding (status 0) means the process still exists.
            if not status:
                self.logger.debug("previous %s is still running" %pid)
                return 1

    def verify_running(self, names):
        """Return (0, pids) if every daemon in `names` is running, else (1, partial pids)."""
        pids = []
        for name in names:
            status, pid = self.target.run('pidof %s' %name)
            if status:
                self.logger.debug("%s is not running" %name)
                return 1, pids
            pids.append(pid)
        return 0, pids


    def restart_sanity(self, names, restart_cmd, pidchange=True):
        """Restart the daemons in `names` via `restart_cmd` and verify they came
        back (with new pids, unless pidchange is False). Returns False if the
        daemons were not running to begin with, True on success; asserts on
        restart failure."""
        status, original_pids = self.verify_running(names)
        if status:
            return False

        status, output = self.target.run(restart_cmd)

        msg = ('Could not restart %s service. Status and output: %s and %s' % (names, status, output))
        self.assertEqual(status, 0, msg)

        if not pidchange:
            return True

        # Always check for an error, most likely a race between shutting down and starting up
        timeout = time.time() + 30

        restarted = False
        status = ""
        while time.time() < timeout:
            # Fix: back off briefly between polls; the original looped with no
            # delay, hammering the target with kill -0/pidof round-trips for
            # up to 30 seconds.
            time.sleep(0.25)

            # Verify the previous ones are no longer running
            status = self.verif_not_running(original_pids)
            if status:
                status = "Original syslog processes still running"
                continue

            status, pids = self.verify_running(names)
            if status:
                status = "New syslog processes not running"
                continue

            # Everything is fine now, so exit to continue the test
            restarted = True
            break

        msg = ('%s didn\'t appear to restart: %s' % (names, status))
        self.assertTrue(restarted, msg)

        return True

    @OETestDepends(['oe_syslog.SyslogTest.test_syslog_running'])
    def test_syslog_logger(self):
        """logger(1) output must be retrievable from the system log."""
        status, output = self.target.run('logger foobar')
        msg = "Can't log into syslog. Output: %s " % output
        self.assertEqual(status, 0, msg=msg)

        # There is no way to flush the logger to disk in all cases
        time.sleep(1)

        status, output = self.target.run('grep foobar /var/log/messages')
        if status != 0:
            # Fall back to the init-manager-specific log reader.
            if self.tc.td.get("VIRTUAL-RUNTIME_init_manager") == "systemd":
                status, output = self.target.run('journalctl -o cat | grep foobar')
            else:
                status, output = self.target.run('logread | grep foobar')
        msg = ('Test log string not found in /var/log/messages or logread.'
               ' Output: %s ' % output)
        self.assertEqual(status, 0, msg=msg)


    @OETestDepends(['oe_syslog.SyslogTest.test_syslog_running'])
    def test_syslog_restart(self):
        """Restart whichever syslog implementation is present; skip if none."""
        if self.restart_sanity(['systemd-journald'], 'systemctl restart syslog.service', pidchange=False):
            pass
        elif self.restart_sanity(['rsyslogd'], '/etc/init.d/rsyslog restart'):
            pass
        elif self.restart_sanity(['syslogd', 'klogd'], '/etc/init.d/syslog restart'):
            pass
        else:
            self.logger.info("No syslog found to restart, ignoring")


    @OETestDepends(['oe_syslog.SyslogTestConfig.test_syslog_logger'])
    @OEHasPackage(["busybox-syslog"])
    @skipIfDataVar('VIRTUAL-RUNTIME_init_manager', 'systemd',
                   'Not appropiate for systemd image')
    def test_syslog_startup_config(self):
        """Point busybox syslog at a custom LOGFILE and verify messages land there."""
        cmd = 'echo "LOGFILE=/var/log/test" >> /etc/syslog-startup.conf'
        self.target.run(cmd)

        self.test_syslog_restart()

        cmd = 'logger foobar'
        status, output = self.target.run(cmd)
        msg = 'Logger command failed, %s. Output: %s ' % (status, output)
        self.assertEqual(status, 0, msg=msg)

        cmd = 'cat /var/log/test'
        status, output = self.target.run(cmd)
        if "foobar" not in output or status:
            self.fail("'foobar' not found in logfile, status %s, contents %s" % (status, output))

        # Undo the config change and restart again so later tests see defaults.
        cmd = "sed -i 's#LOGFILE=/var/log/test##' /etc/syslog-startup.conf"
        self.target.run(cmd)
        self.test_syslog_restart()
|
||||
@@ -0,0 +1,60 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
from oeqa.utils.httpserver import HTTPService
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature, skipIfFeature
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class OpkgTest(OERuntimeTestCase):
    """Thin wrapper around the target's opkg command for package tests."""

    def pkg(self, command, expected = 0):
        """Run `opkg <command>` on target, assert its exit status, return output."""
        command = 'opkg %s' % command
        # Package operations can be slow; allow a generous 1500s timeout.
        status, output = self.target.run(command, 1500)
        self.assertEqual(status, expected,
                         os.linesep.join([command, output]))
        return output
|
||||
|
||||
class OpkgRepoTest(OpkgTest):
    """Serve the build's IPK feed over HTTP and install a package from it."""

    # NOTE(review): setUp/tearDown are declared as classmethods (per-test hooks
    # managing what looks like per-class state) — confirm this is intentional.
    @classmethod
    def setUp(cls):
        # Multilib builds have no 'all' feed; fall back to the tune arch feed.
        allarchfeed = 'all'
        if cls.tc.td["MULTILIB_VARIANTS"]:
            allarchfeed = cls.tc.td["TUNE_PKGARCH"]
        service_repo = os.path.join(cls.tc.td['DEPLOY_DIR_IPK'], allarchfeed)
        # Serve the deploy directory so the target can fetch packages from us.
        cls.repo_server = HTTPService(service_repo,
                                      '0.0.0.0', port=cls.tc.target.server_port,
                                      logger=cls.tc.logger)
        cls.repo_server.start()

    @classmethod
    def tearDown(cls):
        cls.repo_server.stop()

    def setup_source_config_for_package_install(self):
        # Append our HTTP server as an opkg feed source on the target.
        apt_get_source_server = 'http://%s:%s/' % (self.tc.target.server_ip, self.repo_server.port)
        apt_get_sourceslist_dir = '/etc/opkg/'
        self.target.run('cd %s; echo src/gz all %s >> opkg.conf' % (apt_get_sourceslist_dir, apt_get_source_server))

    def cleanup_source_config_for_package_install(self):
        # Strip every 'src' feed line we (or anyone) appended to opkg.conf.
        apt_get_sourceslist_dir = '/etc/opkg/'
        self.target.run('cd %s; sed -i "/^src/d" opkg.conf' % (apt_get_sourceslist_dir))

    @skipIfNotFeature('package-management',
                      'Test requires package-management to be in IMAGE_FEATURES')
    @skipIfNotDataVar('IMAGE_PKGTYPE', 'ipk',
                      'IPK is not the primary package manager')
    @skipIfFeature('read-only-rootfs',
                   'Test does not work with read-only-rootfs in IMAGE_FEATURES')
    @OEHasPackage(['opkg'])
    def test_opkg_install_from_repo(self):
        # Round-trip: add feed, refresh indexes, remove then reinstall a
        # package known to be in the image, then restore opkg.conf.
        self.setup_source_config_for_package_install()
        self.pkg('update')
        self.pkg('remove run-postinsts-dev')
        self.pkg('install run-postinsts-dev')
        self.cleanup_source_config_for_package_install()
|
||||
@@ -0,0 +1,40 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=287 testcase
|
||||
# Note that the image under test must have "pam" in DISTRO_FEATURES
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfNotFeature
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class PamBasicTest(OERuntimeTestCase):
    """Smoke-test PAM-enabled shadow utilities on a pam DISTRO_FEATURES image."""

    @skipIfNotFeature('pam', 'Test requires pam to be in DISTRO_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['shadow'])
    @OEHasPackage(['shadow-base'])
    def test_pam(self):
        """Each shadow tool's --help must exit with its expected status.

        Note: 'login --help' is expected to exit 1; the others exit 0.
        """
        checks = (
            ('login', 1),
            ('passwd', 0),
            ('su', 0),
            ('useradd', 0),
        )
        for tool, expected in checks:
            status, output = self.target.run('%s --help' % tool)
            msg = ('%s command does not work as expected. '
                   'Status and output:%s and %s' % (tool, status, output))
            self.assertEqual(status, expected, msg = msg)
|
||||
@@ -0,0 +1,393 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
|
||||
from subprocess import check_output
|
||||
from shutil import rmtree
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfDataVar
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
# In the future these lists could be moved outside of the module.

# Substrings that mark a log line as a potential error.
errors = ["error", "cannot", "can\'t", "failed"]

# Known-harmless messages that match one of the keywords above and must
# therefore be filtered out on every machine.
common_errors = [
    "(WW) warning, (EE) error, (NI) not implemented, (??) unknown.",
    "dma timeout",
    "can\'t add hid device:",
    "usbhid: probe of ",
    "_OSC failed (AE_ERROR)",
    "_OSC failed (AE_SUPPORT)",
    "AE_ALREADY_EXISTS",
    "ACPI _OSC request failed (AE_SUPPORT)",
    "can\'t disable ASPM",
    "Failed to load module \"vesa\"",
    "Failed to load module vesa",
    "Failed to load module \"modesetting\"",
    "Failed to load module modesetting",
    "Failed to load module \"glx\"",
    "Failed to load module \"fbdev\"",
    "Failed to load module fbdev",
    "Failed to load module glx",
    "[drm] Cannot find any crtc or sizes",
    "_OSC failed (AE_NOT_FOUND); disabling ASPM",
    "Open ACPI failed (/var/run/acpid.socket) (No such file or directory)",
    "NX (Execute Disable) protection cannot be enabled: non-PAE kernel!",
    "hd.: possibly failed opcode",
    'NETLINK INITIALIZATION FAILED',
    'kernel: Cannot find map file',
    'omap_hwmod: debugss: _wait_target_disable failed',
    'VGA arbiter: cannot open kernel arbiter, no multi-card support',
    'Failed to find URL:http://ipv4.connman.net/online/status.html',
    'Online check failed for',
    'netlink init failed',
    'Fast TSC calibration',
    "BAR 0-9",
    "Failed to load module \"ati\"",
    "controller can't do DEVSLP, turning off",
    "stmmac_dvr_probe: warning: cannot get CSR clock",
    "error: couldn\'t mount because of unsupported optional features",
    "GPT: Use GNU Parted to correct GPT errors",
    "Cannot set xattr user.Librepo.DownloadInProgress",
    "Failed to read /var/lib/nfs/statd/state: Success",
    "error retry time-out =",
    "logind: cannot setup systemd-logind helper (-61), using legacy fallback",
    "Failed to rename network interface",
    "Failed to process device, ignoring: Device or resource busy",
    "Cannot find a map file",
    "[rdrand]: Initialization Failed",
    "[rndr ]: Initialization Failed",
    "[pulseaudio] authkey.c: Failed to open cookie file",
    "[pulseaudio] authkey.c: Failed to load authentication key",
    "was skipped because of a failed condition check",
    "was skipped because all trigger condition checks failed",
    "xf86OpenConsole: Switching VT failed",
    "Failed to read LoaderConfigTimeoutOneShot variable, ignoring: Operation not supported",
    "Failed to read LoaderEntryOneShot variable, ignoring: Operation not supported",
    ]

# Extra messages appended to every machine's ignore list on LSB images
# (see ParseLogsTest.setUpClass); currently empty.
video_related = [
]

# Messages harmless on physical/emulated x86 machines.
x86_common = [
    '[drm:psb_do_init] *ERROR* Debug is',
    'wrong ELF class',
    'Could not enable PowerButton event',
    'probe of LNXPWRBN:00 failed with error -22',
    'pmd_set_huge: Cannot satisfy',
    'failed to setup card detect gpio',
    'amd_nb: Cannot enumerate AMD northbridges',
    'failed to retrieve link info, disabling eDP',
    'Direct firmware load for iwlwifi',
    'Direct firmware load for regulatory.db',
    'failed to load regulatory.db',
] + common_errors

# Messages harmless on the qemux86/qemux86-64 emulated machines.
qemux86_common = [
    'wrong ELF class',
    "fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.",
    "can't claim BAR ",
    'amd_nb: Cannot enumerate AMD northbridges',
    'tsc: HPET/PMTIMER calibration failed',
    "modeset(0): Failed to initialize the DRI2 extension",
    "glamor initialization failed",
    "blk_update_request: I/O error, dev fd0, sector 0 op 0x0:(READ)",
    "floppy: error",
    'failed to IDENTIFY (I/O error, err_mask=0x4)',
] + common_errors

# Map of MACHINE name -> full list of messages to ignore on that machine.
# 'default' is used when the current MACHINE has no entry of its own.
ignore_errors = {
    'default' : common_errors,
    'qemux86' : [
        'Failed to access perfctr msr (MSR',
        'pci 0000:00:00.0: [Firmware Bug]: reg 0x..: invalid BAR (can\'t size)',
        ] + qemux86_common,
    'qemux86-64' : qemux86_common,
    'qemumips' : [
        'Failed to load module "glx"',
        'pci 0000:00:00.0: [Firmware Bug]: reg 0x..: invalid BAR (can\'t size)',
        'cacheinfo: Failed to find cpu0 device node',
        ] + common_errors,
    'qemumips64' : [
        'pci 0000:00:00.0: [Firmware Bug]: reg 0x..: invalid BAR (can\'t size)',
        'cacheinfo: Failed to find cpu0 device node',
        ] + common_errors,
    'qemuppc' : [
        'PCI 0000:00 Cannot reserve Legacy IO [io 0x0000-0x0fff]',
        'host side 80-wire cable detection failed, limiting max speed',
        'mode "640x480" test failed',
        'Failed to load module "glx"',
        'can\'t handle BAR above 4GB',
        'Cannot reserve Legacy IO',
        ] + common_errors,
    'qemuppc64' : [
        'vio vio: uevent: failed to send synthetic uevent',
        'synth uevent: /devices/vio: failed to send uevent',
        'PCI 0000:00 Cannot reserve Legacy IO [io 0x10000-0x10fff]',
        ] + common_errors,
    'qemuarmv5' : [
        'mmci-pl18x: probe of fpga:05 failed with error -22',
        'mmci-pl18x: probe of fpga:0b failed with error -22',
        'Failed to load module "glx"',
        'OF: amba_device_add() failed (-19) for /amba/smc@10100000',
        'OF: amba_device_add() failed (-19) for /amba/mpmc@10110000',
        'OF: amba_device_add() failed (-19) for /amba/sctl@101e0000',
        'OF: amba_device_add() failed (-19) for /amba/watchdog@101e1000',
        'OF: amba_device_add() failed (-19) for /amba/sci@101f0000',
        'OF: amba_device_add() failed (-19) for /amba/spi@101f4000',
        'OF: amba_device_add() failed (-19) for /amba/ssp@101f4000',
        'OF: amba_device_add() failed (-19) for /amba/fpga/sci@a000',
        'Failed to initialize \'/amba/timer@101e3000\': -22',
        'jitterentropy: Initialization failed with host not compliant with requirements: 2',
        'clcd-pl11x: probe of 10120000.display failed with error -2',
        'arm-charlcd 10008000.lcd: error -ENXIO: IRQ index 0 not found'
        ] + common_errors,
    'qemuarm64' : [
        'Fatal server error:',
        '(EE) Server terminated with error (1). Closing log file.',
        'dmi: Firmware registration failed.',
        'irq: type mismatch, failed to map hwirq-27 for /intc',
        'logind: failed to get session seat',
        ] + common_errors,
    'intel-core2-32' : [
        'ACPI: No _BQC method, cannot determine initial brightness',
        '[Firmware Bug]: ACPI: No _BQC method, cannot determine initial brightness',
        '(EE) Failed to load module "psb"',
        '(EE) Failed to load module psb',
        '(EE) Failed to load module "psbdrv"',
        '(EE) Failed to load module psbdrv',
        '(EE) open /dev/fb0: No such file or directory',
        '(EE) AIGLX: reverting to software rendering',
        'dmi: Firmware registration failed.',
        'ioremap error for 0x78',
        ] + x86_common,
    'intel-corei7-64' : [
        'can\'t set Max Payload Size to 256',
        'intel_punit_ipc: can\'t request region for resource',
        '[drm] parse error at position 4 in video mode \'efifb\'',
        'ACPI Error: Could not enable RealTimeClock event',
        'ACPI Warning: Could not enable fixed event - RealTimeClock',
        'hci_intel INT33E1:00: Unable to retrieve gpio',
        'hci_intel: probe of INT33E1:00 failed',
        'can\'t derive routing for PCI INT A',
        'failed to read out thermal zone',
        'Bluetooth: hci0: Setting Intel event mask failed',
        'ttyS2 - failed to request DMA',
        'Bluetooth: hci0: Failed to send firmware data (-38)',
        'atkbd serio0: Failed to enable keyboard on isa0060/serio0',
        ] + x86_common,
    'genericx86' : x86_common,
    'genericx86-64' : [
        'Direct firmware load for i915',
        'Failed to load firmware i915',
        'Failed to fetch GuC',
        'Failed to initialize GuC',
        'Failed to load DMC firmware',
        'The driver is built-in, so to load the firmware you need to',
        ] + x86_common,
    'edgerouter' : [
        'not creating \'/sys/firmware/fdt\'',
        'Failed to find cpu0 device node',
        'Fatal server error:',
        'Server terminated with error',
        ] + common_errors,
    'beaglebone-yocto' : [
        'Direct firmware load for regulatory.db',
        'failed to load regulatory.db',
        'l4_wkup_cm',
        'Failed to load module "glx"',
        'Failed to make EGL context current',
        'glamor initialization failed',
        ] + common_errors,
}

# Where to look for logs on the target; /tmp/dmesg_output.log is produced
# by ParseLogsTest.write_dmesg() before parsing starts.
log_locations = ["/var/log/","/var/log/dmesg", "/tmp/dmesg_output.log"]
||||
|
||||
class ParseLogsTest(OERuntimeTestCase):
    # Scans target log files (and dmesg) for error keywords, filtering out
    # per-machine messages known to be harmless, and fails if anything
    # suspicious remains.

    @classmethod
    def setUpClass(cls):
        """Assemble the error keyword list and per-machine ignore tables."""
        cls.errors = errors

        # When systemd is enabled we need to notice errors on
        # circular dependencies in units.
        if 'systemd' in cls.td.get('DISTRO_FEATURES', ''):
            cls.errors.extend([
                'Found ordering cycle on',
                'Breaking ordering cycle by deleting job',
                'deleted to break ordering cycle',
                'Ordering cycle found, skipping',
                ])

        cls.ignore_errors = ignore_errors
        cls.log_locations = log_locations
        cls.msg = ''
        # NOTE(review): presence of LSB_Test.sh is used as a proxy for an
        # LSB image; on such images video-related messages are also ignored.
        is_lsb, _ = cls.tc.target.run("which LSB_Test.sh")
        if is_lsb == 0:
            for machine in cls.ignore_errors:
                cls.ignore_errors[machine] = cls.ignore_errors[machine] \
                        + video_related

    def getMachine(self):
        """Return the MACHINE name from test data ('' if unset)."""
        return self.td.get('MACHINE', '')

    def getWorkdir(self):
        """Return the build WORKDIR from test data ('' if unset)."""
        return self.td.get('WORKDIR', '')

    # Get some information on the CPU of the machine to display at the
    # beginning of the output. This info might be useful in some cases.
    def getHardwareInfo(self):
        """Collect a human-readable CPU/arch summary from the target."""
        hwi = ""
        # CPU model: second ':'-separated field of the "model name" line.
        cmd = ('cat /proc/cpuinfo | grep "model name" | head -n1 | '
               " awk 'BEGIN{FS=\":\"}{print $2}'")
        _, cpu_name = self.target.run(cmd)

        # Physical core count from the "cpu cores" line.
        cmd = ('cat /proc/cpuinfo | grep "cpu cores" | head -n1 | '
               "awk {'print $4'}")
        _, cpu_physical_cores = self.target.run(cmd)

        # Logical core count = number of "processor" entries.
        cmd = 'cat /proc/cpuinfo | grep "processor" | wc -l'
        _, cpu_logical_cores = self.target.run(cmd)

        _, cpu_arch = self.target.run('uname -m')

        hwi += 'Machine information: \n'
        hwi += '*******************************\n'
        hwi += 'Machine name: ' + self.getMachine() + '\n'
        hwi += 'CPU: ' + str(cpu_name) + '\n'
        hwi += 'Arch: ' + str(cpu_arch)+ '\n'
        hwi += 'Physical cores: ' + str(cpu_physical_cores) + '\n'
        hwi += 'Logical cores: ' + str(cpu_logical_cores) + '\n'
        hwi += '*******************************\n'

        return hwi

    # Go through the log locations provided and if it's a folder
    # create a list with all the .log files in it, if it's a file
    # just add it to that list.
    def getLogList(self, log_locations):
        """Expand *log_locations* into a flat list of log file paths on target."""
        logs = []
        for location in log_locations:
            status, _ = self.target.run('test -f ' + str(location))
            if status == 0:
                logs.append(str(location))
            else:
                status, _ = self.target.run('test -d ' + str(location))
                if status == 0:
                    # Only *.log files directly inside the directory.
                    cmd = 'find ' + str(location) + '/*.log -maxdepth 1 -type f'
                    status, output = self.target.run(cmd)
                    if status == 0:
                        output = output.splitlines()
                        for logfile in output:
                            logs.append(os.path.join(location, str(logfile)))
        return logs

    # Copy the log files to be parsed locally
    def transfer_logs(self, log_list):
        """Copy *log_list* from the target into WORKDIR/target_logs (recreated)."""
        workdir = self.getWorkdir()
        self.target_logs = workdir + '/' + 'target_logs'
        target_logs = self.target_logs
        # Start from a clean directory so stale logs from a previous run
        # cannot leak into this run's results.
        if os.path.exists(target_logs):
            rmtree(self.target_logs)
        os.makedirs(target_logs)
        for f in log_list:
            self.target.copyFrom(str(f), target_logs)

    # Get the local list of logs
    def get_local_log_list(self, log_locations):
        """Transfer the target logs locally and return their local paths."""
        self.transfer_logs(self.getLogList(log_locations))
        list_dir = os.listdir(self.target_logs)
        dir_files = [os.path.join(self.target_logs, f) for f in list_dir]
        logs = [f for f in dir_files if os.path.isfile(f)]
        return logs

    # Build the grep command to be used with filters and exclusions
    def build_grepcmd(self, errors, ignore_errors, log):
        """Return a shell pipeline: grep *log* for error keywords, then
        filter out the current machine's ignore list with 'grep -Eiv'."""
        grepcmd = 'grep '
        grepcmd += '-Ei "'
        # \< \> word boundaries avoid substring false-positives.
        for error in errors:
            grepcmd += r'\<' + error + r'\>' + '|'
        grepcmd = grepcmd[:-1]  # drop trailing '|'
        grepcmd += '" ' + str(log) + " | grep -Eiv \'"

        try:
            errorlist = ignore_errors[self.getMachine()]
        except KeyError:
            self.msg += 'No ignore list found for this machine, using default\n'
            errorlist = ignore_errors['default']

        for ignore_error in errorlist:
            # Escape regex metacharacters in the (literal) ignore strings.
            # Single quotes become '.' because the whole exclusion pattern
            # is wrapped in single quotes for the shell.
            ignore_error = ignore_error.replace('(', r'\(')
            ignore_error = ignore_error.replace(')', r'\)')
            ignore_error = ignore_error.replace("'", '.')
            ignore_error = ignore_error.replace('?', r'\?')
            ignore_error = ignore_error.replace('[', r'\[')
            ignore_error = ignore_error.replace(']', r'\]')
            ignore_error = ignore_error.replace('*', r'\*')
            ignore_error = ignore_error.replace('0-9', '[0-9]')
            grepcmd += ignore_error + '|'
        grepcmd = grepcmd[:-1]  # drop trailing '|'
        grepcmd += "\'"

        return grepcmd

    # Grep only the errors so that their context could be collected.
    # Default context is 10 lines before and after the error itself
    def parse_logs(self, errors, ignore_errors, logs,
                   lines_before = 10, lines_after = 10):
        """Return {log: {matched line: context}} for every error found."""
        results = {}
        rez = []
        grep_output = ''

        for log in logs:
            result = None
            thegrep = self.build_grepcmd(errors, ignore_errors, log)

            try:
                result = check_output(thegrep, shell=True).decode('utf-8')
            except:
                # grep exits non-zero when nothing matches; treat as clean.
                pass

            if result is not None:
                results[log] = {}
                rez = result.splitlines()

                for xrez in rez:
                    try:
                        # Re-grep for the exact (-F) line to capture its
                        # surrounding context in the log.
                        cmd = ['grep', '-F', xrez, '-B', str(lines_before)]
                        cmd += ['-A', str(lines_after), log]
                        grep_output = check_output(cmd).decode('utf-8')
                    except:
                        # On failure the previous grep_output is reused;
                        # NOTE(review): likely unintended, but preserved.
                        pass
                    results[log][xrez]=grep_output

        return results

    # Get the output of dmesg and write it in a file.
    # This file is added to log_locations.
    def write_dmesg(self):
        """Dump dmesg to /tmp/dmesg_output.log on the target (status ignored)."""
        (status, dmesg) = self.target.run('dmesg > /tmp/dmesg_output.log')

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_parselogs(self):
        """Fail if any non-ignored error keyword appears in the target logs."""
        self.write_dmesg()
        log_list = self.get_local_log_list(self.log_locations)
        result = self.parse_logs(self.errors, self.ignore_errors, log_list)
        print(self.getHardwareInfo())
        errcount = 0
        # Accumulate a readable report in self.msg; it becomes the
        # assertion message if any error was found.
        for log in result:
            self.msg += 'Log: ' + log + '\n'
            self.msg += '-----------------------\n'
            for error in result[log]:
                errcount += 1
                self.msg += 'Central error: ' + str(error) + '\n'
                self.msg += '***********************\n'
                self.msg += result[str(log)][str(error)] + '\n'
                self.msg += '***********************\n'
        self.msg += '%s errors found in logs.' % errcount
        self.assertEqual(errcount, 0, msg=self.msg)
||||
@@ -0,0 +1,19 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class PerlTest(OERuntimeTestCase):
    """Verify that the perl interpreter on the target actually executes code."""

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['perl'])
    def test_perl_works(self):
        # Run a ROT13 one-liner on the target; decoding "Uryyb, jbeyq"
        # proves perl parsed and executed the script rather than merely
        # starting up.
        rot13_oneliner = "perl -e '$_=\"Uryyb, jbeyq\"; tr/a-zA-Z/n-za-mN-ZA-M/;print'"
        status, output = self.target.run(rot13_oneliner)
        self.assertEqual(status, 0)
        self.assertEqual(output, "Hello, world")
||||
@@ -0,0 +1,35 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from subprocess import Popen, PIPE
|
||||
from time import sleep
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.oetimeout import OETimeout
|
||||
from oeqa.core.exception import OEQATimeoutError
|
||||
|
||||
class PingTest(OERuntimeTestCase):
    """Basic network reachability check for the target."""

    @OETimeout(30)
    def test_ping(self):
        """Ping the target and require 5 consecutive successful replies.

        A failed ping resets the success streak, so flaky connectivity only
        passes if it eventually yields 5 successes in a row. The whole
        attempt is bounded by the 30 second OETimeout.
        """
        output = ''
        count = 0
        self.assertNotEqual(len(self.target.ip), 0, msg="No target IP address set")
        # Use an argument list with shell=False: there is no shell syntax in
        # the command, so going through /bin/sh is unnecessary and would
        # allow shell interpretation of the configured IP string.
        cmd = ['ping', '-c', '1', self.target.ip]
        try:
            while count < 5:
                proc = Popen(cmd, stdout=PIPE)
                output += proc.communicate()[0].decode('utf-8')
                if proc.poll() == 0:
                    count += 1
                else:
                    # Reset the streak and back off briefly before retrying.
                    count = 0
                    sleep(1)
        except OEQATimeoutError:
            self.fail("Ping timeout error for address %s, count %s, output: %s" % (self.target.ip, count, output))
        msg = ('Expected 5 consecutive, got %d.\n'
               'ping output is:\n%s' % (count,output))
        self.assertEqual(count, 5, msg = msg)
||||
@@ -0,0 +1,120 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import unittest
|
||||
import pprint
|
||||
import datetime
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfNotFeature
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
from oeqa.utils.logparser import PtestParser
|
||||
|
||||
class PtestRunnerTest(OERuntimeTestCase):
    # Runs ptest-runner on the target, archives its log under TEST_LOG_DIR
    # and converts the parsed results into tc.extraresults entries.

    @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['ptest-runner'])
    @unittest.expectedFailure
    def test_ptestrunner_expectfail(self):
        """Run ptests when failures are expected (PTEST_EXPECT_FAILURE set)."""
        if not self.td.get('PTEST_EXPECT_FAILURE'):
            self.skipTest('Cannot run ptests with @expectedFailure as ptests are required to pass')
        self.do_ptestrunner()

    @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['ptest-runner'])
    def test_ptestrunner_expectsuccess(self):
        """Run ptests when all of them are required to pass."""
        if self.td.get('PTEST_EXPECT_FAILURE'):
            self.skipTest('Cannot run ptests without @expectedFailure as ptests are expected to fail')
        self.do_ptestrunner()

    def do_ptestrunner(self):
        """Execute ptest-runner on the target, then archive and parse its log.

        Fails on OOM-killed processes, failed test cases, or ptests that
        produced no results at all.
        """
        status, output = self.target.run('which ptest-runner', 0)
        if status != 0:
            self.skipTest("No -ptest packages are installed in the image")

        test_log_dir = self.td.get('TEST_LOG_DIR', '')
        # The TEST_LOG_DIR maybe NULL when testimage is added after
        # testdata.json is generated.
        if not test_log_dir:
            test_log_dir = os.path.join(self.td.get('WORKDIR', ''), 'testimage')
        # Make the test output path absolute, otherwise the output content will be
        # created relative to current directory
        if not os.path.isabs(test_log_dir):
            test_log_dir = os.path.join(self.td.get('TOPDIR', ''), test_log_dir)
        # Don't use self.td.get('DATETIME'), it's from testdata.json, not
        # up-to-date, and may cause "File exists" when re-reun.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        # ptest_log_dir is timestamped; ptest_log_dir_link is a stable
        # symlink always pointing at the most recent run.
        ptest_log_dir_link = os.path.join(test_log_dir, 'ptest_log')
        ptest_log_dir = '%s.%s' % (ptest_log_dir_link, timestamp)
        ptest_runner_log = os.path.join(ptest_log_dir, 'ptest-runner.log')

        libdir = self.td.get('libdir', '')
        ptest_dirs = [ '/usr/lib' ]
        if not libdir in ptest_dirs:
            ptest_dirs.append(libdir)
        # -t 450 sets the per-test timeout (seconds) for ptest-runner.
        status, output = self.target.run('ptest-runner -t 450 -d \"{}\"'.format(' '.join(ptest_dirs)), 0)
        os.makedirs(ptest_log_dir)
        with open(ptest_runner_log, 'w') as f:
            f.write(output)

        # status != 0 is OK since some ptest tests may fail
        self.assertTrue(status != 127, msg="Cannot execute ptest-runner!")

        if not hasattr(self.tc, "extraresults"):
            self.tc.extraresults = {}
        extras = self.tc.extraresults
        extras['ptestresult.rawlogs'] = {'log': output}

        # Parse and save results
        parser = PtestParser()
        results, sections = parser.parse(ptest_runner_log)
        parser.results_as_files(ptest_log_dir)
        if os.path.exists(ptest_log_dir_link):
            # Remove the old link to create a new one
            os.remove(ptest_log_dir_link)
        os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)

        extras['ptestresult.sections'] = sections

        zerolength = []
        # Normalize test names: parentheses become underscores and runs of
        # whitespace collapse to single underscores.
        trans = str.maketrans("()", "__")
        for section in results:
            for test in results[section]:
                result = results[section][test]
                testname = "ptestresult." + (section or "No-section") + "." + "_".join(test.translate(trans).split())
                extras[testname] = {'status': result}
            if not results[section]:
                zerolength.append(section)

        failed_tests = {}

        # A section with an 'exitcode' entry means the ptest itself exited
        # abnormally; record its whole log.
        for section in sections:
            if 'exitcode' in sections[section].keys():
                failed_tests[section] = sections[section]["log"]

        for section in results:
            failed_testcases = [ "_".join(test.translate(trans).split()) for test in results[section] if results[section][test] == 'FAILED' ]
            if failed_testcases:
                failed_tests[section] = failed_testcases

        failmsg = ""
        status, output = self.target.run('dmesg | grep "Killed process"', 0)
        if output:
            failmsg = "ERROR: Processes were killed by the OOM Killer:\n%s\n" % output

        if failed_tests:
            failmsg = failmsg + "\nFailed ptests:\n%s\n" % pprint.pformat(failed_tests)

        if zerolength:
            failmsg = failmsg + "\nptests which had no test results:\n%s" % pprint.pformat(zerolength)

        if failmsg:
            self.logger.warning("There were failing ptests.")
            self.fail(failmsg)
||||
@@ -0,0 +1,21 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class PythonTest(OERuntimeTestCase):
    """Verify that the python3 interpreter on the target executes code."""

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['python3-core'])
    def test_python3(self):
        # Decoding a ROT13 string proves the interpreter ran the script
        # (and imported a stdlib module) rather than merely starting up.
        cmd = "python3 -c \"import codecs; print(codecs.encode('Uryyb, jbeyq', 'rot13'))\""
        status, output = self.target.run(cmd)
        self.assertEqual(status, 0,
                         msg='Exit status was not 0. Output: %s' % output)
        self.assertEqual(output, "Hello, world",
                         msg='Incorrect output: %s' % output)
||||
@@ -0,0 +1,145 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
import fnmatch
|
||||
import time
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfDataVar
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
from oeqa.core.utils.path import findFile
|
||||
|
||||
class RpmBasicTest(OERuntimeTestCase):
    # Basic sanity checks for the rpm tool on the target.

    @OEHasPackage(['rpm'])
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_rpm_help(self):
        """'rpm --help' must run and exit successfully."""
        status, output = self.target.run('rpm --help')
        msg = 'status and output: %s and %s' % (status, output)
        self.assertEqual(status, 0, msg=msg)

    @OETestDepends(['rpm.RpmBasicTest.test_rpm_help'])
    def test_rpm_query(self):
        """Query the rpm package from the on-target rpm database."""
        # Skip rather than fail when the image has no rpm database.
        status, output = self.target.run('ls /var/lib/rpm/')
        if status != 0:
            self.skipTest('No /var/lib/rpm on target')
        status, output = self.target.run('rpm -q rpm')
        msg = 'status and output: %s and %s' % (status, output)
        self.assertEqual(status, 0, msg=msg)

    @OETestDepends(['rpm.RpmBasicTest.test_rpm_query'])
    def test_rpm_query_nonroot(self):
        """'rpm -qa' must also work for a freshly created non-root user."""

        def set_up_test_user(u):
            # Create user *u* only if it does not already exist.
            status, output = self.target.run('id -u %s' % u)
            if status:
                status, output = self.target.run('useradd %s' % u)
                msg = 'Failed to create new user: %s' % output
                self.assertTrue(status == 0, msg=msg)

        def exec_as_test_user(u):
            # Confirm 'su' to the user works, then run the rpm query as them.
            status, output = self.target.run('su -c id %s' % u)
            msg = 'Failed to execute as new user'
            self.assertTrue("({0})".format(u) in output, msg=msg)

            status, output = self.target.run('su -c "rpm -qa" %s ' % u)
            msg = 'status: %s. Cannot run rpm -qa: %s' % (status, output)
            self.assertEqual(status, 0, msg=msg)

        def wait_for_no_process_for_user(u, timeout = 120):
            # Poll 'ps' until no process owned by *u* remains; 'userdel -r'
            # fails while the user still has running processes.
            timeout_at = time.time() + timeout
            while time.time() < timeout_at:
                _, output = self.target.run(self.tc.target_cmds['ps'])
                if u + ' ' not in output:
                    return
                time.sleep(1)
            user_pss = [ps for ps in output.split("\n") if u + ' ' in ps]
            msg = "User %s has processes still running: %s" % (u, "\n".join(user_pss))
            self.fail(msg=msg)

        def unset_up_test_user(u):
            # ensure no test1 process in running
            wait_for_no_process_for_user(u)
            status, output = self.target.run('userdel -r %s' % u)
            msg = 'Failed to erase user: %s' % output
            self.assertTrue(status == 0, msg=msg)

        tuser = 'test1'

        try:
            set_up_test_user(tuser)
            exec_as_test_user(tuser)
        finally:
            # Always remove the temporary user, even if the checks failed.
            unset_up_test_user(tuser)
||||
|
||||
|
||||
class RpmInstallRemoveTest(OERuntimeTestCase):
    """Install/remove cycle tests using a locally built base-passwd-doc rpm."""

    @classmethod
    def setUpClass(cls):
        """Locate a base-passwd-doc rpm in the deploy directory.

        Pick base-passwd-doc as a test file to get installed, because it's
        small and it will always be built for standard targets. If no rpm
        deploy directory exists, cls.test_file/cls.dst are left unset and
        the tests will error when they try to use them.
        """
        pkgarch = cls.td['TUNE_PKGARCH'].replace('-', '_')
        rpmdir = os.path.join(cls.tc.td['DEPLOY_DIR'], 'rpm', pkgarch)
        rpm_doc = 'base-passwd-doc-*.%s.rpm' % pkgarch
        if not os.path.exists(rpmdir):
            return
        for f in fnmatch.filter(os.listdir(rpmdir), rpm_doc):
            cls.test_file = os.path.join(rpmdir, f)
        cls.dst = '/tmp/base-passwd-doc.rpm'

    @OETestDepends(['rpm.RpmBasicTest.test_rpm_query'])
    def test_rpm_install(self):
        """Copy the rpm to the target and install it with 'rpm -ivh'."""
        self.tc.target.copyTo(self.test_file, self.dst)
        status, output = self.target.run('rpm -ivh /tmp/base-passwd-doc.rpm')
        msg = 'Failed to install base-passwd-doc package: %s' % output
        self.assertEqual(status, 0, msg=msg)
        self.tc.target.run('rm -f %s' % self.dst)

    @OETestDepends(['rpm.RpmInstallRemoveTest.test_rpm_install'])
    def test_rpm_remove(self):
        """Remove the package installed by test_rpm_install."""
        status,output = self.target.run('rpm -e base-passwd-doc')
        msg = 'Failed to remove base-passwd-doc package: %s' % output
        self.assertEqual(status, 0, msg=msg)

    @OETestDepends(['rpm.RpmInstallRemoveTest.test_rpm_remove'])
    def test_check_rpm_install_removal_log_file_size(self):
        """
        Summary:     Check that repeated rpm install/removal cycles succeed
        Expected:    10 consecutive install/remove cycles all succeed
        Product:     BSPs
        Author:      Alexandru Georgescu <alexandru.c.georgescu@intel.com>
        Author:      Alexander Kanavin <alex.kanavin@gmail.com>
        AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
        """
        # NOTE(review): the original docstring claimed this test verifies
        # RPM entries in /var/log/messages, but the corresponding command
        # ('grep RPM /var/log/messages | wc -l') was assigned to a local
        # and never executed; the dead code has been removed.
        db_files_cmd = 'ls /var/lib/rpm/rpmdb.sqlite*'

        # Make sure that some database files are under /var/lib/rpm as 'rpmdb.sqlite'
        status, output = self.target.run(db_files_cmd)
        msg = 'Failed to find database files under /var/lib/rpm/ as rpmdb.sqlite'
        self.assertEqual(0, status, msg=msg)

        self.tc.target.copyTo(self.test_file, self.dst)

        # Remove the package just in case
        self.target.run('rpm -e base-passwd-doc')

        # Install/Remove a package 10 times
        for i in range(10):
            status, output = self.target.run('rpm -ivh /tmp/base-passwd-doc.rpm')
            msg = 'Failed to install base-passwd-doc package. Reason: {}'.format(output)
            self.assertEqual(0, status, msg=msg)

            status, output = self.target.run('rpm -e base-passwd-doc')
            msg = 'Failed to remove base-passwd-doc package. Reason: {}'.format(output)
            self.assertEqual(0, status, msg=msg)

        self.tc.target.run('rm -f %s' % self.dst)
||||
|
||||
|
||||
@@ -0,0 +1,19 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
|
||||
class RtTest(OERuntimeTestCase):
    """Check for a real-time (PREEMPT_RT) kernel on the target."""

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_is_rt(self):
        """
        Check that the kernel has CONFIG_PREEMPT_RT enabled.
        """
        status, output = self.target.run("uname -a")
        self.assertEqual(status, 0, msg=output)
        # Tokenize the uname output before checking so that a substring of
        # some other token cannot produce a false positive.
        uname_tokens = output.split()
        self.assertIn("PREEMPT_RT", uname_tokens)
||||
@@ -0,0 +1,45 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfFeature
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
import re
|
||||
|
||||
class RTCTest(OERuntimeTestCase):
    # Verifies that the hardware clock (RTC) can be read and written.

    def setUp(self):
        # systemd-timesyncd would compete with the test for control of the
        # system clock, so stop it for the duration on systemd images.
        if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
            self.logger.debug('Stopping systemd-timesyncd daemon')
            self.target.run('systemctl disable --now --runtime systemd-timesyncd')

    def tearDown(self):
        # Restore time synchronisation on systemd images.
        if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
            self.logger.debug('Starting systemd-timesyncd daemon')
            self.target.run('systemctl enable --now --runtime systemd-timesyncd')

    @skipIfFeature('read-only-rootfs',
                   'Test does not work with read-only-rootfs in IMAGE_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['coreutils', 'busybox'])
    def test_rtc(self):
        """Set the RTC to a known date, verify it, then restore the clocks."""
        (status, output) = self.target.run('hwclock -r')
        self.assertEqual(status, 0, msg='Failed to get RTC time, output: %s' % output)

        # Remember the current system time (MMDDhhmmYYYY, the format 'date'
        # accepts for setting) so it can be restored afterwards.
        (status, current_datetime) = self.target.run('date +"%m%d%H%M%Y"')
        self.assertEqual(status, 0, msg='Failed to get system current date & time, output: %s' % current_datetime)

        # 2008-06-23 09:45 encoded as MMDDhhmmYYYY.
        example_datetime = '062309452008'
        (status, output) = self.target.run('date %s ; hwclock -w ; hwclock -r' % example_datetime)
        # Seconds are matched with '..' since they tick between -w and -r.
        check_hwclock = re.search('2008-06-23 09:45:..', output)
        self.assertTrue(check_hwclock, msg='The RTC time was not set correctly, output: %s' % output)

        (status, output) = self.target.run('date %s' % current_datetime)
        self.assertEqual(status, 0, msg='Failed to reset system date & time, output: %s' % output)

        (status, output) = self.target.run('hwclock -w')
        self.assertEqual(status, 0, msg='Failed to reset RTC time, output: %s' % output)
||||
@@ -0,0 +1,27 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
|
||||
import time
|
||||
|
||||
class RunLevel_Test(OERuntimeTestCase):
    """Switch SysV runlevels on the target and sanity-check each one."""

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_runlevel_3(self):
        # 'runlevel' prints "<previous> <current>"; we expect to come from 5.
        expected = '5 3'
        status, output = self.target.run("init 3 && sleep 5 && runlevel")
        self.assertEqual(output, expected, msg='Failed to set current runlevel to runlevel 3, current runlevel : %s' % output[-1])
        # A basic command should still work in multi-user mode.
        status, output = self.target.run("uname -a")
        self.assertEqual(status, 0, msg='Failed to run uname command, output: %s' % output)

    @OETestDepends(['runlevel.RunLevel_Test.test_runlevel_3'])
    def test_runlevel_5(self):
        expected = '3 5'
        status, output = self.target.run("init 5 && sleep 5 && runlevel")
        self.assertEqual(output, expected, msg='Failed to set current runlevel to runlevel 5, current runlevel : %s' % output[-1])
        # Runlevel 5 implies a graphical target: exercise the X server briefly.
        status, output = self.target.run('export DISPLAY=:0 && x11perf -aa10text')
        self.assertEqual(status, 0, msg='Failed to run 2D graphic test, output: %s' % output)
|
||||
@@ -0,0 +1,62 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class RustCompileTest(OERuntimeTestCase):
    """Compile and run Rust programs on the target with rustc and cargo."""

    @classmethod
    def setUp(cls):
        # NOTE(review): no module-level 'import os' is visible in this file's
        # import section, yet os.path.join is used below — import locally so
        # the lookup cannot NameError. Confirm against the full module.
        import os
        dst = '/tmp/'
        src = os.path.join(cls.tc.files_dir, 'test.rs')
        cls.tc.target.copyTo(src, dst)

    @classmethod
    def tearDown(cls):
        # Remove the copied source, the rustc binary and the cargo project.
        files = '/tmp/test.rs /tmp/test'
        cls.tc.target.run('rm %s' % files)
        dirs = '/tmp/hello'
        cls.tc.target.run('rm -r %s' % dirs)

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['rust'])
    def test_rust_compile(self):
        # Compile the shipped test.rs directly with rustc ...
        status, output = self.target.run('rustc /tmp/test.rs -o /tmp/test')
        msg = 'rust compile failed, output: %s' % output
        self.assertEqual(status, 0, msg=msg)

        # ... and make sure the produced binary actually runs.
        status, output = self.target.run('/tmp/test')
        msg = 'running compiled file failed, output: %s' % output
        self.assertEqual(status, 0, msg=msg)

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['cargo'])
    def test_cargo_compile(self):
        # Full cargo workflow: scaffold, build and run a hello project.
        status, output = self.target.run('cargo new /tmp/hello')
        msg = 'cargo new failed, output: %s' % output
        self.assertEqual(status, 0, msg=msg)

        status, output = self.target.run('cargo build --manifest-path=/tmp/hello/Cargo.toml')
        msg = 'cargo build failed, output: %s' % output
        self.assertEqual(status, 0, msg=msg)

        status, output = self.target.run('cargo run --manifest-path=/tmp/hello/Cargo.toml')
        msg = 'running compiled file failed, output: %s' % output
        self.assertEqual(status, 0, msg=msg)
|
||||
|
||||
class RustHelloworldTest(OERuntimeTestCase):
    """Run the prebuilt rust-hello-world binary and check its output."""

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['rust-hello-world'])
    def test_rusthelloworld(self):
        status, output = self.target.run("rust-hello-world")
        # The binary must exit cleanly ...
        self.assertEqual(status, 0, msg='Exit status was not 0. Output: %s' % output)
        # ... and print exactly the canonical greeting.
        self.assertEqual(output, "Hello, world!", msg='Incorrect output: %s' % output)
|
||||
@@ -0,0 +1,39 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class SconsCompileTest(OERuntimeTestCase):
    """Build a hello-world C program on the target using SCons."""

    @classmethod
    def setUp(cls):
        # Ship the source and the SConstruct build description to the target.
        dst = '/tmp/'
        for name in ('hello.c', 'SConstruct'):
            src = os.path.join(cls.tc.runtime_files_dir, name)
            cls.tc.target.copyTo(src, dst)

    @classmethod
    def tearDown(cls):
        # Drop everything we copied over plus the build artifacts.
        files = '/tmp/hello.c /tmp/hello.o /tmp/hello /tmp/SConstruct'
        cls.tc.target.run('rm %s' % files)

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['gcc'])
    @OEHasPackage(['python3-scons'])
    def test_scons_compile(self):
        status, output = self.target.run('cd /tmp/ && scons')
        self.assertEqual(status, 0, msg='scons compile failed, output: %s' % output)

        # Run the freshly built binary to prove the build produced something usable.
        status, output = self.target.run('/tmp/hello')
        self.assertEqual(status, 0, msg='running compiled file failed, output: %s' % output)
|
||||
@@ -0,0 +1,39 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
from tempfile import mkstemp
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class ScpTest(OERuntimeTestCase):
    """Copy a ~4 MiB file to the target over scp and verify it arrived."""

    @classmethod
    def setUpClass(cls):
        # Create a 4 MiB file cheaply: seek to the last byte and write one
        # line separator, leaving the rest sparse.
        cls.tmp_fd, cls.tmp_path = mkstemp()
        with os.fdopen(cls.tmp_fd, 'w') as payload:
            payload.seek(2 ** 22 - 1)
            payload.write(os.linesep)

    @classmethod
    def tearDownClass(cls):
        # Remove the host-side temporary file.
        os.remove(cls.tmp_path)

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['openssh-scp'])
    def test_scp_file(self):
        dst = '/tmp/test_scp_file'

        status, output = self.target.copyTo(self.tmp_path, dst)
        self.assertEqual(status, 0, msg='File could not be copied. Output: %s' % output)

        # Confirm the file is visible on the target side.
        status, output = self.target.run('ls -la %s' % dst)
        self.assertEqual(status, 0, msg='SCP test failed')

        # Best-effort cleanup on the target.
        self.target.run('rm %s' % dst)
|
||||
@@ -0,0 +1,37 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284
|
||||
# testcase. Image under test must have meta-skeleton layer in bblayers and
|
||||
# IMAGE_INSTALL:append = " service" in local.conf
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfDataVar
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class SkeletonBasicTest(OERuntimeTestCase):
    """Check the meta-skeleton 'service' example package on a SysV image."""

    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['service'])
    @skipIfDataVar('VIRTUAL-RUNTIME_init_manager', 'systemd',
                   'Not appropiate for systemd image')
    def test_skeleton_availability(self):
        # Both the init script and the daemon binary must be installed.
        status, output = self.target.run('ls /etc/init.d/skeleton')
        self.assertEqual(status, 0, msg='skeleton init script not found. Output:\n%s' % output)

        status, output = self.target.run('ls /usr/sbin/skeleton-test')
        self.assertEqual(status, 0, msg='skeleton-test not found. Output:\n%s' % output)

    @OETestDepends(['skeletoninit.SkeletonBasicTest.test_skeleton_availability'])
    def test_skeleton_script(self):
        # Start the service, then look for its process in the ps listing.
        start_output = self.target.run("/etc/init.d/skeleton start")[1]
        # [s]keleton avoids grep matching its own command line.
        ps_cmd = '%s | grep [s]keleton-test' % self.tc.target_cmds['ps']
        status, ps_output = self.target.run(ps_cmd)
        msg = ('Skeleton script could not be started:'
               '\n%s\n%s' % (start_output, ps_output))
        self.assertEqual(status, 0, msg=msg)
|
||||
@@ -0,0 +1,21 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class SSHTest(OERuntimeTestCase):
    """Verify ssh access works and that we are on the expected image."""

    @OETestDepends(['ping.PingTest.test_ping'])
    @OEHasPackage(['dropbear', 'openssh-sshd'])
    def test_ssh(self):
        # Any command succeeding over ssh proves the connection works.
        status, output = self.target.run('uname -a')
        self.assertEqual(status, 0, msg='SSH Test failed: %s' % output)

        # /etc/controllerimage marks a controller image; it must be absent
        # here, so 'cat' is expected to fail with status 1.
        status, output = self.target.run('cat /etc/controllerimage')
        msg = "This isn't the right image - /etc/controllerimage " \
              "shouldn't be here %s" % output
        self.assertEqual(status, 1, msg=msg)
|
||||
@@ -0,0 +1,34 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import os
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.data import skipIfNotFeature
|
||||
from oeqa.runtime.decorator.package import OEHasPackage
|
||||
|
||||
class StapTest(OERuntimeTestCase):
    """Build and run a minimal SystemTap probe against the target kernel."""

    @skipIfNotFeature('tools-profile', 'Test requires tools-profile to be in IMAGE_FEATURES')
    @OEHasPackage(['systemtap'])
    @OEHasPackage(['gcc-symlinks'])
    @OEHasPackage(['kernel-devsrc'])
    def test_stap(self):
        try:
            # Prepare the on-target kernel source tree so stap can build modules.
            cmd = 'make -j -C /usr/src/kernel scripts prepare'
            status, output = self.target.run(cmd, 900)
            self.assertEqual(status, 0, msg='\n'.join([cmd, output]))

            # Compile the probe up to the module stage (-p4) as stap-hello.ko.
            cmd = 'stap -v -p4 -m stap-hello --disable-cache -DSTP_NO_VERREL_CHECK -e \'probe oneshot { print("Hello, "); println("SystemTap!") }\''
            status, output = self.target.run(cmd, 900)
            self.assertEqual(status, 0, msg='\n'.join([cmd, output]))

            # BUGFIX: this command was assigned but never executed, so the
            # assertions below re-checked the previous command's result.
            cmd = 'staprun -v -R -b1 stap-hello.ko'
            status, output = self.target.run(cmd, 60)
            self.assertEqual(status, 0, msg='\n'.join([cmd, output]))
            self.assertIn('Hello, SystemTap!', output, msg='\n'.join([cmd, output]))
        except Exception:
            # Dump dmesg to aid diagnosis, then re-raise so the failure is
            # not swallowed (the original bare except hid assertion errors).
            status, dmesg = self.target.run('dmesg')
            if status == 0:
                print(dmesg)
            raise
|
||||
@@ -0,0 +1,151 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
import re
|
||||
import time
|
||||
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfQemu
|
||||
|
||||
class StorageBase(OERuntimeTestCase):
    """Shared mount/read/write/umount helpers for block-device tests.

    Subclasses provide: device, mount_point, test_dir, test_file, test_msg.
    The first parameter is named 'cls' for historical reasons, but these are
    plain instance methods called through self by the concrete test classes.
    """

    def storage_mount(cls, tmo=1):
        """Mount cls.device on cls.mount_point and prepare a clean test dir."""
        (status, output) = cls.target.run('mkdir -p %s' % cls.mount_point)
        (status, output) = cls.target.run('mount %s %s' % (cls.device, cls.mount_point))
        msg = ('Mount failed: %s.' % status)
        cls.assertFalse(output, msg = msg)
        # Give the kernel a moment before checking /proc/mounts.
        time.sleep(tmo)
        (status, output) = cls.target.run('cat /proc/mounts')
        # re.escape: device paths are literal strings, not regex patterns.
        match = re.search(re.escape(cls.device), output)
        # BUGFIX: the check was guarded by 'if match:', so a missing device
        # silently passed; assert unconditionally that the device is mounted.
        msg = ('Device %s not mounted.' % cls.device)
        cls.assertTrue(match, msg = msg)

        (status, output) = cls.target.run('mkdir -p %s' % cls.test_dir)

        (status, output) = cls.target.run('rm -f %s/*' % cls.test_dir)
        msg = ('Failed to cleanup files @ %s/*' % cls.test_dir)
        cls.assertFalse(output, msg = msg)


    def storage_basic(cls):
        """Create, rename and delete a file on the mounted device."""
        # create file on device
        (status, output) = cls.target.run('touch %s/%s' % (cls.test_dir, cls.test_file))
        msg = ('File %s not created on %s' % (cls.test_file, cls.device))
        cls.assertFalse(status, msg = msg)
        # move file
        (status, output) = cls.target.run('mv %s/%s %s/%s1' %
                (cls.test_dir, cls.test_file, cls.test_dir, cls.test_file))
        msg = ('File %s not moved to %s' % (cls.test_file, cls.device))
        cls.assertFalse(status, msg = msg)
        # remove file
        (status, output) = cls.target.run('rm %s/%s1' % (cls.test_dir, cls.test_file))
        msg = ('File %s not removed on %s' % (cls.test_file, cls.device))
        cls.assertFalse(status, msg = msg)

    def storage_read(cls):
        """Verify the message written by storage_write can be read back."""
        (status, output) = cls.target.run('cat %s/%s' %
                (cls.test_dir, cls.test_file))

        match = re.search('%s' % cls.test_msg, output)
        msg = ('Test message %s not in file %s.' % (cls.test_msg, cls.test_file))
        cls.assertEqual(status, 0, msg = msg)
        # BUGFIX: 'match' was computed but never checked; also verify the
        # message content, not just the cat exit status.
        cls.assertTrue(match, msg = msg)

    def storage_write(cls):
        """Write the test message into the test file on the device."""
        # create test message in file on device
        (status, output) = cls.target.run('echo "%s" > %s/%s' %
                (cls.test_msg, cls.test_dir, cls.test_file))
        msg = ('File %s not create test message on %s' % (cls.test_file, cls.device))
        cls.assertEqual(status, 0, msg = msg)

    def storage_umount(cls, tmo=1):
        """Unmount cls.mount_point and verify the device left /proc/mounts."""
        time.sleep(tmo)
        (status, output) = cls.target.run('umount %s' % cls.mount_point)

        if status == 32:
            # already unmounted, should it fail?
            return
        else:
            msg = ('Device not unmount %s' % cls.mount_point)
            cls.assertEqual(status, 0, msg = msg)

        (status, output) = cls.target.run('cat /proc/mounts')
        match = re.search(re.escape(cls.device), output)
        # BUGFIX: the original ran assertTrue(match) only when the device was
        # STILL mounted, which always passed; assert the opposite instead.
        msg = ('Device %s still mounted.' % cls.device)
        cls.assertFalse(match, msg = msg)
|
||||
|
||||
|
||||
class UsbTest(StorageBase):
    """Mimic the manual bsp-hw.json USB storage test on real hardware."""

    @classmethod
    def setUpClass(cls):
        # Parameters consumed by the StorageBase helpers.
        cls.test_msg = "Hello World - USB"
        cls.mount_point = "/media/usb"
        cls.device = "/dev/sda1"
        cls.test_file = "usb.tst"
        cls.test_dir = os.path.join(cls.mount_point, "oeqa")

    @skipIfQemu()
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_usb_mount(self):
        # Unmount first in case a previous run left the device mounted.
        self.storage_umount(2)
        self.storage_mount(5)

    @skipIfQemu()
    @OETestDepends(['storage.UsbTest.test_usb_mount'])
    def test_usb_basic_operations(self):
        self.storage_basic()

    @skipIfQemu()
    @OETestDepends(['storage.UsbTest.test_usb_basic_operations'])
    def test_usb_basic_rw(self):
        self.storage_write()
        self.storage_read()

    @skipIfQemu()
    @OETestDepends(['storage.UsbTest.test_usb_mount'])
    def test_usb_umount(self):
        self.storage_umount(2)
|
||||
|
||||
|
||||
class MMCTest(StorageBase):
    """Mimic the manual bsp-hw.json MMC storage test on real hardware."""

    @classmethod
    def setUpClass(cls):
        # Parameters consumed by the StorageBase helpers.
        cls.test_msg = "Hello World - MMC"
        cls.mount_point = "/media/mmc"
        cls.device = "/dev/mmcblk1p1"
        cls.test_file = "mmc.tst"
        cls.test_dir = os.path.join(cls.mount_point, "oeqa")

    @skipIfQemu()
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_mmc_mount(self):
        # Unmount first in case a previous run left the card mounted.
        self.storage_umount(2)
        self.storage_mount()

    @skipIfQemu()
    @OETestDepends(['storage.MMCTest.test_mmc_mount'])
    def test_mmc_basic_operations(self):
        self.storage_basic()

    @skipIfQemu()
    @OETestDepends(['storage.MMCTest.test_mmc_basic_operations'])
    def test_mmc_basic_rw(self):
        self.storage_write()
        self.storage_read()

    @skipIfQemu()
    @OETestDepends(['storage.MMCTest.test_mmc_mount'])
    def test_mmc_umount(self):
        self.storage_umount(2)
|
||||
@@ -0,0 +1,38 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
from oeqa.runtime.case import OERuntimeTestCase
|
||||
from oeqa.core.decorator.depends import OETestDepends
|
||||
from oeqa.core.decorator.data import skipIfQemu
|
||||
import threading
|
||||
import time
|
||||
|
||||
class Suspend_Test(OERuntimeTestCase):
    """Suspend the target to RAM via rtcwake and verify it comes back."""

    def test_date(self):
        status, output = self.target.run('date')
        self.assertEqual(status, 0, msg = 'Failed to run date command, output : %s' % output)

    def test_ping(self):
        # Launch ping in the background, then confirm the process exists
        # before killing it again.
        pinger = threading.Thread(target=self.target.run, args=("ping 8.8.8.8",))
        pinger.start()
        time.sleep(2)

        status, output = self.target.run('pidof ping')
        self.target.run('kill -9 %s' % output)
        self.assertEqual(status, 0, msg = 'Not able to find process that runs ping, output : %s' % output)

    def set_suspend(self):
        status, output = self.target.run('sudo rtcwake -m mem -s 10')
        self.assertEqual(status, 0, msg = 'Failed to suspends your system to RAM, output : %s' % output)

    @skipIfQemu()
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_suspend(self):
        # Exercise date and network before suspending, suspend, then
        # re-check both to prove the system resumed properly.
        for step in (self.test_date, self.test_ping, self.set_suspend,
                     self.test_date, self.test_ping):
            step()
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user