Mirror of https://github.com/GNOME/libxml2.git
tests: Remove XSTC Python tests
I think these tests have already been ported to runsuite.c. Convert the download portion of xstc/Makefile.am into a script that fetches the test suite.
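A minimal sketch of the intended workflow after this change (the runsuite invocation below is an assumption for illustration, not part of this commit; see runsuite.c for how the test data is actually located):

    cd xstc
    ./fetch-xstc.sh   # download and unpack the W3C XSTC data into xstc/Tests
    cd ..
    make runsuite     # assuming runsuite is built among the regular test programs
    ./runsuite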
parent e0c7a92977
commit e9366ffbc4

.gitignore (vendored) | 1
@@ -26,6 +26,7 @@
 /runxmlconf.log
 /test.out
 /xmlconf
+/xstc/Tests
 
 # Generated by build system
 /config.h

Makefile.am
@@ -2,7 +2,7 @@
 
 ACLOCAL_AMFLAGS = -I m4
 
-SUBDIRS = include . example xstc
+SUBDIRS = include . example
 if WITH_DOXYGEN
 SUBDIRS += doc
 endif
@@ -13,7 +13,7 @@ if WITH_GLOB
 SUBDIRS += fuzz
 endif
 
-DIST_SUBDIRS = include . doc example fuzz python xstc
+DIST_SUBDIRS = include . doc example fuzz python
 
 AM_CPPFLAGS = -I$(top_builddir)/include -I$(srcdir)/include -DSYSCONFDIR='"$(sysconfdir)"'
 

configure.ac
@@ -53,8 +53,6 @@ AC_PROG_CC
 AC_PROG_INSTALL
 AC_PROG_LN_S
 AC_PROG_MKDIR_P
-AC_PATH_PROG(TAR, tar, /bin/tar)
-AC_PATH_PROG(WGET, wget, /usr/bin/wget)
 PKG_PROG_PKG_CONFIG
 
 LT_INIT([disable-static])
@@ -1075,7 +1073,7 @@ AC_DEFINE_UNQUOTED([XML_SYSCONFDIR], ["$XML_SYSCONFDIR"],
 [System configuration directory (/etc)])
 
 # keep on one line for cygwin c.f. #130896
-AC_CONFIG_FILES([Makefile include/Makefile include/libxml/Makefile include/private/Makefile doc/Makefile example/Makefile fuzz/Makefile python/Makefile python/tests/Makefile xstc/Makefile include/libxml/xmlversion.h libxml-2.0.pc libxml2-config.cmake])
+AC_CONFIG_FILES([Makefile include/Makefile include/libxml/Makefile include/private/Makefile doc/Makefile example/Makefile fuzz/Makefile python/Makefile python/tests/Makefile include/libxml/xmlversion.h libxml-2.0.pc libxml2-config.cmake])
 AC_CONFIG_FILES([python/setup.py], [chmod +x python/setup.py])
 AC_CONFIG_FILES([xml2-config], [chmod +x xml2-config])
 AC_OUTPUT

xstc/.gitignore (vendored) | 3
@@ -1,3 +0,0 @@
-/*-test.py
-/Tests
-/xsts-*.tar.gz

xstc/Makefile.am | 132
@@ -1,132 +0,0 @@
-#
-# Definition for the tests from W3C
-#
-PYSCRIPTS=nist-test.py ms-test.py sun-test.py
-TESTDIR=Tests
-TESTDIRS=$(TESTDIR)/msxsdtest $(TESTDIR)/suntest $(TESTDIR)/Datatypes
-TARBALL=xsts-2002-01-16.tar.gz
-TARBALL_2=xsts-2004-01-14.tar.gz
-TSNAME=xmlschema2002-01-16
-TSNAME_2=xmlschema2004-01-14
-TARBALLURL=http://www.w3.org/XML/2004/xml-schema-test-suite/$(TSNAME)/$(TARBALL)
-TARBALLURL_2=http://www.w3.org/XML/2004/xml-schema-test-suite/$(TSNAME_2)/$(TARBALL_2)
-MSTESTDEF=MSXMLSchema1-0-20020116.testSet
-SUNTESTDEF=SunXMLSchema1-0-20020116.testSet
-NISTTESTDEF=NISTXMLSchema1-0-20020116.testSet
-NISTTESTDEF_2=NISTXMLSchemaDatatypes.testSet
-
-#
-# The local data and scripts
-#
-EXTRA_DIST=xstc.py xstc-to-python.xsl
-#
-# Nothing is done by make, only make tests and
-# only if Python and Schemas are enabled.
-#
-all:
-
-#
-# Rule to load the test description and extract the information
-#
-$(TESTDIRS) Tests/Metadata/$(NISTTESTDEF_2) Tests/Metadata/$(MSTTESTDEF) Tests/Metadata/$(SUNTESTDEF):
-	-@(if [ ! -d Tests ] ; then \
-	    mkdir Tests ; \
-	fi)
-	-@(if [ ! -f $(TARBALL_2) ] ; then \
-	    if [ -f $(srcdir)/$(TARBALL_2) ] ; then \
-	        $(LN_S) $(srcdir)/$(TARBALL_2) $(TARBALL_2) ; else \
-	        echo "Missing the test suite description (2004-01-14), trying to fetch it" ;\
-	        if [ -x "$(WGET)" ] ; then \
-	            $(WGET) $(TARBALLURL_2) ; \
-	        else echo "Dont' know how to fetch $(TARBALLURL_2)" ; fi ; fi ; fi)
-	-@(if [ -f $(TARBALL_2) ] ; then \
-	    echo -n "extracting test data (NIST)..." ; \
-	    $(TAR) -xzf $(TARBALL_2) --wildcards '*/Datatypes' '*/Metadata/$(NISTTESTDEF_2)' ; \
-	    echo "done" ; \
-	fi)
-	-@(if [ ! -f $(TARBALL) ] ; then \
-	    if [ -f $(srcdir)/$(TARBALL) ] ; then \
-	        $(LN_S) $(srcdir)/$(TARBALL) $(TARBALL) ; else \
-	        echo "Missing the test suite description (2002-01-16), trying to fetch it" ;\
-	        if [ -x "$(WGET)" ] ; then \
-	            $(WGET) $(TARBALLURL) ; \
-	        else echo "Dont' know how to fetch $(TARBALLURL)" ; fi ; fi ; fi)
-	-@(if [ -f $(TARBALL) ] ; then \
-	    echo -n "extracting test data (Sun, Microsoft)..." ; \
-	    $(TAR) -C Tests -xzf $(TARBALL) --wildcards '*/suntest' '*/msxsdtest' '*/$(MSTESTDEF)' '*/$(SUNTESTDEF)' ; \
-	    if [ -d Tests/suntest ] ; then rm -r Tests/suntest ; fi ; \
-	    if [ -d Tests/msxsdtest ] ; then rm -r Tests/msxsdtest ; fi ; \
-	    mv Tests/xmlschema2002-01-16/* Tests ; \
-	    mv Tests/*.testSet Tests/Metadata ; \
-	    rm -r Tests/xmlschema2002-01-16 ; \
-	    echo "done" ; \
-	fi)
-
-#
-# The python tests are generated via XSLT
-#
-nist-test.py: Tests/Metadata/$(NISTTESTDEF_2) xstc-to-python.xsl
-	-@(if [ -x $(XSLTPROC) ] ; then \
-	    echo "Rebuilding script (NIST)" $@ ; \
-	    $(XSLTPROC) --nonet --stringparam vendor NIST-2 \
-	    $(srcdir)/xstc-to-python.xsl \
-	    $(srcdir)/Tests/Metadata/$(NISTTESTDEF_2) > $@ ; \
-	    chmod +x $@ ; fi )
-
-ms-test.py: Tests/Metadata/$(MSTTESTDEF) xstc-to-python.xsl
-	-@(if [ -x $(XSLTPROC) ] ; then \
-	    echo "Rebuilding script (Microsoft)" $@ ; \
-	    $(XSLTPROC) --nonet --stringparam vendor MS \
-	    $(srcdir)/xstc-to-python.xsl \
-	    $(srcdir)/Tests/Metadata/$(MSTESTDEF) > $@ ; \
-	    chmod +x $@ ; fi )
-
-sun-test.py: Tests/Metadata/$(SUNTESTDEF) xstc-to-python.xsl
-	-@(if [ -x $(XSLTPROC) ] ; then \
-	    echo "Rebuilding script (Sun)" $@ ; \
-	    $(XSLTPROC) --nonet --stringparam vendor SUN \
-	    $(srcdir)/xstc-to-python.xsl \
-	    $(srcdir)/Tests/Metadata/$(SUNTESTDEF) > $@ ; \
-	    chmod +x $@ ; fi )
-
-#
-# The actual test run if present. PYTHONPATH is updated to make sure
-# we run the version from the loacl build and not preinstalled bindings
-#
-pytests: $(PYSCRIPTS) $(TESTDIRS)
-	-@(if [ -x nist-test.py -a -d $(TESTDIR)/Datatypes ] ; then \
-	    echo "## Running XML Schema tests (NIST)"; \
-	    PYTHONPATH="../python:../python/.libs:..:../.libs:$$PYTHONPATH" ;\
-	    export PYTHONPATH; \
-	    LD_LIBRARY_PATH="$(top_builddir)/.libs:$$LD_LIBRARY_PATH" ; \
-	    export LD_LIBRARY_PATH; \
-	    $(CHECKER) $(PYTHON) nist-test.py -s -b $(srcdir) ; fi)
-	-@(if [ -x sun-test.py -a -d $(TESTDIR)/suntest ] ; then \
-	    echo "## Running Schema tests (Sun)"; \
-	    PYTHONPATH="../python:../python/.libs:..:../.libs:$$PYTHONPATH" ;\
-	    export PYTHONPATH; \
-	    LD_LIBRARY_PATH="$(top_builddir)/.libs:$$LD_LIBRARY_PATH" ; \
-	    export LD_LIBRARY_PATH; \
-	    $(CHECKER) $(PYTHON) sun-test.py -s -b $(srcdir) ; fi)
-	-@(if [ -x ms-test.py -a -d $(TESTDIR)/msxsdtest ] ; then \
-	    echo "## Running Schema tests (Microsoft)"; \
-	    PYTHONPATH="../python:../python/.libs:..:../.libs:$$PYTHONPATH" ;\
-	    export PYTHONPATH; \
-	    LD_LIBRARY_PATH="$(top_builddir)/.libs:$$LD_LIBRARY_PATH" ; \
-	    export LD_LIBRARY_PATH; \
-	    $(CHECKER) $(PYTHON) ms-test.py -s -b $(srcdir) ; fi)
-
-tests:
-	-@(if [ -x $(PYTHON) ] ; then \
-	    $(MAKE) pytests ; fi);
-
-#
-# Heavy, works well only on RHEL3
-#
-valgrind:
-	-@(if [ -x $(PYTHON) ] ; then \
-	    echo '## Running the regression tests under Valgrind' ; \
-	    $(MAKE) CHECKER='valgrind -q' pytests ; fi);
-
-CLEANFILES=$(PYSCRIPTS) test.log

xstc/fetch-xstc.sh (executable file) | 32
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+set -e
+
+# Additional tests for runsuite
+
+URL=http://www.w3.org/XML/2004/xml-schema-test-suite
+
+mkdir -p Tests
+
+TARBALL_2=xsts-2004-01-14.tar.gz
+NISTTESTDEF_2=NISTXMLSchemaDatatypes.testSet
+curl -LJO $URL/xmlschema2004-01-14/$TARBALL_2
+tar -xzf $TARBALL_2 Tests/Datatypes Tests/Metadata/$NISTTESTDEF_2
+rm $TARBALL_2
+
+TARBALL=xsts-2002-01-16.tar.gz
+MSTESTDEF=MSXMLSchema1-0-20020116.testSet
+SUNTESTDEF=SunXMLSchema1-0-20020116.testSet
+NISTTESTDEF=NISTXMLSchema1-0-20020116.testSet
+curl -LJO $URL/xmlschema2002-01-16/$TARBALL
+tar -C Tests -xzf $TARBALL \
+    xmlschema2002-01-16/suntest \
+    xmlschema2002-01-16/msxsdtest \
+    xmlschema2002-01-16/$MSTESTDEF \
+    xmlschema2002-01-16/$SUNTESTDEF
+if [ -d Tests/suntest ] ; then rm -r Tests/suntest ; fi
+if [ -d Tests/msxsdtest ] ; then rm -r Tests/msxsdtest ; fi
+mv Tests/xmlschema2002-01-16/* Tests
+mv Tests/*.testSet Tests/Metadata
+rm -r Tests/xmlschema2002-01-16
+rm $TARBALL

@@ -3,6 +3,7 @@
 import sys, os
 import libxml2
 
+# TODO: Check what this does and move away from libxml2 Python bindings
 
 libxml2.debugMemory(1)
 baseDir = os.path.join('msxsdtest', 'Particles')

xstc/xstc-to-python.xsl
@@ -1,114 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<xsl:stylesheet
-xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
-xmlns:ts="TestSuite" version="1.0"
-xmlns:xl="http://www.w3.org/1999/xlink">
-<xsl:param name="vendor" select="'NIST'"/>
-<xsl:output method="text"/>
-
-<xsl:template match="/">
-<xsl:text>#!/usr/bin/env python
-# -*- coding: UTF-8 -*-
-#
-# This file is generated from the W3C test suite description file.
-#
-
-import xstc
-from xstc import XSTCTestRunner, XSTCTestGroup, XSTCSchemaTest, XSTCInstanceTest
-
-xstc.vendor = "</xsl:text><xsl:value-of select="$vendor"/><xsl:text>"
-
-r = XSTCTestRunner()
-
-# Group definitions.
-
-</xsl:text>
-
-<xsl:apply-templates select="ts:testSet/ts:testGroup" mode="group-def"/>
-<xsl:text>
-
-# Test definitions.
-
-</xsl:text>
-<xsl:apply-templates select="ts:testSet/ts:testGroup" mode="test-def"/>
-<xsl:text>
-
-r.run()
-
-</xsl:text>
-
-</xsl:template>
-
-<!-- groupName, descr -->
-<xsl:template match="ts:testGroup" mode="group-def">
-<xsl:text>r.addGroup(XSTCTestGroup("</xsl:text>
-<!-- group -->
-<xsl:value-of select="@name"/><xsl:text>", "</xsl:text>
-<!-- main schema -->
-<xsl:value-of select="ts:schemaTest[1]/ts:schemaDocument/@xl:href"/><xsl:text>", """</xsl:text>
-<!-- group-description -->
-<xsl:call-template name="str">
-<xsl:with-param name="str" select="ts:annotation/ts:documentation/text()"/>
-</xsl:call-template>
-<xsl:text>"""))
-</xsl:text>
-</xsl:template>
-
-<xsl:template name="str">
-<xsl:param name="str"/>
-<xsl:choose>
-<xsl:when test="contains($str, '"')">
-<xsl:call-template name="str">
-<xsl:with-param name="str" select="substring-before($str, '"')"/>
-</xsl:call-template>
-<xsl:text>'</xsl:text>
-<xsl:call-template name="str">
-<xsl:with-param name="str" select="substring-after($str, '"')"/>
-</xsl:call-template>
-
-</xsl:when>
-<xsl:otherwise>
-<xsl:value-of select="$str"/>
-</xsl:otherwise>
-</xsl:choose>
-</xsl:template>
-
-<xsl:template match="ts:testGroup" mode="test-def">
-<xsl:param name="group" select="@name"/>
-<xsl:for-each select="ts:schemaTest">
-<!-- groupName, isSchema, Name, Accepted, File, Val, Descr -->
-<xsl:text>r.addTest(XSTCSchemaTest("</xsl:text>
-<!-- group -->
-<xsl:value-of select="$group"/><xsl:text>", "</xsl:text>
-<!-- test-name -->
-<xsl:value-of select="@name"/><xsl:text>", </xsl:text>
-<!-- accepted -->
-<xsl:value-of select="number(ts:current/@status = 'accepted')"/><xsl:text>, "</xsl:text>
-<!-- filename -->
-<xsl:value-of select="ts:schemaDocument/@xl:href"/><xsl:text>", </xsl:text>
-<!-- validity -->
-<xsl:value-of select="number(ts:expected/@validity = 'valid')"/><xsl:text>, "</xsl:text>
-<!-- test-description -->
-<xsl:value-of select="ts:annotation/ts:documentation/text()"/><xsl:text>"))
-</xsl:text>
-</xsl:for-each>
-<xsl:for-each select="ts:instanceTest">
-<!-- groupName, isSchema, Name, Accepted, File, Val, Descr -->
-<xsl:text>r.addTest(XSTCInstanceTest("</xsl:text>
-<!-- group -->
-<xsl:value-of select="$group"/><xsl:text>", "</xsl:text>
-<!-- test-name -->
-<xsl:value-of select="@name"/><xsl:text>", </xsl:text>
-<!-- accepted -->
-<xsl:value-of select="number(ts:current/@status = 'accepted')"/><xsl:text>, "</xsl:text>
-<!-- filename -->
-<xsl:value-of select="ts:instanceDocument/@xl:href"/><xsl:text>", </xsl:text>
-<!-- validity -->
-<xsl:value-of select="number(ts:expected/@validity = 'valid')"/><xsl:text>, "</xsl:text>
-<!-- test-description -->
-<xsl:value-of select="ts:annotation/ts:documentation/text()"/><xsl:text>"))
-</xsl:text>
-</xsl:for-each>
-</xsl:template>
-
-</xsl:stylesheet>

xstc/xstc.py | 693
@@ -1,693 +0,0 @@
-#!/usr/bin/env python3
-
-#
-# This is the MS subset of the W3C test suite for XML Schemas.
-# This file is generated from the MS W3c test suite description file.
-#
-
-import sys, os
-import optparse
-import libxml2
-
-opa = optparse.OptionParser()
-
-opa.add_option("-b", "--base", action="store", type="string", dest="baseDir",
-               default="",
-               help="""The base directory; i.e. the parent folder of the
-               "nisttest", "suntest" and "msxsdtest" directories.""")
-
-opa.add_option("-o", "--out", action="store", type="string", dest="logFile",
-               default="test.log",
-               help="The filepath of the log file to be created")
-
-opa.add_option("--log", action="store_true", dest="enableLog",
-               default=False,
-               help="Create the log file")
-
-opa.add_option("--no-test-out", action="store_true", dest="disableTestStdOut",
-               default=False,
-               help="Don't output test results")
-
-opa.add_option("-s", "--silent", action="store_true", dest="silent", default=False,
-               help="Disables display of all tests")
-
-opa.add_option("-v", "--verbose", action="store_true", dest="verbose",
-               default=False,
-               help="Displays all tests (only if --silent is not set)")
-
-opa.add_option("-x", "--max", type="int", dest="maxTestCount",
-               default="-1",
-               help="The maximum number of tests to be run")
-
-opa.add_option("-t", "--test", type="string", dest="singleTest",
-               default=None,
-               help="Runs the specified test only")
-
-opa.add_option("--tsw", "--test-starts-with", type="string", dest="testStartsWith",
-               default=None,
-               help="Runs the specified test(s), starting with the given string")
-
-opa.add_option("--rieo", "--report-internal-errors-only", action="store_true",
-               dest="reportInternalErrOnly", default=False,
-               help="Display erroneous tests of type 'internal' only")
-
-opa.add_option("--rueo", "--report-unimplemented-errors-only", action="store_true",
-               dest="reportUnimplErrOnly", default=False,
-               help="Display erroneous tests of type 'unimplemented' only")
-
-opa.add_option("--rmleo", "--report-mem-leak-errors-only", action="store_true",
-               dest="reportMemLeakErrOnly", default=False,
-               help="Display erroneous tests of type 'memory leak' only")
-
-opa.add_option("-c", "--combines", type="string", dest="combines",
-               default=None,
-               help="Combines to be run (all if omitted)")
-
-opa.add_option("--csw", "--csw", type="string", dest="combineStartsWith",
-               default=None,
-               help="Combines to be run (all if omitted)")
-
-opa.add_option("--rc", "--report-combines", action="store_true",
-               dest="reportCombines", default=False,
-               help="Display combine reports")
-
-opa.add_option("--rec", "--report-err-combines", action="store_true",
-               dest="reportErrCombines", default=False,
-               help="Display erroneous combine reports only")
-
-opa.add_option("--debug", action="store_true",
-               dest="debugEnabled", default=False,
-               help="Displays debug messages")
-
-opa.add_option("--info", action="store_true",
-               dest="info", default=False,
-               help="Displays info on the suite only. Does not run any test.")
-opa.add_option("--sax", action="store_true",
-               dest="validationSAX", default=False,
-               help="Use SAX2-driven validation.")
-opa.add_option("--tn", action="store_true",
-               dest="displayTestName", default=False,
-               help="Display the test name in every case.")
-
-(options, args) = opa.parse_args()
-
-if options.combines is not None:
-    options.combines = options.combines.split()
-
-################################################
-# The vars below are not intended to be changed.
-#
-
-msgSchemaNotValidButShould = "The schema should be valid."
-msgSchemaValidButShouldNot = "The schema should be invalid."
-msgInstanceNotValidButShould = "The instance should be valid."
-msgInstanceValidButShouldNot = "The instance should be invalid."
-vendorNIST = "NIST"
-vendorNIST_2 = "NIST-2"
-vendorSUN = "SUN"
-vendorMS = "MS"
-
-###################
-# Helper functions.
-#
-vendor = None
-
-def handleError(test, msg):
-    global options
-    if not options.silent:
-        test.addLibLog("'%s' LIB: %s" % (test.name, msg))
-    if msg.find("Unimplemented") > -1:
-        test.failUnimplemented()
-    elif msg.find("Internal") > -1:
-        test.failInternal()
-
-
-def fixFileNames(fileName):
-    if (fileName is None) or (fileName == ""):
-        return ""
-    dirs = fileName.split("/")
-    if dirs[1] != "Tests":
-        fileName = os.path.join(".", "Tests")
-        for dir in dirs[1:]:
-            fileName = os.path.join(fileName, dir)
-    return fileName
-
-class XSTCTestGroup:
-    def __init__(self, name, schemaFileName, descr):
-        global vendor, vendorNIST_2
-        self.name = name
-        self.descr = descr
-        self.mainSchema = True
-        self.schemaFileName = fixFileNames(schemaFileName)
-        self.schemaParsed = False
-        self.schemaTried = False
-
-    def setSchema(self, schemaFileName, parsed):
-        if not self.mainSchema:
-            return
-        self.mainSchema = False
-        self.schemaParsed = parsed
-        self.schemaTried = True
-
-class XSTCTestCase:
-
-    # <!-- groupName, Name, Accepted, File, Val, Descr
-    def __init__(self, isSchema, groupName, name, accepted, file, val, descr):
-        global options
-        #
-        # Constructor.
-        #
-        self.testRunner = None
-        self.isSchema = isSchema
-        self.groupName = groupName
-        self.name = name
-        self.accepted = accepted
-        self.fileName = fixFileNames(file)
-        self.val = val
-        self.descr = descr
-        self.failed = False
-        self.combineName = None
-
-        self.log = []
-        self.libLog = []
-        self.initialMemUsed = 0
-        self.memLeak = 0
-        self.excepted = False
-        self.bad = False
-        self.unimplemented = False
-        self.internalErr = False
-        self.noSchemaErr = False
-        self.failed = False
-        #
-        # Init the log.
-        #
-        if not options.silent:
-            if self.descr is not None:
-                self.log.append("'%s' descr: %s\n" % (self.name, self.descr))
-            self.log.append("'%s' exp validity: %d\n" % (self.name, self.val))
-
-    def initTest(self, runner):
-        global vendorNIST, vendorSUN, vendorMS, vendorNIST_2, options, vendor
-        #
-        # Get the test-group.
-        #
-        self.runner = runner
-        self.group = runner.getGroup(self.groupName)
-        if vendor == vendorMS or vendor == vendorSUN:
-            #
-            # Use the last given directory for the combine name.
-            #
-            dirs = self.fileName.split("/")
-            self.combineName = dirs[len(dirs) -2]
-        elif vendor == vendorNIST:
-            #
-            # NIST files are named in the following form:
-            # "NISTSchema-short-pattern-1.xsd"
-            #
-            tokens = self.name.split("-")
-            self.combineName = tokens[1]
-        elif vendor == vendorNIST_2:
-            #
-            # Group-names have the form: "atomic-normalizedString-length-1"
-            #
-            tokens = self.groupName.split("-")
-            self.combineName = "%s-%s" % (tokens[0], tokens[1])
-        else:
-            self.combineName = "unknown"
-            raise Exception("Could not compute the combine name of a test.")
-        if (not options.silent) and (self.group.descr is not None):
-            self.log.append("'%s' group-descr: %s\n" % (self.name, self.group.descr))
-
-
-    def addLibLog(self, msg):
-        """This one is intended to be used by the error handler
-        function"""
-        global options
-        if not options.silent:
-            self.libLog.append(msg)
-
-    def fail(self, msg):
-        global options
-        self.failed = True
-        if not options.silent:
-            self.log.append("'%s' ( FAILED: %s\n" % (self.name, msg))
-
-    def failNoSchema(self):
-        global options
-        self.failed = True
-        self.noSchemaErr = True
-        if not options.silent:
-            self.log.append("'%s' X NO-SCHEMA\n" % (self.name))
-
-    def failInternal(self):
-        global options
-        self.failed = True
-        self.internalErr = True
-        if not options.silent:
-            self.log.append("'%s' * INTERNAL\n" % self.name)
-
-    def failUnimplemented(self):
-        global options
-        self.failed = True
-        self.unimplemented = True
-        if not options.silent:
-            self.log.append("'%s' ? UNIMPLEMENTED\n" % self.name)
-
-    def failCritical(self, msg):
-        global options
-        self.failed = True
-        self.bad = True
-        if not options.silent:
-            self.log.append("'%s' ! BAD: %s\n" % (self.name, msg))
-
-    def failExcept(self, e):
-        global options
-        self.failed = True
-        self.excepted = True
-        if not options.silent:
-            self.log.append("'%s' # EXCEPTION: %s\n" % (self.name, e.__str__()))
-
-    def setUp(self):
-        #
-        # Set up Libxml2.
-        #
-        self.initialMemUsed = libxml2.debugMemory(1)
-        libxml2.initParser()
-        libxml2.lineNumbersDefault(1)
-        libxml2.registerErrorHandler(handleError, self)
-
-    def tearDown(self):
-        libxml2.schemaCleanupTypes()
-        libxml2.cleanupParser()
-        self.memLeak = libxml2.debugMemory(1) - self.initialMemUsed
-
-    def isIOError(self, file, docType):
-        err = None
-        try:
-            err = libxml2.lastError()
-        except:
-            # Suppress exceptions.
-            pass
-        if (err is None):
-            return False
-        if err.domain() == libxml2.XML_FROM_IO:
-            self.failCritical("failed to access the %s resource '%s'\n" % (docType, file))
-
-    def debugMsg(self, msg):
-        global options
-        if options.debugEnabled:
-            sys.stdout.write("'%s' DEBUG: %s\n" % (self.name, msg))
-
-    def finalize(self):
-        global options
-        """Adds additional info to the log."""
-        #
-        # Add libxml2 messages.
-        #
-        if not options.silent:
-            self.log.extend(self.libLog)
-        #
-        # Add memory leaks.
-        #
-        if self.memLeak != 0:
-            self.log.append("%s + memory leak: %d bytes\n" % (self.name, self.memLeak))
-
-    def run(self):
-        """Runs a test."""
-        global options
-
-        ##filePath = os.path.join(options.baseDir, self.fileName)
-        # filePath = "%s/%s/%s/%s" % (options.baseDir, self.test_Folder, self.schema_Folder, self.schema_File)
-        if options.displayTestName:
-            sys.stdout.write("'%s'\n" % self.name)
-        try:
-            self.validate()
-        except (Exception, libxml2.parserError, libxml2.treeError) as e:
-            self.failExcept(e)
-
-def parseSchema(fileName):
-    schema = None
-    ctxt = libxml2.schemaNewParserCtxt(fileName)
-    try:
-        try:
-            schema = ctxt.schemaParse()
-        except:
-            pass
-    finally:
-        del ctxt
-    return schema
-
-
-class XSTCSchemaTest(XSTCTestCase):
-
-    def __init__(self, groupName, name, accepted, file, val, descr):
-        XSTCTestCase.__init__(self, 1, groupName, name, accepted, file, val, descr)
-
-    def validate(self):
-        global msgSchemaNotValidButShould, msgSchemaValidButShouldNot
-        schema = None
-        filePath = self.fileName
-        # os.path.join(options.baseDir, self.fileName)
-        valid = 0
-        try:
-            #
-            # Parse the schema.
-            #
-            self.debugMsg("loading schema: %s" % filePath)
-            schema = parseSchema(filePath)
-            self.debugMsg("after loading schema")
-            if schema is None:
-                self.debugMsg("schema is None")
-                self.debugMsg("checking for IO errors...")
-                if self.isIOError(filePath, "schema"):
-                    return
-            self.debugMsg("checking schema result")
-            if (schema is None and self.val) or (schema is not None and self.val == 0):
-                self.debugMsg("schema result is BAD")
-                if (schema == None):
-                    self.fail(msgSchemaNotValidButShould)
-                else:
-                    self.fail(msgSchemaValidButShouldNot)
-            else:
-                self.debugMsg("schema result is OK")
-        finally:
-            self.group.setSchema(self.fileName, schema is not None)
-            del schema
-
-class XSTCInstanceTest(XSTCTestCase):
-
-    def __init__(self, groupName, name, accepted, file, val, descr):
-        XSTCTestCase.__init__(self, 0, groupName, name, accepted, file, val, descr)
-
-    def validate(self):
-        instance = None
-        schema = None
-        filePath = self.fileName
-        # os.path.join(options.baseDir, self.fileName)
-
-        if not self.group.schemaParsed and self.group.schemaTried:
-            self.failNoSchema()
-            return
-
-        self.debugMsg("loading instance: %s" % filePath)
-        parserCtxt = libxml2.newParserCtxt()
-        if (parserCtxt is None):
-            # TODO: Is this one necessary, or will an exception
-            # be already raised?
-            raise Exception("Could not create the instance parser context.")
-        if not options.validationSAX:
-            try:
-                try:
-                    instance = parserCtxt.ctxtReadFile(filePath, None, libxml2.XML_PARSE_NOWARNING)
-                except:
-                    # Suppress exceptions.
-                    pass
-            finally:
-                del parserCtxt
-            self.debugMsg("after loading instance")
-            if instance is None:
-                self.debugMsg("instance is None")
-                self.failCritical("Failed to parse the instance for unknown reasons.")
-                return
-        try:
-            #
-            # Validate the instance.
-            #
-            self.debugMsg("loading schema: %s" % self.group.schemaFileName)
-            schema = parseSchema(self.group.schemaFileName)
-            try:
-                validationCtxt = schema.schemaNewValidCtxt()
-                #validationCtxt = libxml2.schemaNewValidCtxt(None)
-                if (validationCtxt is None):
-                    self.failCritical("Could not create the validation context.")
-                    return
-                try:
-                    self.debugMsg("validating instance")
-                    if options.validationSAX:
-                        instance_Err = validationCtxt.schemaValidateFile(filePath, 0)
-                    else:
-                        instance_Err = validationCtxt.schemaValidateDoc(instance)
-                    self.debugMsg("after instance validation")
-                    self.debugMsg("instance-err: %d" % instance_Err)
-                    if (instance_Err != 0 and self.val == 1) or (instance_Err == 0 and self.val == 0):
-                        self.debugMsg("instance result is BAD")
-                        if (instance_Err != 0):
-                            self.fail(msgInstanceNotValidButShould)
-                        else:
-                            self.fail(msgInstanceValidButShouldNot)
-
-                    else:
-                        self.debugMsg("instance result is OK")
-                finally:
-                    del validationCtxt
-            finally:
-                del schema
-        finally:
-            if instance is not None:
-                instance.freeDoc()
-
-
-####################
-# Test runner class.
-#
-
-class XSTCTestRunner:
-
-    CNT_TOTAL = 0
-    CNT_RAN = 1
-    CNT_SUCCEEDED = 2
-    CNT_FAILED = 3
-    CNT_UNIMPLEMENTED = 4
-    CNT_INTERNAL = 5
-    CNT_BAD = 6
-    CNT_EXCEPTED = 7
-    CNT_MEMLEAK = 8
-    CNT_NOSCHEMA = 9
-    CNT_NOTACCEPTED = 10
-    CNT_SCHEMA_TEST = 11
-
-    def __init__(self):
-        self.logFile = None
-        self.counters = self.createCounters()
-        self.testList = []
-        self.combinesRan = {}
-        self.groups = {}
-        self.curGroup = None
-
-    def createCounters(self):
-        counters = {self.CNT_TOTAL:0, self.CNT_RAN:0, self.CNT_SUCCEEDED:0,
-            self.CNT_FAILED:0, self.CNT_UNIMPLEMENTED:0, self.CNT_INTERNAL:0, self.CNT_BAD:0,
-            self.CNT_EXCEPTED:0, self.CNT_MEMLEAK:0, self.CNT_NOSCHEMA:0, self.CNT_NOTACCEPTED:0,
-            self.CNT_SCHEMA_TEST:0}
-
-        return counters
-
-    def addTest(self, test):
-        self.testList.append(test)
-        test.initTest(self)
-
-    def getGroup(self, groupName):
-        return self.groups[groupName]
-
-    def addGroup(self, group):
-        self.groups[group.name] = group
-
-    def updateCounters(self, test, counters):
-        if test.memLeak != 0:
-            counters[self.CNT_MEMLEAK] += 1
-        if not test.failed:
-            counters[self.CNT_SUCCEEDED] +=1
-        if test.failed:
-            counters[self.CNT_FAILED] += 1
-        if test.bad:
-            counters[self.CNT_BAD] += 1
-        if test.unimplemented:
-            counters[self.CNT_UNIMPLEMENTED] += 1
-        if test.internalErr:
-            counters[self.CNT_INTERNAL] += 1
-        if test.noSchemaErr:
-            counters[self.CNT_NOSCHEMA] += 1
-        if test.excepted:
-            counters[self.CNT_EXCEPTED] += 1
-        if not test.accepted:
-            counters[self.CNT_NOTACCEPTED] += 1
-        if test.isSchema:
-            counters[self.CNT_SCHEMA_TEST] += 1
-        return counters
-
-    def displayResults(self, out, all, combName, counters):
-        out.write("\n")
-        if all:
-            if options.combines is not None:
-                out.write("combine(s): %s\n" % str(options.combines))
-        elif combName is not None:
-            out.write("combine : %s\n" % combName)
-        out.write(" total : %d\n" % counters[self.CNT_TOTAL])
-        if all or options.combines is not None:
-            out.write(" ran : %d\n" % counters[self.CNT_RAN])
-            out.write(" (schemata) : %d\n" % counters[self.CNT_SCHEMA_TEST])
-        # out.write(" succeeded : %d\n" % counters[self.CNT_SUCCEEDED])
-        out.write(" not accepted : %d\n" % counters[self.CNT_NOTACCEPTED])
-        if counters[self.CNT_FAILED] > 0:
-            out.write(" failed : %d\n" % counters[self.CNT_FAILED])
-            out.write(" -> internal : %d\n" % counters[self.CNT_INTERNAL])
-            out.write(" -> unimpl. : %d\n" % counters[self.CNT_UNIMPLEMENTED])
-            out.write(" -> skip-invalid-schema : %d\n" % counters[self.CNT_NOSCHEMA])
-            out.write(" -> bad : %d\n" % counters[self.CNT_BAD])
-            out.write(" -> exceptions : %d\n" % counters[self.CNT_EXCEPTED])
-        out.write(" memory leaks : %d\n" % counters[self.CNT_MEMLEAK])
-
-    def displayShortResults(self, out, all, combName, counters):
-        out.write("Ran %d of %d tests (%d schemata):" % (counters[self.CNT_RAN],
-            counters[self.CNT_TOTAL], counters[self.CNT_SCHEMA_TEST]))
-        # out.write(" succeeded : %d\n" % counters[self.CNT_SUCCEEDED])
-        if counters[self.CNT_NOTACCEPTED] > 0:
-            out.write(" %d not accepted" % (counters[self.CNT_NOTACCEPTED]))
-        if counters[self.CNT_FAILED] > 0 or counters[self.CNT_MEMLEAK] > 0:
-            if counters[self.CNT_FAILED] > 0:
-                out.write(" %d failed" % (counters[self.CNT_FAILED]))
-                out.write(" (")
-                if counters[self.CNT_INTERNAL] > 0:
-                    out.write(" %d internal" % (counters[self.CNT_INTERNAL]))
-                if counters[self.CNT_UNIMPLEMENTED] > 0:
-                    out.write(" %d unimplemented" % (counters[self.CNT_UNIMPLEMENTED]))
-                if counters[self.CNT_NOSCHEMA] > 0:
-                    out.write(" %d skip-invalid-schema" % (counters[self.CNT_NOSCHEMA]))
-                if counters[self.CNT_BAD] > 0:
-                    out.write(" %d bad" % (counters[self.CNT_BAD]))
-                if counters[self.CNT_EXCEPTED] > 0:
-                    out.write(" %d exception" % (counters[self.CNT_EXCEPTED]))
-                out.write(" )")
-            if counters[self.CNT_MEMLEAK] > 0:
-                out.write(" %d leaks" % (counters[self.CNT_MEMLEAK]))
-            out.write("\n")
-        else:
-            out.write(" all passed\n")
-
-    def reportCombine(self, combName):
-        global options
-
-        counters = self.createCounters()
-        #
-        # Compute evaluation counters.
-        #
-        for test in self.combinesRan[combName]:
-            counters[self.CNT_TOTAL] += 1
-            counters[self.CNT_RAN] += 1
-            counters = self.updateCounters(test, counters)
-        if options.reportErrCombines and (counters[self.CNT_FAILED] == 0) and (counters[self.CNT_MEMLEAK] == 0):
-            pass
-        else:
-            if options.enableLog:
-                self.displayResults(self.logFile, False, combName, counters)
-            self.displayResults(sys.stdout, False, combName, counters)
-
-    def displayTestLog(self, test):
-        sys.stdout.writelines(test.log)
-        sys.stdout.write("~~~~~~~~~~\n")
-
-    def reportTest(self, test):
-        global options
-
-        error = test.failed or test.memLeak != 0
-        #
-        # Only erroneous tests will be written to the log,
-        # except @verbose is switched on.
-        #
-        if options.enableLog and (options.verbose or error):
-            self.logFile.writelines(test.log)
-            self.logFile.write("~~~~~~~~~~\n")
-        #
-        # if not @silent, only erroneous tests will be
-        # written to stdout, except @verbose is switched on.
-        #
-        if not options.silent:
-            if options.reportInternalErrOnly and test.internalErr:
-                self.displayTestLog(test)
-            if options.reportMemLeakErrOnly and test.memLeak != 0:
-                self.displayTestLog(test)
-            if options.reportUnimplErrOnly and test.unimplemented:
-                self.displayTestLog(test)
-            if (options.verbose or error) and (not options.reportInternalErrOnly) and (not options.reportMemLeakErrOnly) and (not options.reportUnimplErrOnly):
-                self.displayTestLog(test)
-
-
-    def addToCombines(self, test):
-        found = False
-        if test.combineName in self.combinesRan:
-            self.combinesRan[test.combineName].append(test)
-        else:
-            self.combinesRan[test.combineName] = [test]
-
-    def run(self):
-
-        global options
-
-        if options.info:
-            for test in self.testList:
-                self.addToCombines(test)
-            sys.stdout.write("Combines: %d\n" % len(self.combinesRan))
-            sys.stdout.write("%s\n" % self.combinesRan.keys())
-            return
-
-        if options.enableLog:
-            self.logFile = open(options.logFile, "w")
-        try:
-            for test in self.testList:
-                self.counters[self.CNT_TOTAL] += 1
-                #
-                # Filter tests.
-                #
-                if options.singleTest is not None and options.singleTest != "":
-                    if (test.name != options.singleTest):
-                        continue
-                elif options.combines is not None:
-                    if not options.combines.__contains__(test.combineName):
-                        continue
-                elif options.testStartsWith is not None:
-                    if not test.name.startswith(options.testStartsWith):
-                        continue
-                elif options.combineStartsWith is not None:
-                    if not test.combineName.startswith(options.combineStartsWith):
-                        continue
-
-                if options.maxTestCount != -1 and self.counters[self.CNT_RAN] >= options.maxTestCount:
-                    break
-                self.counters[self.CNT_RAN] += 1
-                #
-                # Run the thing, dammit.
-                #
-                try:
-                    test.setUp()
-                    try:
-                        test.run()
-                    finally:
-                        test.tearDown()
-                finally:
-                    #
-                    # Evaluate.
-                    #
-                    test.finalize()
-                    self.reportTest(test)
-                    if options.reportCombines or options.reportErrCombines:
-                        self.addToCombines(test)
-                    self.counters = self.updateCounters(test, self.counters)
-        finally:
-            if options.reportCombines or options.reportErrCombines:
-                #
-                # Build a report for every single combine.
-                #
-                # TODO: How to sort a dict?
-                #
-                self.combinesRan.keys().sort(None)
-                for key in self.combinesRan.keys():
-                    self.reportCombine(key)
-
-            #
-            # Display the final report.
-            #
-            if options.silent:
-                self.displayShortResults(sys.stdout, True, None, self.counters)
-            else:
-                sys.stdout.write("===========================\n")
-                self.displayResults(sys.stdout, True, None, self.counters)