# File nqxml/tokenizer.rb, line 701
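# Parses a <!DOCTYPE ...> declaration (the leading '<!' has already been
# consumed by the caller) and returns a Doctype entity holding the name,
# the optional external id, and any declarations from the internal subset.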
def nextDoctype
    # Pop any exhausted inputs off the input stack before reading.
    while @currInput.pos >= @currInput.length
        @inputStack.pop()
        @currInput = @inputStack.last
    end

    sourceStartPos = @currInput.pos - 2	# include the '<!' already consumed
    skipChars(7)		# eat 'DOCTYPE'

    # Name
    skipSpaces()
    name = nextName()

    # External id
    skipSpaces()
    externalId = nextExternalId('DOCTYPE') # may return nil

    # Markupdecl and declsep entities, if any
    skipSpaces()
    entities = nil
    if peekMatches?('[')
        # markupdecl and declsep entities
        entities = Array.new()
        skipChar()		# eat '['
        skipSpaces()
        while !peekMatches?(']')
            if peekMatches?('<!ENTITY')
                entities << nextEntityTag()
            elsif peekMatches?('<!ELEMENT')
                entities << nextElementDecl()
            elsif peekMatches?('<!ATTLIST')
                entities << nextAttributeList()
            elsif peekMatches?('<!NOTATION')
                entities << nextNotation()
            elsif peekMatches?('<?')
                skipChars(2)	# eat '<?'
                entities << nextProcessingInstruction()
            elsif peekMatches?('<!--')
                skipChars(4)	# eat '<!--'
                entities << nextComment()
            elsif peekMatches?('%')
                # DeclSep (PEReference)
                skipChar()	# eat '%'
                # We won't get back an entity. Instead, we will
                # re-parse the new input stream created by
                # nextDeclSep().
                nextDeclSep()
            else
                str = 'unknown or illegal tag inside DOCTYPE tag;' +
                    " first 8 chars = '#{nextChars(8)}'"
                raise ParserError.new(str, self)
            end
            skipSpaces()
        end
        skipChar()		# eat ']'
    end

    if !peekMatches?('>')
        raise ParserError.new("DOCTYPE tag missing '>'", self)
    end
    skipChar()		# eat '>'

    return Doctype.new(name, externalId, entities,
                       @currInput.string[sourceStartPos ... @currInput.pos])
end
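
# --- Usage sketch (not part of tokenizer.rb) ---
# A minimal example of exercising this path: it assumes NQXML::Tokenizer can
# be constructed from a source string and iterated with #each, and that a
# DOCTYPE declaration comes back as an NQXML::Doctype entity; the exact entry
# point may differ in your NQXML version.

require 'nqxml/tokenizer'

xml = <<EOS
<!DOCTYPE greeting SYSTEM "greeting.dtd">
<greeting>Hello, world</greeting>
EOS

NQXML::Tokenizer.new(xml).each do |entity|
    puts entity.inspect if entity.instance_of?(NQXML::Doctype)
end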