/*
Copyright 2002-2007 MySQL AB, 2008 Sun Microsystems
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
There are special exceptions to the terms and conditions of the GPL
as it is applied to this software. View the full text of the
exception in file EXCEPTIONS-CONNECTOR-J in the directory of this
software distribution.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package com.mysql.jdbc;
import java.io.InputStream;
import java.sql.BatchUpdateException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.GregorianCalendar;
import java.util.Iterator;
import java.util.List;
import java.util.TimerTask;
import com.mysql.jdbc.exceptions.DeadlockTimeoutRollbackMarker;
import com.mysql.jdbc.exceptions.MySQLStatementCancelledException;
import com.mysql.jdbc.exceptions.MySQLTimeoutException;
import com.mysql.jdbc.profiler.ProfilerEvent;
import com.mysql.jdbc.profiler.ProfilerEventHandler;
import com.mysql.jdbc.profiler.ProfilerEventHandlerFactory;
/**
* A Statement object is used for executing a static SQL statement and obtaining
* the results produced by it.
*
* <p>
* Only one ResultSet per Statement can be open at any point in time. Therefore,
* if the reading of one ResultSet is interleaved with the reading of another,
* each must have been generated by different Statements. All statement execute
* methods implicitly close a statement's current ResultSet if an open one
* exists.
* </p>
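*
* <p>
* A typical usage pattern (a sketch, assuming an open java.sql.Connection
* named conn; the table and columns are illustrative only) looks like this:
* </p>
*
* <pre>
* java.sql.Statement stmt = conn.createStatement();
*
* try {
*     java.sql.ResultSet rs = stmt.executeQuery("SELECT id, name FROM t");
*
*     while (rs.next()) {
*         System.out.println(rs.getLong(1) + ": " + rs.getString(2));
*     }
* } finally {
*     stmt.close(); // also closes the statement's current ResultSet
* }
* </pre>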
*
* @author Mark Matthews
* @version $Id: Statement.java 4624 2005-11-28 14:24:29 -0600 (Mon, 28 Nov
* 2005) mmatthews $
*
* @see java.sql.Statement
* @see ResultSetInternalMethods
*/
public class StatementImpl implements Statement {
protected static final String PING_MARKER = "/* ping */";
/**
* Thread used to implement query timeouts...Eventually we could be more
* efficient and have one thread with timers, but this is a straightforward
* and simple way to implement a feature that isn't used all that often.
*/
class CancelTask extends TimerTask {
long connectionId = 0;
SQLException caughtWhileCancelling = null;
StatementImpl toCancel;
CancelTask(StatementImpl cancellee) throws SQLException {
connectionId = connection.getIO().getThreadId();
toCancel = cancellee;
}
public void run() {
Thread cancelThread = new Thread() {
public void run() {
Connection cancelConn = null;
java.sql.Statement cancelStmt = null;
try {
synchronized (cancelTimeoutMutex) {
cancelConn = connection.duplicate();
cancelStmt = cancelConn.createStatement();
cancelStmt.execute("KILL QUERY " + connectionId);
toCancel.wasCancelled = true;
toCancel.wasCancelledByTimeout = true;
}
} catch (SQLException sqlEx) {
caughtWhileCancelling = sqlEx;
} catch (NullPointerException npe) {
// Case when connection closed while starting to cancel
// We can't easily synchronize this, because then one thread
// can't cancel() a running query
// ignore, we shouldn't re-throw this, because the connection's
// already closed, so the statement has been timed out.
} finally {
if (cancelStmt != null) {
try {
cancelStmt.close();
} catch (SQLException sqlEx) {
throw new RuntimeException(sqlEx.toString());
}
}
if (cancelConn != null) {
try {
cancelConn.close();
} catch (SQLException sqlEx) {
throw new RuntimeException(sqlEx.toString());
}
}
}
}
};
cancelThread.start();
}
}
/** Mutex to prevent race between returning query results and noticing
that we're timed-out or cancelled. */
protected Object cancelTimeoutMutex = new Object();
/** Used to generate IDs when profiling. */
protected static int statementCounter = 1;
public final static byte USES_VARIABLES_FALSE = 0;
public final static byte USES_VARIABLES_TRUE = 1;
public final static byte USES_VARIABLES_UNKNOWN = -1;
protected boolean wasCancelled = false;
protected boolean wasCancelledByTimeout = false;
/** Holds batched commands */
protected List batchedArgs;
/** The character converter to use (if available) */
protected SingleByteCharsetConverter charConverter = null;
/** The character encoding to use (if available) */
protected String charEncoding = null;
/** The connection that created us */
protected ConnectionImpl connection = null;
protected long connectionId = 0;
/** The catalog in use */
protected String currentCatalog = null;
/** Should we process escape codes? */
protected boolean doEscapeProcessing = true;
/** If we're profiling, where should events go to? */
protected ProfilerEventHandler eventSink = null;
/** The number of rows to fetch at a time (currently ignored) */
private int fetchSize = 0;
/** Has this statement been closed? */
protected boolean isClosed = false;
/** The auto_increment value for the last insert */
protected long lastInsertId = -1;
/** The max field size for this statement */
protected int maxFieldSize = MysqlIO.getMaxBuf();
/**
* The maximum number of rows to return for this statement (-1 means _all_
* rows)
*/
protected int maxRows = -1;
/** Has someone changed this for this statement? */
protected boolean maxRowsChanged = false;
/** List of currently-open ResultSets */
protected List openResults = new ArrayList();
/** Are we in pedantic mode? */
protected boolean pedantic = false;
/**
* Where this statement was created, only used if profileSql or
* useUsageAdvisor set to true.
*/
protected Throwable pointOfOrigin;
/** Should we profile? */
protected boolean profileSQL = false;
/** The current results */
protected ResultSetInternalMethods results = null;
/** The concurrency for this result set (updatable or not) */
protected int resultSetConcurrency = 0;
/** The type of this result set (scroll sensitive or insensitive) */
protected int resultSetType = 0;
/** Used to identify this statement when profiling. */
protected int statementId;
/** The timeout for a query */
protected int timeoutInMillis = 0;
/** The update count for this statement */
protected long updateCount = -1;
/** Should we use the usage advisor? */
protected boolean useUsageAdvisor = false;
/** The warnings chain. */
protected SQLWarning warningChain = null;
/**
* Should this statement hold results open over .close() regardless of the
* connection's setting?
*/
protected boolean holdResultsOpenOverClose = false;
protected ArrayList batchedGeneratedKeys = null;
protected boolean retrieveGeneratedKeys = false;
protected boolean continueBatchOnError = false;
protected PingTarget pingTarget = null;
protected boolean useLegacyDatetimeCode;
/**
* Constructor for a Statement.
*
* @param c
* the Connection instance that created us
* @param catalog
* the database name in use when we were created
*
* @throws SQLException
* if an error occurs.
*/
public StatementImpl(ConnectionImpl c, String catalog) throws SQLException {
if ((c == null) || c.isClosed()) {
throw SQLError.createSQLException(
Messages.getString("Statement.0"), //$NON-NLS-1$
SQLError.SQL_STATE_CONNECTION_NOT_OPEN); //$NON-NLS-1$ //$NON-NLS-2$
}
this.connection = c;
this.connectionId = this.connection.getId();
this.currentCatalog = catalog;
this.pedantic = this.connection.getPedantic();
this.continueBatchOnError = this.connection.getContinueBatchOnError();
this.useLegacyDatetimeCode = this.connection.getUseLegacyDatetimeCode();
if (!this.connection.getDontTrackOpenResources()) {
this.connection.registerStatement(this);
}
//
// Adjust, if we know it
//
if (this.connection != null) {
this.maxFieldSize = this.connection.getMaxAllowedPacket();
int defaultFetchSize = this.connection.getDefaultFetchSize();
if (defaultFetchSize != 0) {
setFetchSize(defaultFetchSize);
}
}
if (this.connection.getUseUnicode()) {
this.charEncoding = this.connection.getEncoding();
this.charConverter = this.connection
.getCharsetConverter(this.charEncoding);
}
boolean profiling = this.connection.getProfileSql()
|| this.connection.getUseUsageAdvisor() || this.connection.getLogSlowQueries();
if (this.connection.getAutoGenerateTestcaseScript() || profiling) {
this.statementId = statementCounter++;
}
if (profiling) {
this.pointOfOrigin = new Throwable();
this.profileSQL = this.connection.getProfileSql();
this.useUsageAdvisor = this.connection.getUseUsageAdvisor();
this.eventSink = ProfilerEventHandlerFactory.getInstance(this.connection);
}
int maxRowsConn = this.connection.getMaxRows();
if (maxRowsConn != -1) {
setMaxRows(maxRowsConn);
}
}
/**
* Adds the given SQL command to the current batch of commands for this
* Statement object.
*
* @param sql
* typically a static SQL INSERT or UPDATE statement
*
* @throws SQLException
* if a database access error occurs, or the driver does not
* support batch statements
*/
public synchronized void addBatch(String sql) throws SQLException {
if (this.batchedArgs == null) {
this.batchedArgs = new ArrayList();
}
if (sql != null) {
this.batchedArgs.add(sql);
}
}
/**
* Cancels this Statement object if both the DBMS and driver support
* aborting an SQL statement. This method can be used by one thread to
* cancel a statement that is being executed by another thread.
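*
* <p>
* For example, a watchdog thread might cancel a long-running query started
* by another thread (a sketch; the connection conn, the table and the
* timings are illustrative only):
* </p>
*
* <pre>
* final java.sql.Statement stmt = conn.createStatement();
*
* Thread watchdog = new Thread() {
*     public void run() {
*         try {
*             Thread.sleep(5000); // give the query five seconds
*             stmt.cancel();
*         } catch (Exception ex) {
*             // ignored for the purposes of this sketch
*         }
*     }
* };
* watchdog.start();
*
* stmt.executeQuery("SELECT pad FROM huge_table"); // throws if cancelled
* </pre>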
*/
public void cancel() throws SQLException {
if (!this.isClosed &&
this.connection != null &&
this.connection.versionMeetsMinimum(5, 0, 0)) {
Connection cancelConn = null;
java.sql.Statement cancelStmt = null;
try {
cancelConn = this.connection.duplicate();
cancelStmt = cancelConn.createStatement();
cancelStmt.execute("KILL QUERY "
+ this.connection.getIO().getThreadId());
this.wasCancelled = true;
} finally {
if (cancelStmt != null) {
cancelStmt.close();
}
if (cancelConn != null) {
cancelConn.close();
}
}
}
}
// --------------------------JDBC 2.0-----------------------------
/**
* Checks if close() has been called, and throws an exception if so
*
* @throws SQLException
* if this statement has been closed
*/
protected void checkClosed() throws SQLException {
if (this.isClosed) {
throw SQLError.createSQLException(Messages
.getString("Statement.49"), //$NON-NLS-1$
SQLError.SQL_STATE_CONNECTION_NOT_OPEN); //$NON-NLS-1$
}
}
/**
* Checks whether the given SQL query, whose first non-whitespace character
* is given, is a DML or DDL statement, and throws an exception if it is.
*
* @param sql
* the SQL to check
* @param firstStatementChar
* the UC first non-ws char of the statement
*
* @throws SQLException
* if the statement contains DML
*/
protected void checkForDml(String sql, char firstStatementChar)
throws SQLException {
if ((firstStatementChar == 'I') || (firstStatementChar == 'U')
|| (firstStatementChar == 'D') || (firstStatementChar == 'A')
|| (firstStatementChar == 'C')) {
String noCommentSql = StringUtils.stripComments(sql,
"'\"", "'\"", true, false, true, true);
if (StringUtils.startsWithIgnoreCaseAndWs(noCommentSql, "INSERT") //$NON-NLS-1$
|| StringUtils.startsWithIgnoreCaseAndWs(noCommentSql, "UPDATE") //$NON-NLS-1$
|| StringUtils.startsWithIgnoreCaseAndWs(noCommentSql, "DELETE") //$NON-NLS-1$
|| StringUtils.startsWithIgnoreCaseAndWs(noCommentSql, "DROP") //$NON-NLS-1$
|| StringUtils.startsWithIgnoreCaseAndWs(noCommentSql, "CREATE") //$NON-NLS-1$
|| StringUtils.startsWithIgnoreCaseAndWs(noCommentSql, "ALTER")) { //$NON-NLS-1$
throw SQLError.createSQLException(Messages
.getString("Statement.57"), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$
}
}
}
/**
* Method checkNullOrEmptyQuery.
*
* @param sql
* the SQL to check
*
* @throws SQLException
* if query is null or empty.
*/
protected void checkNullOrEmptyQuery(String sql) throws SQLException {
if (sql == null) {
throw SQLError.createSQLException(Messages
.getString("Statement.59"), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$ //$NON-NLS-2$
}
if (sql.length() == 0) {
throw SQLError.createSQLException(Messages
.getString("Statement.61"), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$ //$NON-NLS-2$
}
}
/**
* JDBC 2.0 Make the set of commands in the current batch empty. This method
* is optional.
*
* @exception SQLException
* if a database-access error occurs, or the driver does not
* support batch statements
*/
public synchronized void clearBatch() throws SQLException {
if (this.batchedArgs != null) {
this.batchedArgs.clear();
}
}
/**
* After this call, getWarnings returns null until a new warning is reported
* for this Statement.
*
* @exception SQLException
* if a database access error occurs (why?)
*/
public void clearWarnings() throws SQLException {
this.warningChain = null;
}
/**
* In many cases, it is desirable to immediately release a Statement's
* database and JDBC resources instead of waiting for this to happen when it
* is automatically closed. The close method provides this immediate
* release.
*
* <p>
* <B>Note:</B> A Statement is automatically closed when it is garbage
* collected. When a Statement is closed, its current ResultSet, if one
* exists, is also closed.
* </p>
*
* @exception SQLException
* if a database access error occurs
*/
public synchronized void close() throws SQLException {
realClose(true, true);
}
/**
* Close any open result sets that have been 'held open'
*/
protected void closeAllOpenResults() {
if (this.openResults != null) {
for (Iterator iter = this.openResults.iterator(); iter.hasNext();) {
ResultSetInternalMethods element = (ResultSetInternalMethods) iter.next();
try {
element.realClose(false);
} catch (SQLException sqlEx) {
AssertionFailedException.shouldNotHappen(sqlEx);
}
}
this.openResults.clear();
}
}
/**
* @param sql
* @return
*/
private ResultSetInternalMethods createResultSetUsingServerFetch(String sql)
throws SQLException {
java.sql.PreparedStatement pStmt = this.connection.prepareStatement(
sql, this.resultSetType, this.resultSetConcurrency);
pStmt.setFetchSize(this.fetchSize);
if (this.maxRows > -1) {
pStmt.setMaxRows(this.maxRows);
}
pStmt.execute();
//
// Need to be able to get the result set irrespective of whether we
// issued DML or not to make this work.
//
ResultSetInternalMethods rs = ((com.mysql.jdbc.StatementImpl) pStmt)
.getResultSetInternal();
rs
.setStatementUsedForFetchingRows((com.mysql.jdbc.PreparedStatement) pStmt);
this.results = rs;
return rs;
}
/**
* We only stream result sets when they are forward-only, read-only, and the
* fetch size has been set to Integer.MIN_VALUE
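*
* <p>
* A caller opts in to streaming by combining these settings (a sketch,
* assuming an open java.sql.Connection named conn; the table is
* illustrative only):
* </p>
*
* <pre>
* java.sql.Statement stmt = conn.createStatement(
*         java.sql.ResultSet.TYPE_FORWARD_ONLY,
*         java.sql.ResultSet.CONCUR_READ_ONLY);
* stmt.setFetchSize(Integer.MIN_VALUE);
*
* // rows are now read from the server one at a time rather than buffered
* java.sql.ResultSet rs = stmt.executeQuery("SELECT pad FROM huge_table");
* </pre>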
*
* @return true if this result set should be streamed a row at a time,
* rather than read all at once.
*/
protected boolean createStreamingResultSet() {
return ((this.resultSetType == java.sql.ResultSet.TYPE_FORWARD_ONLY)
&& (this.resultSetConcurrency == java.sql.ResultSet.CONCUR_READ_ONLY) && (this.fetchSize == Integer.MIN_VALUE));
}
private int originalResultSetType = 0;
private int originalFetchSize = 0;
/* (non-Javadoc)
* @see com.mysql.jdbc.IStatement#enableStreamingResults()
*/
public void enableStreamingResults() throws SQLException {
this.originalResultSetType = this.resultSetType;
this.originalFetchSize = this.fetchSize;
setFetchSize(Integer.MIN_VALUE);
setResultSetType(ResultSet.TYPE_FORWARD_ONLY);
}
public void disableStreamingResults() throws SQLException {
if (this.fetchSize == Integer.MIN_VALUE &&
this.resultSetType == ResultSet.TYPE_FORWARD_ONLY) {
setFetchSize(this.originalFetchSize);
setResultSetType(this.originalResultSetType);
}
}
/**
* Execute a SQL statement that may return multiple results. We don't have
* to worry about this since we do not support multiple ResultSets. You can
* use getResultSet or getUpdateCount to retrieve the result.
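*
* <p>
* The usual pattern when the statement type is not known in advance looks
* like this (a sketch, assuming an open Statement stmt and a query string
* sql):
* </p>
*
* <pre>
* boolean isResultSet = stmt.execute(sql);
*
* if (isResultSet) {
*     java.sql.ResultSet rs = stmt.getResultSet();
*     // process the rows...
* } else {
*     int count = stmt.getUpdateCount();
*     // process the update count...
* }
* </pre>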
*
* @param sql
* any SQL statement
*
* @return true if the next result is a ResultSet, false if it is an update
* count or there are no more results
*
* @exception SQLException
* if a database access error occurs
*/
public boolean execute(String sql) throws SQLException {
return execute(sql, false);
}
private boolean execute(String sql, boolean returnGeneratedKeys) throws SQLException {
checkClosed();
ConnectionImpl locallyScopedConn = this.connection;
synchronized (locallyScopedConn.getMutex()) {
this.retrieveGeneratedKeys = returnGeneratedKeys;
resetCancelledState();
checkNullOrEmptyQuery(sql);
checkClosed();
char firstNonWsChar = StringUtils.firstAlphaCharUc(sql, findStartOfStatement(sql));
boolean isSelect = true;
if (firstNonWsChar != 'S') {
isSelect = false;
if (locallyScopedConn.isReadOnly()) {
throw SQLError.createSQLException(Messages
.getString("Statement.27") //$NON-NLS-1$
+ Messages.getString("Statement.28"), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$
}
}
boolean doStreaming = createStreamingResultSet();
// Adjust net_write_timeout to a higher value if we're
// streaming result sets. More often than not, someone runs into
// an issue where they blow net_write_timeout when using this
// feature, and if they're willing to hold a result set open
// for 30 seconds or more, one more round-trip isn't going to hurt
//
// This is reset by RowDataDynamic.close().
if (doStreaming
&& this.connection.getNetTimeoutForStreamingResults() > 0) {
executeSimpleNonQuery(locallyScopedConn, "SET net_write_timeout="
+ this.connection.getNetTimeoutForStreamingResults());
}
if (this.doEscapeProcessing) {
Object escapedSqlResult = EscapeProcessor.escapeSQL(sql,
locallyScopedConn.serverSupportsConvertFn(), locallyScopedConn);
if (escapedSqlResult instanceof String) {
sql = (String) escapedSqlResult;
} else {
sql = ((EscapeProcessorResult) escapedSqlResult).escapedSql;
}
}
if (this.results != null) {
if (!locallyScopedConn.getHoldResultsOpenOverStatementClose()) {
this.results.realClose(false);
}
}
if (sql.charAt(0) == '/') {
if (sql.startsWith(PING_MARKER)) {
doPingInstead();
return true;
}
}
CachedResultSetMetaData cachedMetaData = null;
ResultSetInternalMethods rs = null;
// If there isn't a limit clause in the SQL
// then limit the number of rows to return in
// an efficient manner. Only do this if
// setMaxRows() hasn't been used on any Statements
// generated from the current Connection (saves
// a query, and network traffic).
this.batchedGeneratedKeys = null;
if (useServerFetch()) {
rs = createResultSetUsingServerFetch(sql);
} else {
CancelTask timeoutTask = null;
String oldCatalog = null;
try {
if (locallyScopedConn.getEnableQueryTimeouts() &&
this.timeoutInMillis != 0
&& locallyScopedConn.versionMeetsMinimum(5, 0, 0)) {
timeoutTask = new CancelTask(this);
ConnectionImpl.getCancelTimer().schedule(timeoutTask,
this.timeoutInMillis);
}
if (!locallyScopedConn.getCatalog().equals(
this.currentCatalog)) {
oldCatalog = locallyScopedConn.getCatalog();
locallyScopedConn.setCatalog(this.currentCatalog);
}
//
// Check if we have cached metadata for this query...
//
Field[] cachedFields = null;
if (locallyScopedConn.getCacheResultSetMetadata()) {
cachedMetaData = locallyScopedConn.getCachedMetaData(sql);
if (cachedMetaData != null) {
cachedFields = cachedMetaData.fields;
}
}
//
// Only apply max_rows to selects
//
if (locallyScopedConn.useMaxRows()) {
int rowLimit = -1;
if (isSelect) {
if (StringUtils.indexOfIgnoreCase(sql, "LIMIT") != -1) { //$NON-NLS-1$
rowLimit = this.maxRows;
} else {
if (this.maxRows <= 0) {
executeSimpleNonQuery(locallyScopedConn,
"SET OPTION SQL_SELECT_LIMIT=DEFAULT");
} else {
executeSimpleNonQuery(locallyScopedConn,
"SET OPTION SQL_SELECT_LIMIT="
+ this.maxRows);
}
}
} else {
executeSimpleNonQuery(locallyScopedConn,
"SET OPTION SQL_SELECT_LIMIT=DEFAULT");
}
// Finally, execute the query
rs = locallyScopedConn.execSQL(this, sql, rowLimit, null,
this.resultSetType, this.resultSetConcurrency,
doStreaming,
this.currentCatalog, cachedFields);
} else {
rs = locallyScopedConn.execSQL(this, sql, -1, null,
this.resultSetType, this.resultSetConcurrency,
doStreaming,
this.currentCatalog, cachedFields);
}
if (timeoutTask != null) {
if (timeoutTask.caughtWhileCancelling != null) {
throw timeoutTask.caughtWhileCancelling;
}
timeoutTask.cancel();
timeoutTask = null;
}
synchronized (this.cancelTimeoutMutex) {
if (this.wasCancelled) {
SQLException cause = null;
if (this.wasCancelledByTimeout) {
cause = new MySQLTimeoutException();
} else {
cause = new MySQLStatementCancelledException();
}
resetCancelledState();
throw cause;
}
}
} finally {
if (timeoutTask != null) {
timeoutTask.cancel();
}
if (oldCatalog != null) {
locallyScopedConn.setCatalog(oldCatalog);
}
}
}
if (rs != null) {
this.lastInsertId = rs.getUpdateID();
this.results = rs;
rs.setFirstCharOfQuery(firstNonWsChar);
if (rs.reallyResult()) {
if (cachedMetaData != null) {
locallyScopedConn.initializeResultsMetadataFromCache(sql, cachedMetaData,
this.results);
} else {
if (this.connection.getCacheResultSetMetadata()) {
locallyScopedConn.initializeResultsMetadataFromCache(sql,
null /* will be created */, this.results);
}
}
}
}
return ((rs != null) && rs.reallyResult());
}
}
protected synchronized void resetCancelledState() {
if (this.cancelTimeoutMutex == null) {
return;
}
synchronized (this.cancelTimeoutMutex) {
this.wasCancelled = false;
this.wasCancelledByTimeout = false;
}
}
/**
* @see StatementImpl#execute(String, int)
*/
public boolean execute(String sql, int returnGeneratedKeys)
throws SQLException {
if (returnGeneratedKeys == java.sql.Statement.RETURN_GENERATED_KEYS) {
checkClosed();
ConnectionImpl locallyScopedConn = this.connection;
synchronized (locallyScopedConn.getMutex()) {
// If this is a 'REPLACE' query, we need to be able to parse
// the 'info' message returned from the server to determine
// the actual number of keys generated.
boolean readInfoMsgState = this.connection
.isReadInfoMsgEnabled();
locallyScopedConn.setReadInfoMsgEnabled(true);
try {
return execute(sql, true);
} finally {
locallyScopedConn.setReadInfoMsgEnabled(readInfoMsgState);
}
}
}
return execute(sql);
}
/**
* @see StatementImpl#execute(String, int[])
*/
public boolean execute(String sql, int[] generatedKeyIndices)
throws SQLException {
if ((generatedKeyIndices != null) && (generatedKeyIndices.length > 0)) {
checkClosed();
ConnectionImpl locallyScopedConn = this.connection;
synchronized (locallyScopedConn.getMutex()) {
this.retrieveGeneratedKeys = true;
// If this is a 'REPLACE' query, we need to be able to parse
// the 'info' message returned from the server to determine
// the actual number of keys generated.
boolean readInfoMsgState = locallyScopedConn
.isReadInfoMsgEnabled();
locallyScopedConn.setReadInfoMsgEnabled(true);
try {
return execute(sql, true);
} finally {
locallyScopedConn.setReadInfoMsgEnabled(readInfoMsgState);
}
}
}
return execute(sql);
}
/**
* @see StatementImpl#execute(String, String[])
*/
public boolean execute(String sql, String[] generatedKeyNames)
throws SQLException {
if ((generatedKeyNames != null) && (generatedKeyNames.length > 0)) {
checkClosed();
ConnectionImpl locallyScopedConn = this.connection;
synchronized (locallyScopedConn.getMutex()) {
this.retrieveGeneratedKeys = true;
// If this is a 'REPLACE' query, we need to be able to parse
// the 'info' message returned from the server to determine
// the actual number of keys generated.
boolean readInfoMsgState = this.connection
.isReadInfoMsgEnabled();
locallyScopedConn.setReadInfoMsgEnabled(true);
try {
return execute(sql, true);
} finally {
locallyScopedConn.setReadInfoMsgEnabled(readInfoMsgState);
}
}
}
return execute(sql);
}
/**
* JDBC 2.0 Submit a batch of commands to the database for execution. This
* method is optional.
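*
* <p>
* For example (a sketch, assuming an open Statement stmt; the table is
* illustrative only):
* </p>
*
* <pre>
* stmt.addBatch("INSERT INTO t (name) VALUES ('a')");
* stmt.addBatch("INSERT INTO t (name) VALUES ('b')");
* stmt.addBatch("UPDATE t SET name = 'c' WHERE name = 'a'");
*
* int[] counts = stmt.executeBatch(); // one count per batched command
* </pre>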
*
* @return an array of update counts containing one element for each command
* in the batch. The array is ordered according to the order in
* which commands were inserted into the batch
*
* @exception SQLException
* if a database-access error occurs, or the driver does not
* support batch statements
* @throws java.sql.BatchUpdateException
* if one of the commands sent to the database fails to execute
* properly
*/
public synchronized int[] executeBatch() throws SQLException {
checkClosed();
ConnectionImpl locallyScopedConn = this.connection;
if (locallyScopedConn.isReadOnly()) {
throw SQLError.createSQLException(Messages
.getString("Statement.34") //$NON-NLS-1$
+ Messages.getString("Statement.35"), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$
}
if (this.results != null) {
if (!locallyScopedConn.getHoldResultsOpenOverStatementClose()) {
this.results.realClose(false);
}
}
synchronized (locallyScopedConn.getMutex()) {
if (this.batchedArgs == null || this.batchedArgs.size() == 0) {
return new int[0];
}
// we timeout the entire batch, not individual statements
int individualStatementTimeout = this.timeoutInMillis;
this.timeoutInMillis = 0;
CancelTask timeoutTask = null;
try {
resetCancelledState();
this.retrieveGeneratedKeys = true; // The JDBC spec doesn't forbid this, but doesn't provide for it either... we do.
int[] updateCounts = null;
if (this.batchedArgs != null) {
int nbrCommands = this.batchedArgs.size();
this.batchedGeneratedKeys = new ArrayList(this.batchedArgs.size());
boolean multiQueriesEnabled = locallyScopedConn.getAllowMultiQueries();
if (locallyScopedConn.versionMeetsMinimum(4, 1, 1) &&
(multiQueriesEnabled ||
(locallyScopedConn.getRewriteBatchedStatements() &&
nbrCommands > 4))) {
return executeBatchUsingMultiQueries(multiQueriesEnabled, nbrCommands, individualStatementTimeout);
}
if (locallyScopedConn.getEnableQueryTimeouts() &&
individualStatementTimeout != 0
&& locallyScopedConn.versionMeetsMinimum(5, 0, 0)) {
timeoutTask = new CancelTask(this);
ConnectionImpl.getCancelTimer().schedule(timeoutTask,
individualStatementTimeout);
}
updateCounts = new int[nbrCommands];
for (int i = 0; i < nbrCommands; i++) {
updateCounts[i] = -3;
}
SQLException sqlEx = null;
int commandIndex = 0;
for (commandIndex = 0; commandIndex < nbrCommands; commandIndex++) {
try {
updateCounts[commandIndex] = executeUpdate((String) this.batchedArgs
.get(commandIndex), true, true);
getBatchedGeneratedKeys();
} catch (SQLException ex) {
updateCounts[commandIndex] = EXECUTE_FAILED;
if (this.continueBatchOnError &&
!(ex instanceof MySQLTimeoutException) &&
!(ex instanceof MySQLStatementCancelledException) &&
!hasDeadlockOrTimeoutRolledBackTx(ex)) {
sqlEx = ex;
} else {
int[] newUpdateCounts = new int[commandIndex];
if (hasDeadlockOrTimeoutRolledBackTx(ex)) {
for (int i = 0; i < newUpdateCounts.length; i++) {
newUpdateCounts[i] = Statement.EXECUTE_FAILED;
}
} else {
System.arraycopy(updateCounts, 0,
newUpdateCounts, 0, commandIndex);
}
throw new java.sql.BatchUpdateException(ex
.getMessage(), ex.getSQLState(), ex
.getErrorCode(), newUpdateCounts);
}
}
}
if (sqlEx != null) {
throw new java.sql.BatchUpdateException(sqlEx
.getMessage(), sqlEx.getSQLState(), sqlEx
.getErrorCode(), updateCounts);
}
}
if (timeoutTask != null) {
if (timeoutTask.caughtWhileCancelling != null) {
throw timeoutTask.caughtWhileCancelling;
}
timeoutTask.cancel();
timeoutTask = null;
}
return (updateCounts != null) ? updateCounts : new int[0];
} finally {
if (timeoutTask != null) {
timeoutTask.cancel();
}
resetCancelledState();
this.timeoutInMillis = individualStatementTimeout;
clearBatch();
}
}
}
protected final boolean hasDeadlockOrTimeoutRolledBackTx(SQLException ex) {
int vendorCode = ex.getErrorCode();
switch (vendorCode) {
case MysqlErrorNumbers.ER_LOCK_DEADLOCK:
case MysqlErrorNumbers.ER_LOCK_TABLE_FULL:
return true;
case MysqlErrorNumbers.ER_LOCK_WAIT_TIMEOUT:
try {
return !this.connection.versionMeetsMinimum(5, 0, 13);
} catch (SQLException sqlEx) {
// won't actually be thrown in this case
return false;
}
default:
return false;
}
}
/**
* Rewrites batch into a single query to send to the server. This method
* will constrain each batch to be shorter than max_allowed_packet on the
* server.
*
* @return update counts in the same manner as executeBatch()
* @throws SQLException
*/
private int[] executeBatchUsingMultiQueries(boolean multiQueriesEnabled,
int nbrCommands, int individualStatementTimeout) throws SQLException {
ConnectionImpl locallyScopedConn = this.connection;
if (!multiQueriesEnabled) {
locallyScopedConn.getIO().enableMultiQueries();
}
java.sql.Statement batchStmt = null;
CancelTask timeoutTask = null;
try {
int[] updateCounts = new int[nbrCommands];
for (int i = 0; i < nbrCommands; i++) {
updateCounts[i] = -3;
}
int commandIndex = 0;
StringBuffer queryBuf = new StringBuffer();
batchStmt = locallyScopedConn.createStatement();
if (locallyScopedConn.getEnableQueryTimeouts() &&
individualStatementTimeout != 0
&& locallyScopedConn.versionMeetsMinimum(5, 0, 0)) {
timeoutTask = new CancelTask((StatementImpl)batchStmt);
ConnectionImpl.getCancelTimer().schedule(timeoutTask,
individualStatementTimeout);
}
int counter = 0;
int numberOfBytesPerChar = 1;
String connectionEncoding = locallyScopedConn.getEncoding();
if (StringUtils.startsWithIgnoreCase(connectionEncoding, "utf")) {
numberOfBytesPerChar = 3;
} else if (CharsetMapping.isMultibyteCharset(connectionEncoding)) {
numberOfBytesPerChar = 2;
}
int escapeAdjust = 1;
if (this.doEscapeProcessing) {
escapeAdjust = 2; /* We assume packet _could_ grow by this amount, as we're not
sure how big statement will end up after
escape processing */
}
SQLException sqlEx = null;
int argumentSetsInBatchSoFar = 0;
for (commandIndex = 0; commandIndex < nbrCommands; commandIndex++) {
String nextQuery = (String) this.batchedArgs.get(commandIndex);
if (((((queryBuf.length() + nextQuery.length())
* numberOfBytesPerChar) + 1 /* for semicolon */
+ MysqlIO.HEADER_LENGTH) * escapeAdjust) + 32 > this.connection
.getMaxAllowedPacket()) {
try {
batchStmt.execute(queryBuf.toString(), Statement.RETURN_GENERATED_KEYS);
} catch (SQLException ex) {
sqlEx = handleExceptionForBatch(commandIndex,
argumentSetsInBatchSoFar, updateCounts, ex);
}
counter = processMultiCountsAndKeys((StatementImpl)batchStmt, counter,
updateCounts);
queryBuf = new StringBuffer();
argumentSetsInBatchSoFar = 0;
}
queryBuf.append(nextQuery);
queryBuf.append(";");
argumentSetsInBatchSoFar++;
}
if (queryBuf.length() > 0) {
try {
batchStmt.execute(queryBuf.toString(), Statement.RETURN_GENERATED_KEYS);
} catch (SQLException ex) {
sqlEx = handleExceptionForBatch(commandIndex - 1,
argumentSetsInBatchSoFar, updateCounts, ex);
}
counter = processMultiCountsAndKeys((StatementImpl)batchStmt, counter,
updateCounts);
}
if (timeoutTask != null) {
if (timeoutTask.caughtWhileCancelling != null) {
throw timeoutTask.caughtWhileCancelling;
}
timeoutTask.cancel();
timeoutTask = null;
}
if (sqlEx != null) {
throw new java.sql.BatchUpdateException(sqlEx
.getMessage(), sqlEx.getSQLState(), sqlEx
.getErrorCode(), updateCounts);
}
return (updateCounts != null) ? updateCounts : new int[0];
} finally {
if (timeoutTask != null) {
timeoutTask.cancel();
}
resetCancelledState();
try {
if (batchStmt != null) {
batchStmt.close();
}
} finally {
if (!multiQueriesEnabled) {
locallyScopedConn.getIO().disableMultiQueries();
}
}
}
}
protected int processMultiCountsAndKeys(
StatementImpl batchedStatement,
int updateCountCounter, int[] updateCounts) throws SQLException {
updateCounts[updateCountCounter++] = batchedStatement.getUpdateCount();
boolean doGenKeys = this.batchedGeneratedKeys != null;
byte[][] row = null;
if (doGenKeys) {
long generatedKey = batchedStatement.getLastInsertID();
row = new byte[1][];
row[0] = Long.toString(generatedKey).getBytes();
this.batchedGeneratedKeys.add(new ByteArrayRow(row));
}
while (batchedStatement.getMoreResults()
|| batchedStatement.getUpdateCount() != -1) {
updateCounts[updateCountCounter++] = batchedStatement.getUpdateCount();
if (doGenKeys) {
long generatedKey = batchedStatement.getLastInsertID();
row = new byte[1][];
row[0] = Long.toString(generatedKey).getBytes();
this.batchedGeneratedKeys.add(new ByteArrayRow(row));
}
}
return updateCountCounter;
}
protected SQLException handleExceptionForBatch(int endOfBatchIndex,
int numValuesPerBatch, int[] updateCounts, SQLException ex)
throws BatchUpdateException {
SQLException sqlEx;
for (int j = endOfBatchIndex; j > endOfBatchIndex - numValuesPerBatch; j--) {
updateCounts[j] = EXECUTE_FAILED;
}
if (this.continueBatchOnError &&
!(ex instanceof MySQLTimeoutException) &&
!(ex instanceof MySQLStatementCancelledException) &&
!hasDeadlockOrTimeoutRolledBackTx(ex)) {
sqlEx = ex;
} else {
int[] newUpdateCounts = new int[endOfBatchIndex];
System.arraycopy(updateCounts, 0,
newUpdateCounts, 0, endOfBatchIndex);
throw new java.sql.BatchUpdateException(ex
.getMessage(), ex.getSQLState(), ex
.getErrorCode(), newUpdateCounts);
}
return sqlEx;
}
/**
* Execute a SQL statement that returns a single ResultSet
*
* @param sql
* typically a static SQL SELECT statement
*
* @return a ResultSet that contains the data produced by the query
*
* @exception SQLException
* if a database access error occurs
*/
public java.sql.ResultSet executeQuery(String sql)
throws SQLException {
checkClosed();
ConnectionImpl locallyScopedConn = this.connection;
synchronized (locallyScopedConn.getMutex()) {
this.retrieveGeneratedKeys = false;
resetCancelledState();
checkNullOrEmptyQuery(sql);
boolean doStreaming = createStreamingResultSet();
// Adjust net_write_timeout to a higher value if we're
// streaming result sets. More often than not, someone runs into
// an issue where they blow net_write_timeout when using this
// feature, and if they're willing to hold a result set open
// for 30 seconds or more, one more round-trip isn't going to hurt
//
// This is reset by RowDataDynamic.close().
if (doStreaming
&& this.connection.getNetTimeoutForStreamingResults() > 0) {
executeSimpleNonQuery(locallyScopedConn, "SET net_write_timeout="
+ this.connection.getNetTimeoutForStreamingResults());
}
if (this.doEscapeProcessing) {
Object escapedSqlResult = EscapeProcessor.escapeSQL(sql,
locallyScopedConn.serverSupportsConvertFn(), this.connection);
if (escapedSqlResult instanceof String) {
sql = (String) escapedSqlResult;
} else {
sql = ((EscapeProcessorResult) escapedSqlResult).escapedSql;
}
}
char firstStatementChar = StringUtils.firstNonWsCharUc(sql,
findStartOfStatement(sql));
if (sql.charAt(0) == '/') {
if (sql.startsWith(PING_MARKER)) {
doPingInstead();
return this.results;
}
}
checkForDml(sql, firstStatementChar);
if (this.results != null) {
if (!locallyScopedConn.getHoldResultsOpenOverStatementClose()) {
this.results.realClose(false);
}
}
CachedResultSetMetaData cachedMetaData = null;
// If there isn't a limit clause in the SQL
// then limit the number of rows to return in
// an efficient manner. Only do this if
// setMaxRows() hasn't been used on any Statements
// generated from the current Connection (saves
// a query, and network traffic).
if (useServerFetch()) {
this.results = createResultSetUsingServerFetch(sql);
return this.results;
}
CancelTask timeoutTask = null;
String oldCatalog = null;
try {
if (locallyScopedConn.getEnableQueryTimeouts() &&
this.timeoutInMillis != 0
&& locallyScopedConn.versionMeetsMinimum(5, 0, 0)) {
timeoutTask = new CancelTask(this);
ConnectionImpl.getCancelTimer().schedule(timeoutTask,
this.timeoutInMillis);
}
if (!locallyScopedConn.getCatalog().equals(this.currentCatalog)) {
oldCatalog = locallyScopedConn.getCatalog();
locallyScopedConn.setCatalog(this.currentCatalog);
}
//
// Check if we have cached metadata for this query...
//
Field[] cachedFields = null;
if (locallyScopedConn.getCacheResultSetMetadata()) {
cachedMetaData = locallyScopedConn.getCachedMetaData(sql);
if (cachedMetaData != null) {
cachedFields = cachedMetaData.fields;
}
}
if (locallyScopedConn.useMaxRows()) {
// We need to execute this all together, so synchronize
// on the Connection's mutex (because even queries going
// through there synchronize on the connection)
if (StringUtils.indexOfIgnoreCase(sql, "LIMIT") != -1) { //$NON-NLS-1$
this.results = locallyScopedConn.execSQL(this, sql,
this.maxRows, null, this.resultSetType,
this.resultSetConcurrency,
doStreaming,
this.currentCatalog, cachedFields);
} else {
if (this.maxRows <= 0) {
executeSimpleNonQuery(locallyScopedConn,
"SET OPTION SQL_SELECT_LIMIT=DEFAULT");
} else {
executeSimpleNonQuery(locallyScopedConn,
"SET OPTION SQL_SELECT_LIMIT=" + this.maxRows);
}
this.results = locallyScopedConn.execSQL(this, sql, -1,
null, this.resultSetType,
this.resultSetConcurrency,
doStreaming,
this.currentCatalog, cachedFields);
if (oldCatalog != null) {
locallyScopedConn.setCatalog(oldCatalog);
}
}
} else {
this.results = locallyScopedConn.execSQL(this, sql, -1, null,
this.resultSetType, this.resultSetConcurrency,
doStreaming,
this.currentCatalog, cachedFields);
}
if (timeoutTask != null) {
if (timeoutTask.caughtWhileCancelling != null) {
throw timeoutTask.caughtWhileCancelling;
}
timeoutTask.cancel();
timeoutTask = null;
}
synchronized (this.cancelTimeoutMutex) {
if (this.wasCancelled) {
SQLException cause = null;
if (this.wasCancelledByTimeout) {
cause = new MySQLTimeoutException();
} else {
cause = new MySQLStatementCancelledException();
}
resetCancelledState();
throw cause;
}
}
} finally {
if (timeoutTask != null) {
timeoutTask.cancel();
}
if (oldCatalog != null) {
locallyScopedConn.setCatalog(oldCatalog);
}
}
this.lastInsertId = this.results.getUpdateID();
if (cachedMetaData != null) {
locallyScopedConn.initializeResultsMetadataFromCache(sql, cachedMetaData,
this.results);
} else {
if (this.connection.getCacheResultSetMetadata()) {
locallyScopedConn.initializeResultsMetadataFromCache(sql,
null /* will be created */, this.results);
}
}
return this.results;
}
}
protected void doPingInstead() throws SQLException {
if (this.pingTarget != null) {
this.pingTarget.doPing();
} else {
this.connection.ping();
}
ResultSetInternalMethods fakeSelectOneResultSet = generatePingResultSet();
this.results = fakeSelectOneResultSet;
}
protected ResultSetInternalMethods generatePingResultSet() throws SQLException {
Field[] fields = { new Field(null, "1", Types.BIGINT, 1) };
ArrayList rows = new ArrayList();
byte[] colVal = new byte[] { (byte) '1' };
rows.add(new ByteArrayRow(new byte[][] { colVal }));
return (ResultSetInternalMethods) DatabaseMetaData.buildResultSet(fields, rows,
this.connection);
}
protected void executeSimpleNonQuery(ConnectionImpl c, String nonQuery)
throws SQLException {
c.execSQL(this, nonQuery,
-1, null, ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY, false, this.currentCatalog,
null, false).close();
}
/**
* Execute a SQL INSERT, UPDATE or DELETE statement. In addition, SQL
* statements that return nothing, such as SQL DDL statements, can be
* executed. Any IDs generated for AUTO_INCREMENT fields can be retrieved
* by casting this Statement to com.mysql.jdbc.Statement and calling the
* getLastInsertID() method.
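*
* <p>
* The standard JDBC route to those keys (a sketch, assuming an open
* java.sql.Connection named conn and a table with an AUTO_INCREMENT
* primary key; the table is illustrative only):
* </p>
*
* <pre>
* java.sql.Statement stmt = conn.createStatement();
*
* stmt.executeUpdate("INSERT INTO t (name) VALUES ('a')",
*         java.sql.Statement.RETURN_GENERATED_KEYS);
*
* java.sql.ResultSet keys = stmt.getGeneratedKeys();
*
* if (keys.next()) {
*     long newId = keys.getLong(1);
* }
* </pre>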
*
* @param sql
* a SQL statement
*
* @return either the row count for INSERT, UPDATE or DELETE statements, or
* 0 for SQL statements that return nothing
*
* @exception SQLException
* if a database access error occurs
*/
public int executeUpdate(String sql) throws SQLException {
return executeUpdate(sql, false, false);
}
protected int executeUpdate(String sql, boolean isBatch, boolean returnGeneratedKeys)
throws SQLException {
checkClosed();
ConnectionImpl locallyScopedConn = this.connection;
char firstStatementChar = StringUtils.firstAlphaCharUc(sql,
findStartOfStatement(sql));
ResultSetInternalMethods rs = null;
synchronized (locallyScopedConn.getMutex()) {
this.retrieveGeneratedKeys = returnGeneratedKeys;
resetCancelledState();
checkNullOrEmptyQuery(sql);
if (this.doEscapeProcessing) {
Object escapedSqlResult = EscapeProcessor.escapeSQL(sql,
this.connection.serverSupportsConvertFn(), this.connection);
if (escapedSqlResult instanceof String) {
sql = (String) escapedSqlResult;
} else {
sql = ((EscapeProcessorResult) escapedSqlResult).escapedSql;
}
}
if (locallyScopedConn.isReadOnly()) {
throw SQLError.createSQLException(Messages
.getString("Statement.42") //$NON-NLS-1$
+ Messages.getString("Statement.43"), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$
}
if (StringUtils.startsWithIgnoreCaseAndWs(sql, "select")) { //$NON-NLS-1$
throw SQLError.createSQLException(Messages
.getString("Statement.46"), //$NON-NLS-1$
"01S03"); //$NON-NLS-1$
}
if (this.results != null) {
if (!locallyScopedConn.getHoldResultsOpenOverStatementClose()) {
this.results.realClose(false);
}
}
// The checking and changing of catalogs
// must happen in sequence, so synchronize
// on the same mutex that _conn is using
CancelTask timeoutTask = null;
String oldCatalog = null;
try {
if (locallyScopedConn.getEnableQueryTimeouts() &&
this.timeoutInMillis != 0
&& locallyScopedConn.versionMeetsMinimum(5, 0, 0)) {
timeoutTask = new CancelTask(this);
ConnectionImpl.getCancelTimer().schedule(timeoutTask,
this.timeoutInMillis);
}
if (!locallyScopedConn.getCatalog().equals(this.currentCatalog)) {
oldCatalog = locallyScopedConn.getCatalog();
locallyScopedConn.setCatalog(this.currentCatalog);
}
//
// Only apply max_rows to selects
//
if (locallyScopedConn.useMaxRows()) {
executeSimpleNonQuery(locallyScopedConn,
"SET OPTION SQL_SELECT_LIMIT=DEFAULT");
}
rs = locallyScopedConn.execSQL(this, sql, -1, null,
java.sql.ResultSet.TYPE_FORWARD_ONLY,
java.sql.ResultSet.CONCUR_READ_ONLY, false,
this.currentCatalog,
null /* force read of field info on DML */,
isBatch);
if (timeoutTask != null) {
if (timeoutTask.caughtWhileCancelling != null) {
throw timeoutTask.caughtWhileCancelling;
}
timeoutTask.cancel();
timeoutTask = null;
}
synchronized (this.cancelTimeoutMutex) {
if (this.wasCancelled) {
SQLException cause = null;
if (this.wasCancelledByTimeout) {
cause = new MySQLTimeoutException();
} else {
cause = new MySQLStatementCancelledException();
}
resetCancelledState();
throw cause;
}
}
} finally {
if (timeoutTask != null) {
timeoutTask.cancel();
}
if (oldCatalog != null) {
locallyScopedConn.setCatalog(oldCatalog);
}
}
}
this.results = rs;
rs.setFirstCharOfQuery(firstStatementChar);
this.updateCount = rs.getUpdateCount();
int truncatedUpdateCount = 0;
if (this.updateCount > Integer.MAX_VALUE) {
truncatedUpdateCount = Integer.MAX_VALUE;
} else {
truncatedUpdateCount = (int) this.updateCount;
}
this.lastInsertId = rs.getUpdateID();
return truncatedUpdateCount;
}
/**
* @see StatementImpl#executeUpdate(String, int)
*/
public int executeUpdate(String sql, int returnGeneratedKeys)
throws SQLException {
if (returnGeneratedKeys == java.sql.Statement.RETURN_GENERATED_KEYS) {
checkClosed();
ConnectionImpl locallyScopedConn = this.connection;
synchronized (locallyScopedConn.getMutex()) {
// If this is a 'REPLACE' query, we need to be able to parse
// the 'info' message returned from the server to determine
// the actual number of keys generated.
boolean readInfoMsgState = locallyScopedConn
.isReadInfoMsgEnabled();
locallyScopedConn.setReadInfoMsgEnabled(true);
try {
return executeUpdate(sql, false, true);
} finally {
locallyScopedConn.setReadInfoMsgEnabled(readInfoMsgState);
}
}
}
return executeUpdate(sql);
}
/**
* @see StatementImpl#executeUpdate(String, int[])
*/
public int executeUpdate(String sql, int[] generatedKeyIndices)
throws SQLException {
if ((generatedKeyIndices != null) && (generatedKeyIndices.length > 0)) {
checkClosed();
ConnectionImpl locallyScopedConn = this.connection;
synchronized (locallyScopedConn.getMutex()) {
// If this is a 'REPLACE' query, we need to be able to parse
// the 'info' message returned from the server to determine
// the actual number of keys generated.
boolean readInfoMsgState = locallyScopedConn
.isReadInfoMsgEnabled();
locallyScopedConn.setReadInfoMsgEnabled(true);
try {
return executeUpdate(sql, false, true);
} finally {
locallyScopedConn.setReadInfoMsgEnabled(readInfoMsgState);
}
}
}
return executeUpdate(sql);
}
/**
* @see StatementImpl#executeUpdate(String, String[])
*/
public int executeUpdate(String sql, String[] generatedKeyNames)
throws SQLException {
if ((generatedKeyNames != null) && (generatedKeyNames.length > 0)) {
checkClosed();
ConnectionImpl locallyScopedConn = this.connection;
synchronized (locallyScopedConn.getMutex()) {
// If this is a 'REPLACE' query, we need to be able to parse
// the 'info' message returned from the server to determine
// the actual number of keys generated.
boolean readInfoMsgState = this.connection
.isReadInfoMsgEnabled();
locallyScopedConn.setReadInfoMsgEnabled(true);
try {
return executeUpdate(sql, false, true);
} finally {
locallyScopedConn.setReadInfoMsgEnabled(readInfoMsgState);
}
}
}
return executeUpdate(sql);
}
/**
* Optimization to only use one calendar per-session, or calculate it for
* each call, depending on user configuration
*/
protected Calendar getCalendarInstanceForSessionOrNew() {
if (this.connection != null) {
return this.connection.getCalendarInstanceForSessionOrNew();
} else {
// punt, no connection around
return new GregorianCalendar();
}
}
/**
* JDBC 2.0 Return the Connection that produced the Statement.
*
* @return the Connection that produced the Statement
*
* @throws SQLException
* if an error occurs
*/
public java.sql.Connection getConnection() throws SQLException {
return this.connection;
}
/**
* JDBC 2.0 Determine the fetch direction.
*
* @return the default fetch direction
*
* @exception SQLException
* if a database-access error occurs
*/
public int getFetchDirection() throws SQLException {
return java.sql.ResultSet.FETCH_FORWARD;
}
/**
* JDBC 2.0 Determine the default fetch size.
*
* @return the number of rows to fetch at a time
*
* @throws SQLException
* if an error occurs
*/
public int getFetchSize() throws SQLException {
return this.fetchSize;
}
/**
* Retrieves any auto-generated keys created as a result of executing this
* Statement object.
*
* @return a ResultSet object containing the auto-generated key(s)
*
* @throws SQLException
* if a database access error occurs, or generated keys were not
* requested
*/
public synchronized java.sql.ResultSet getGeneratedKeys()
throws SQLException {
if (!this.retrieveGeneratedKeys) {
throw SQLError.createSQLException(Messages.getString("Statement.GeneratedKeysNotRequested"), SQLError.SQL_STATE_ILLEGAL_ARGUMENT);
}
if (this.batchedGeneratedKeys == null) {
return getGeneratedKeysInternal();
}
Field[] fields = new Field[1];
fields[0] = new Field("", "GENERATED_KEY", Types.BIGINT, 17); //$NON-NLS-1$ //$NON-NLS-2$
fields[0].setConnection(this.connection);
return com.mysql.jdbc.ResultSetImpl.getInstance(this.currentCatalog, fields,
new RowDataStatic(this.batchedGeneratedKeys), this.connection,
this, false);
}
/*
* Needed because there's no concept of super.super to get to this
* implementation from ServerPreparedStatement when dealing with batched
* updates.
*/
protected java.sql.ResultSet getGeneratedKeysInternal()
throws SQLException {
Field[] fields = new Field[1];
fields[0] = new Field("", "GENERATED_KEY", Types.BIGINT, 17); //$NON-NLS-1$ //$NON-NLS-2$
fields[0].setConnection(this.connection);
fields[0].setUseOldNameMetadata(true);
ArrayList rowSet = new ArrayList();
long beginAt = getLastInsertID();
int numKeys = getUpdateCount();
if (this.results != null) {
String serverInfo = this.results.getServerInfo();
//
// Only parse server info messages for 'REPLACE'
// queries
//
if ((numKeys > 0) && (this.results.getFirstCharOfQuery() == 'R')
&& (serverInfo != null) && (serverInfo.length() > 0)) {
numKeys = getRecordCountFromInfo(serverInfo);
}
if ((beginAt > 0) && (numKeys > 0)) {
for (int i = 0; i < numKeys; i++) {
byte[][] row = new byte[1][];
row[0] = Long.toString(beginAt).getBytes();
rowSet.add(new ByteArrayRow(row));
beginAt += this.connection.getAutoIncrementIncrement();
}
}
}
com.mysql.jdbc.ResultSetImpl gkRs = com.mysql.jdbc.ResultSetImpl.getInstance(this.currentCatalog, fields,
new RowDataStatic(rowSet), this.connection, this, false);
this.openResults.add(gkRs);
return gkRs;
}
/**
* Returns the id used when profiling
*
* @return the id used when profiling.
*/
protected int getId() {
return this.statementId;
}
/**
* getLastInsertID returns the value of the auto-incremented key after an
* executeQuery() or execute() call.
*
* <p>
* This gets around the un-threadsafe behavior of "select LAST_INSERT_ID()"
* which is tied to the Connection that created this Statement, and
* therefore could have had many INSERTS performed before one gets a chance
* to call "select LAST_INSERT_ID()".
* </p>
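*
* <p>
* For example (a sketch; the cast assumes stmt was created by this driver,
* and the table is illustrative only):
* </p>
*
* <pre>
* stmt.executeUpdate("INSERT INTO t (name) VALUES ('a')");
*
* long id = ((com.mysql.jdbc.Statement) stmt).getLastInsertID();
* </pre>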
*
* @return the last update ID.
*/
public long getLastInsertID() {
return this.lastInsertId;
}
/**
* getLongUpdateCount returns the current result as an update count. If the
* result is a ResultSet or there are no more results, -1 is returned. It
* should only be called once per result.
*
* <p>
* This method returns a long, as MySQL server versions newer than 3.22.4
* return 64-bit values for update counts.
* </p>
*
* @return the current update count.
*/
public long getLongUpdateCount() {
if (this.results == null) {
return -1;
}
if (this.results.reallyResult()) {
return -1;
}
return this.updateCount;
}
/**
* The maxFieldSize limit (in bytes) is the maximum amount of data returned
* for any column value; it only applies to BINARY, VARBINARY,
* LONGVARBINARY, CHAR, VARCHAR and LONGVARCHAR columns. If the limit is
* exceeded, the excess data is silently discarded.
*
* @return the current max column size limit; zero means unlimited
*
* @exception SQLException
* if a database access error occurs
*/
public int getMaxFieldSize() throws SQLException {
return this.maxFieldSize;
}
/**
* The maxRows limit is set to limit the number of rows that any ResultSet
* can contain. If the limit is exceeded, the excess rows are silently
* dropped.
*
* @return the current maximum row limit; zero means unlimited
*
* @exception SQLException
* if a database access error occurs
*/
public int getMaxRows() throws SQLException {
if (this.maxRows <= 0) {
return 0;
}
return this.maxRows;
}
/**
* getMoreResults moves to a Statement's next result. If it returns true,
* this result is a ResultSet.
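*
* <p>
* When a statement may produce several results (for example, with
* allowMultiQueries enabled), they are typically drained like this (a
* sketch, assuming an open Statement stmt and a query string sql):
* </p>
*
* <pre>
* boolean hasResultSet = stmt.execute(sql);
*
* while (true) {
*     if (hasResultSet) {
*         java.sql.ResultSet rs = stmt.getResultSet();
*         // process the rows...
*     } else {
*         int count = stmt.getUpdateCount();
*
*         if (count == -1) {
*             break; // no more results
*         }
*     }
*
*     hasResultSet = stmt.getMoreResults();
* }
* </pre>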
*
* @return true if the next ResultSet is valid
*
* @exception SQLException
* if a database access error occurs
*/
public boolean getMoreResults() throws SQLException {
return getMoreResults(CLOSE_CURRENT_RESULT);
}
/**
* @see StatementImpl#getMoreResults(int)
*/
public boolean getMoreResults(int current) throws SQLException {
if (this.results == null) {
return false;
}
boolean streamingMode = createStreamingResultSet();
if (streamingMode) {
if (this.results.reallyResult()) {
while (this.results.next()); // need to drain remaining rows to get to server status
// which tells us whether more results actually exist or not
}
}
ResultSetInternalMethods nextResultSet = this.results.getNextResultSet();
switch (current) {
case java.sql.Statement.CLOSE_CURRENT_RESULT:
if (this.results != null) {
if (!streamingMode) {
this.results.close();
}
this.results.clearNextResult();
}
break;
case java.sql.Statement.CLOSE_ALL_RESULTS:
if (this.results != null) {
if (!streamingMode) {
this.results.close();
}
this.results.clearNextResult();
}
closeAllOpenResults();
break;
case java.sql.Statement.KEEP_CURRENT_RESULT:
if (!this.connection.getDontTrackOpenResources()) {
this.openResults.add(this.results);
}
this.results.clearNextResult(); // nobody besides us should
// ever need this value...
break;
default:
throw SQLError.createSQLException(Messages
.getString("Statement.19"), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$
}
this.results = nextResultSet;
if (this.results == null) {
this.updateCount = -1;
this.lastInsertId = -1;
} else if (this.results.reallyResult()) {
this.updateCount = -1;
this.lastInsertId = -1;
} else {
this.updateCount = this.results.getUpdateCount();
this.lastInsertId = this.results.getUpdateID();
}
return ((this.results != null) && this.results.reallyResult()) ? true
: false;
}
/**
* The queryTimeout limit is the number of seconds the driver will wait for
* a Statement to execute. If the limit is exceeded, a SQLException is
* thrown.
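*
* <p>
* For example (a sketch, assuming an open Statement stmt; the query is
* illustrative only):
* </p>
*
* <pre>
* stmt.setQueryTimeout(5); // give up after five seconds
*
* try {
*     stmt.executeQuery("SELECT pad FROM huge_table");
* } catch (java.sql.SQLException sqlEx) {
*     // a MySQLTimeoutException is raised here if the timeout elapsed
* }
* </pre>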
*
* @return the current query timeout limit in seconds; 0 = unlimited
*
* @exception SQLException
* if a database access error occurs
*/
public int getQueryTimeout() throws SQLException {
return this.timeoutInMillis / 1000;
}
/**
* Parses actual record count from 'info' message
*
* @param serverInfo
* the 'info' message returned by the server
*
* @return the number of records affected, minus any duplicates
*/
private int getRecordCountFromInfo(String serverInfo) {
StringBuffer recordsBuf = new StringBuffer();
int recordsCount = 0;
int duplicatesCount = 0;
char c = (char) 0;
int length = serverInfo.length();
int i = 0;
for (; i < length; i++) {
c = serverInfo.charAt(i);
if (Character.isDigit(c)) {
break;
}
}
recordsBuf.append(c);
i++;
for (; i < length; i++) {
c = serverInfo.charAt(i);
if (!Character.isDigit(c)) {
break;
}
recordsBuf.append(c);
}
recordsCount = Integer.parseInt(recordsBuf.toString());
StringBuffer duplicatesBuf = new StringBuffer();
for (; i < length; i++) {
c = serverInfo.charAt(i);
if (Character.isDigit(c)) {
break;
}
}
duplicatesBuf.append(c);
i++;
for (; i < length; i++) {
c = serverInfo.charAt(i);
if (!Character.isDigit(c)) {
break;
}
duplicatesBuf.append(c);
}
duplicatesCount = Integer.parseInt(duplicatesBuf.toString());
return recordsCount - duplicatesCount;
}
/**
* getResultSet returns the current result as a ResultSet. It should only be
* called once per result.
*
* @return the current result set; null if there are no more
*
* @exception SQLException
* if a database access error occurs (why?)
*/
public java.sql.ResultSet getResultSet() throws SQLException {
return ((this.results != null) && this.results.reallyResult()) ? (java.sql.ResultSet) this.results
: null;
}
/**
* JDBC 2.0 Determine the result set concurrency.
*
* @return CONCUR_UPDATABLE or CONCUR_READONLY
*
* @throws SQLException
* if an error occurs
*/
public int getResultSetConcurrency() throws SQLException {
return this.resultSetConcurrency;
}
/**
* @see StatementImpl#getResultSetHoldability()
*/
public int getResultSetHoldability() throws SQLException {
return java.sql.ResultSet.HOLD_CURSORS_OVER_COMMIT;
}
protected ResultSetInternalMethods getResultSetInternal() {
return this.results;
}
/**
* JDBC 2.0 Determine the result set type.
*
* @return the ResultSet type (SCROLL_SENSITIVE or SCROLL_INSENSITIVE)
*
* @throws SQLException
* if an error occurs.
*/
public int getResultSetType() throws SQLException {
return this.resultSetType;
}
/**
* getUpdateCount returns the current result as an update count. If the
* result is a ResultSet or there are no more results, -1 is returned. It
* should only be called once per result.
*
* @return the current result as an update count.
*
* @exception SQLException
* if a database access error occurs
*/
public int getUpdateCount() throws SQLException {
if (this.results == null) {
return -1;
}
if (this.results.reallyResult()) {
return -1;
}
int truncatedUpdateCount = 0;
if (this.results.getUpdateCount() > Integer.MAX_VALUE) {
truncatedUpdateCount = Integer.MAX_VALUE;
} else {
truncatedUpdateCount = (int) this.results.getUpdateCount();
}
return truncatedUpdateCount;
}
/**
* The first warning reported by calls on this Statement is returned. A
* Statement's execute methods clear its java.sql.SQLWarning chain.
* Subsequent Statement warnings will be chained to this
* java.sql.SQLWarning.
*
* <p>
* The Warning chain is automatically cleared each time a statement is
* (re)executed.
* </p>
*
* <p>
* <B>Note:</B> If you are processing a ResultSet then any warnings
* associated with ResultSet reads will be chained on the ResultSet object.
* </p>
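*
* <p>
* Walking the chain looks like this (a sketch, assuming an open Statement
* stmt that has just been executed):
* </p>
*
* <pre>
* java.sql.SQLWarning warning = stmt.getWarnings();
*
* while (warning != null) {
*     System.err.println(warning.getSQLState() + ": " + warning.getMessage());
*     warning = warning.getNextWarning();
* }
* </pre>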
*
* @return the first java.sql.SQLWarning or null
*
* @exception SQLException
* if a database access error occurs
*/
public java.sql.SQLWarning getWarnings() throws SQLException {
checkClosed();
if (this.connection != null && !this.connection.isClosed()
&& this.connection.versionMeetsMinimum(4, 1, 0)) {
SQLWarning pendingWarningsFromServer = SQLError
.convertShowWarningsToSQLWarnings(this.connection);
if (this.warningChain != null) {
this.warningChain.setNextWarning(pendingWarningsFromServer);
} else {
this.warningChain = pendingWarningsFromServer;
}
return this.warningChain;
}
return this.warningChain;
}
/**
* Closes this statement, and frees resources.
*
* @param calledExplicitly
* was this called from close()?
*
* @param closeOpenResults
* should any open result sets also be closed?
*
* @throws SQLException
* if an error occurs
*/
protected void realClose(boolean calledExplicitly, boolean closeOpenResults)
throws SQLException {
if (this.isClosed) {
return;
}
if (this.useUsageAdvisor) {
if (!calledExplicitly) {
String message = Messages.getString("Statement.63") //$NON-NLS-1$
+ Messages.getString("Statement.64"); //$NON-NLS-1$
this.eventSink.consumeEvent(new ProfilerEvent(
ProfilerEvent.TYPE_WARN,
"", //$NON-NLS-1$
this.currentCatalog, this.connectionId, this.getId(),
-1, System.currentTimeMillis(), 0,
Constants.MILLIS_I18N, null, this.pointOfOrigin,
message));
}
}
if (closeOpenResults) {
closeOpenResults = !this.holdResultsOpenOverClose;
}
if (closeOpenResults) {
if (this.results != null) {
try {
this.results.close();
} catch (Exception ex) {
;
}
}
closeAllOpenResults();
}
if (this.connection != null) {
if (this.maxRowsChanged) {
this.connection.unsetMaxRows(this);
}
if (!this.connection.getDontTrackOpenResources()) {
this.connection.unregisterStatement(this);
}
}
this.isClosed = true;
this.results = null;
this.connection = null;
this.warningChain = null;
this.openResults = null;
this.batchedGeneratedKeys = null;
this.localInfileInputStream = null;
this.pingTarget = null;
}
/**
* setCursorName defines the SQL cursor name that will be used by subsequent
* execute methods. This name can then be used in SQL positioned
* update/delete statements to identify the current row in the ResultSet
* generated by this statement. If a database doesn't support positioned
* update/delete, this method is a no-op.
*
* <p>
* <b>Note:</b> This MySQL driver does not support cursors.
* </p>
*
* @param name
* the new cursor name
*
* @exception SQLException
* if a database access error occurs
*/
public void setCursorName(String name) throws SQLException {
// No-op
}
/**
* If escape scanning is on (the default), the driver will do escape
* substitution before sending the SQL to the database.
*
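* <p>
* Illustrative sketch only: with escape processing enabled, JDBC escape
* sequences such as the scalar-function escape below are rewritten before
* the statement is sent to the server.
* </p>
*
* <pre>
* stmt.setEscapeProcessing(true);
* ResultSet rs = stmt.executeQuery("SELECT {fn UCASE('abc')}");
* </pre>
*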
* @param enable
* true to enable; false to disable
*
* @exception SQLException
* if a database access error occurs
*/
public void setEscapeProcessing(boolean enable)
throws SQLException {
this.doEscapeProcessing = enable;
}
/**
* JDBC 2.0 Give a hint as to the direction in which the rows in a result
* set will be processed. The hint applies only to result sets created using
* this Statement object. The default value is ResultSet.FETCH_FORWARD.
*
* @param direction
* the initial direction for processing rows
*
* @exception SQLException
* if a database-access error occurs or direction is not one
* of ResultSet.FETCH_FORWARD, ResultSet.FETCH_REVERSE, or
* ResultSet.FETCH_UNKNOWN
*/
public void setFetchDirection(int direction) throws SQLException {
switch (direction) {
case java.sql.ResultSet.FETCH_FORWARD:
case java.sql.ResultSet.FETCH_REVERSE:
case java.sql.ResultSet.FETCH_UNKNOWN:
break;
default:
throw SQLError.createSQLException(
Messages.getString("Statement.5"), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$
}
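// We only validate the hint here; this driver always fetches rows
// in the forward direction, so the value is not stored.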
}
/**
* JDBC 2.0 Give the JDBC driver a hint as to the number of rows that should
* be fetched from the database when more rows are needed. The number of
* rows specified only affects result sets created using this statement. If
* the value specified is zero, then the hint is ignored. The default value
* is zero.
*
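* <p>
* Illustrative sketch only (assuming an open <code>Connection</code> named
* <code>conn</code>): besides non-negative values, the range check in this
* method also admits <code>Integer.MIN_VALUE</code>, which this driver
* treats as a request to stream forward-only, read-only result sets row by
* row instead of buffering them.
* </p>
*
* <pre>
* java.sql.Statement stmt = conn.createStatement(
*         ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
* stmt.setFetchSize(Integer.MIN_VALUE);
* </pre>
*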
* @param rows
* the number of rows to fetch
*
* @exception SQLException
* if a database-access error occurs, or the condition 0
* <= rows <= this.getMaxRows() is not satisfied.
*/
public void setFetchSize(int rows) throws SQLException {
if (((rows < 0) && (rows != Integer.MIN_VALUE))
|| ((this.maxRows != 0) && (this.maxRows != -1) && (rows > this
.getMaxRows()))) {
throw SQLError.createSQLException(
Messages.getString("Statement.7"), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$ //$NON-NLS-2$
}
this.fetchSize = rows;
}
protected void setHoldResultsOpenOverClose(boolean holdResultsOpenOverClose) {
this.holdResultsOpenOverClose = holdResultsOpenOverClose;
}
/**
* Sets the maxFieldSize
*
* @param max
* the new max column size limit; zero means unlimited
*
* @exception SQLException
* if size exceeds buffer size
*/
public void setMaxFieldSize(int max) throws SQLException {
if (max < 0) {
throw SQLError.createSQLException(Messages
.getString("Statement.11"), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$
}
int maxBuf = (this.connection != null) ? this.connection
.getMaxAllowedPacket() : MysqlIO.getMaxBuf();
if (max > maxBuf) {
throw SQLError.createSQLException(Messages.getString(
"Statement.13", //$NON-NLS-1$
new Object[] { Constants.longValueOf(maxBuf) }), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$
}
this.maxFieldSize = max;
}
/**
* Set the maximum number of rows
*
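* <p>
* Illustrative sketch only: rows beyond the limit are silently dropped.
* </p>
*
* <pre>
* stmt.setMaxRows(100);
* ResultSet rs = stmt.executeQuery("SELECT id FROM big_table");
* </pre>
*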
* @param max
* the new max rows limit; zero means unlimited
*
* @exception SQLException
* if a database access error occurs
*
* @see getMaxRows
*/
public void setMaxRows(int max) throws SQLException {
if ((max > MysqlDefs.MAX_ROWS) || (max < 0)) {
throw SQLError
.createSQLException(
Messages.getString("Statement.15") + max //$NON-NLS-1$
+ " > " //$NON-NLS-1$ //$NON-NLS-2$
+ MysqlDefs.MAX_ROWS + ".", SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$ //$NON-NLS-2$
}
if (max == 0) {
max = -1;
}
this.maxRows = max;
this.maxRowsChanged = true;
if (this.maxRows == -1) {
this.connection.unsetMaxRows(this);
this.maxRowsChanged = false;
} else {
// Most people don't use setMaxRows()
// so don't penalize them
// with the extra query it takes
// to do it efficiently unless we need
// to.
this.connection.maxRowsChanged(this);
}
}
/**
* Sets the queryTimeout limit
*
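* <p>
* Illustrative sketch only: a query that exceeds the limit is cancelled and
* the execute call fails with an SQLException.
* </p>
*
* <pre>
* stmt.setQueryTimeout(5);
* try {
*     stmt.executeQuery("SELECT SLEEP(10)");
* } catch (SQLException timedOut) {
*     // the query was cancelled after roughly five seconds
* }
* </pre>
*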
* @param seconds
* the new query timeout limit in seconds; zero means there is no
* limit
*
* @exception SQLException
* if a database access error occurs
*/
public void setQueryTimeout(int seconds) throws SQLException {
if (seconds < 0) {
throw SQLError.createSQLException(Messages
.getString("Statement.21"), //$NON-NLS-1$
SQLError.SQL_STATE_ILLEGAL_ARGUMENT); //$NON-NLS-1$
}
this.timeoutInMillis = seconds * 1000;
}
/**
* Sets the concurrency for result sets generated by this statement
*
* @param concurrencyFlag
* the concurrency to use (ResultSet.CONCUR_READ_ONLY or
* ResultSet.CONCUR_UPDATABLE)
*/
void setResultSetConcurrency(int concurrencyFlag) {
this.resultSetConcurrency = concurrencyFlag;
}
/**
* Sets the result set type for result sets generated by this statement
*
* @param typeFlag
* the result set type to use (ResultSet.TYPE_FORWARD_ONLY,
* ResultSet.TYPE_SCROLL_INSENSITIVE or
* ResultSet.TYPE_SCROLL_SENSITIVE)
*/
void setResultSetType(int typeFlag) {
this.resultSetType = typeFlag;
}
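/**
* Collects the generated keys produced by the given batched statement and
* appends them to this statement's list of batched generated keys, if
* generated-key retrieval was requested.
*
* @param batchedStatement
* the statement whose generated keys should be collected
*
* @throws SQLException
* if the generated keys cannot be retrieved
*/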
protected void getBatchedGeneratedKeys(java.sql.Statement batchedStatement) throws SQLException {
if (this.retrieveGeneratedKeys) {
java.sql.ResultSet rs = null;
try {
rs = batchedStatement.getGeneratedKeys();
while (rs.next()) {
this.batchedGeneratedKeys
.add(new ByteArrayRow(new byte[][] { rs.getBytes(1) }));
}
} finally {
if (rs != null) {
rs.close();
}
}
}
}
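/**
* Collects the generated keys produced by the most recent execution of this
* statement and appends them to the list of batched generated keys, if
* generated-key retrieval was requested.
*
* @throws SQLException
* if the generated keys cannot be retrieved
*/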
protected void getBatchedGeneratedKeys() throws SQLException {
if (this.retrieveGeneratedKeys) {
java.sql.ResultSet rs = null;
try {
rs = getGeneratedKeysInternal();
while (rs.next()) {
this.batchedGeneratedKeys
.add(new ByteArrayRow(new byte[][] { rs.getBytes(1) }));
}
} finally {
if (rs != null) {
rs.close();
}
}
}
}
/**
* Determines whether a server-side (cursor-based) fetch should be used for
* this statement.
*
* @return true if cursor fetching is enabled on the connection, a positive
* fetch size has been set, and the result set is forward-only and
* read-only
*/
private boolean useServerFetch() throws SQLException {
return this.connection.isCursorFetchEnabled() && this.fetchSize > 0
&& this.resultSetConcurrency == ResultSet.CONCUR_READ_ONLY
&& this.resultSetType == ResultSet.TYPE_FORWARD_ONLY;
}
public synchronized boolean isClosed() throws SQLException {
return this.isClosed;
}
private boolean isPoolable = true;
public boolean isPoolable() throws SQLException {
return this.isPoolable;
}
public void setPoolable(boolean poolable) throws SQLException {
this.isPoolable = poolable;
}
/**
* Returns true if this either implements the interface argument or is directly or indirectly a wrapper
* for an object that does. Returns false otherwise. If this implements the interface then return true,
* else if this is a wrapper then return the result of recursively calling <code>isWrapperFor</code> on the wrapped
* object. If this does not implement the interface and is not a wrapper, return false.
* This method should be implemented as a low-cost operation compared to <code>unwrap</code> so that
* callers can use this method to avoid expensive <code>unwrap</code> calls that may fail. If this method
* returns true then calling <code>unwrap</code> with the same argument should succeed.
*
* @param iface a Class defining an interface.
* @return true if this implements the interface or directly or indirectly wraps an object that does.
* @throws java.sql.SQLException if an error occurs while determining whether this is a wrapper
* for an object with the given interface.
* @since 1.6
*/
public boolean isWrapperFor(Class iface) throws SQLException {
checkClosed();
// This works for classes that aren't actually wrapping
// anything
return iface.isInstance(this);
}
/**
* Returns an object that implements the given interface to allow access to non-standard methods,
* or standard methods not exposed by the proxy.
* The result may be either the object found to implement the interface or a proxy for that object.
* If the receiver implements the interface then that is the object. If the receiver is a wrapper
* and the wrapped object implements the interface then that is the object. Otherwise the object is
* the result of calling <code>unwrap</code> recursively on the wrapped object. If the receiver is not a
* wrapper and does not implement the interface, then an <code>SQLException</code> is thrown.
*
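* <p>
* Illustrative sketch only: unwrapping through the JDBC interface to reach
* this driver's vendor extension interface.
* </p>
*
* <pre>
* com.mysql.jdbc.Statement mysqlStmt =
*         (com.mysql.jdbc.Statement) stmt.unwrap(com.mysql.jdbc.Statement.class);
* </pre>
*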
* @param iface A Class defining an interface that the result must implement.
* @return an object that implements the interface. May be a proxy for the actual implementing object.
* @throws java.sql.SQLException If no object found that implements the interface
* @since 1.6
*/
public Object unwrap(Class iface) throws java.sql.SQLException {
try {
// This works for classes that aren't actually wrapping
// anything
return Util.cast(iface, this);
} catch (ClassCastException cce) {
throw SQLError.createSQLException("Unable to unwrap to " + iface.toString(),
SQLError.SQL_STATE_ILLEGAL_ARGUMENT);
}
}
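/**
* Finds where the statement text actually begins, skipping a single leading
* comment in slash-star, <code>--</code> or <code>#</code> style if one is
* present.
*
* @param sql
* the SQL string to scan
*
* @return the index of the first character after the leading comment, or 0
* if no complete leading comment was found
*/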
protected int findStartOfStatement(String sql) {
int statementStartPos = 0;
if (StringUtils.startsWithIgnoreCaseAndWs(sql, "/*")) {
statementStartPos = sql.indexOf("*/");
if (statementStartPos == -1) {
statementStartPos = 0;
} else {
statementStartPos += 2;
}
} else if (StringUtils.startsWithIgnoreCaseAndWs(sql, "--")
|| StringUtils.startsWithIgnoreCaseAndWs(sql, "#")) {
statementStartPos = sql.indexOf('\n');
if (statementStartPos == -1) {
statementStartPos = sql.indexOf('\r');
if (statementStartPos == -1) {
statementStartPos = 0;
}
}
}
return statementStartPos;
}
private InputStream localInfileInputStream;
public synchronized InputStream getLocalInfileInputStream() {
return this.localInfileInputStream;
}
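/**
* Sets the InputStream that will be used as the data source for a
* subsequent <code>LOAD DATA LOCAL INFILE</code> statement issued on this
* statement, instead of reading the file named in the statement.
*
* @param stream
* the stream to read the row data from
*/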
public synchronized void setLocalInfileInputStream(InputStream stream) {
this.localInfileInputStream = stream;
}
public synchronized void setPingTarget(PingTarget pingTarget) {
this.pingTarget = pingTarget;
}
}