/* DatabaseDocument.java
 *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
package uk.ac.sanger.artemis.util;

import com.ibatis.sqlmap.client.SqlMapClient;
import uk.ac.sanger.ibatis.*;
import uk.ac.sanger.artemis.io.GFFStreamFeature;

import java.sql.*;
import java.io.*;
import java.util.Hashtable;
import java.util.Vector;
import java.util.Enumeration;
import java.util.List;
import java.util.Iterator;

import javax.swing.JPasswordField;
import javax.swing.JOptionPane;
/**
 *
 * Objects of this class are Documents created from a relational database.
 *
 */
public class DatabaseDocument extends Document
{
  /** database schema */
  private String schema = "public";

  private InputStreamProgressListener progress_listener;
  private Hashtable org2schema;

  private String sqlLog = System.getProperty("user.home") +
                          System.getProperty("file.separator") +
                          "art_sql_debug.log";

  /** feature types used to split GFF into separate entries */
  private String[] types = { "exon", "gene", "CDS", "transcript" };

  // fields referenced elsewhere in this class; declarations reconstructed
  // from their usage below
  private JPasswordField pfield;
  private String name;
  private String feature_id;
  private boolean splitGFFEntry = false;
  private boolean iBatis = false;
  private ByteBuffer gff_buff;
  private ByteBuffer[] gff_buffer;
  private Hashtable cvterm;
  private Hashtable db;
  private Vector organism;
  /**
   *
   * Create a new Document from a database.
   *
   * @param location
   *          This should be a URL string giving:
   *          jdbc:postgresql://host:port/database_name?user=username
   *
   */
  public DatabaseDocument(String location, JPasswordField pfield)
  {
    super(location);
    this.pfield = pfield;
  }
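  // A minimal usage sketch (hypothetical host/database/user values), showing
  // the location string format the constructors here expect:
  //
  //   JPasswordField pfield = new JPasswordField("secret");
  //   DatabaseDocument doc = new DatabaseDocument(
  //       "jdbc:postgresql://localhost:5432/chado?user=fred", pfield);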
  /**
   *
   * Create a new Document from a database.
   *
   * @param location
   *          This should be a URL string giving:
   *          jdbc:postgresql://host:port/database_name?user=username
   * @param feature_id
   *          ID of a feature to be extracted.
   *
   */
  public DatabaseDocument(String location, JPasswordField pfield,
                          String feature_id)
  {
    super(location);
    this.pfield = pfield;
    this.feature_id = feature_id;
  }
  /**
   *
   * Create a new Document from a database.
   *
   * @param location
   *          This should be a URL string giving:
   *          jdbc:postgresql://host:port/database_name?user=username
   * @param feature_id
   *          ID of a feature to be extracted.
   * @param splitGFFEntry
   *          split into separate entries based on feature types.
   * @param progress_listener
   *          input stream progress listener
   *
   */
  public DatabaseDocument(String location, JPasswordField pfield,
                          String feature_id, boolean splitGFFEntry,
                          InputStreamProgressListener progress_listener)
  {
    super(location);
    this.pfield = pfield;
    this.feature_id = feature_id;
    this.splitGFFEntry = splitGFFEntry;
    this.progress_listener = progress_listener;
  }

  public DatabaseDocument(String location, JPasswordField pfield,
                          String feature_id, String schema,
                          ByteBuffer gff_buff, String name)
  {
    super(location);
    this.pfield = pfield;
    this.feature_id = feature_id;
    this.schema = schema;
    this.gff_buff = gff_buff;
    this.name = name;
  }
  /**
   *
* Append a String to the Document location.
*
* @param name
* The name to append.
*
*/
  public Document append(String name) throws IOException
  {
    return new DatabaseDocument( ((String)getLocation()) + name, pfield);
  }
  /**
   *
* Return the name of this Document (the last element of the Document
* location).
*
*/
  public String getName()
  {
    int ind = ((String)getLocation()).indexOf("?");
    String name = ((String)getLocation()).substring(0, ind);
    // the name is the last element of the location
    ind = name.lastIndexOf("/");
    return name.substring(ind + 1);
  }
/**
*
* Set the name of this document.
*
*/
public void setName(String name)
{
this.name = name;
}
  /**
   *
   * Return a Document with the last element stripped off.
   *
   */
  public Document getParent()
  {
    return null;  // a database document has no parent
  }
  /**
   *
   * Return true if and only if the Document referred to by this object exists
   * and is readable. Always returns true.
   *
   */
  public boolean readable()
  {
    return true;
  }
  /**
   *
   * Return true if and only if the Document referred to by this object exists
   * and can be written to. Always returns false.
   *
   */
  public boolean writable()
  {
    return false;
  }
  /**
   * Create a new InputStream object from this Document. The contents of the
* Document can be read from the InputStream.
*
* @exception IOException
* Thrown if the Document can't be read from (for example if it
* doesn't exist).
*/
  public InputStream getInputStream() throws IOException
  {
    ByteArrayInputStream instream;

    // a pre-fetched GFF buffer is returned directly
    if(gff_buff != null)
    {
      instream = new ByteArrayInputStream(gff_buff.getBytes());
      return instream;
    }

    try
    {
      Connection conn = null;
      if(!iBatis)
        conn = getConnection();

      gff_buffer = iBatis ? getGFFiBatis(feature_id, schema)
                          : getGFFJdbc(conn, feature_id, schema);
      ByteBuffer entry = new ByteBuffer();

      if(splitGFFEntry)
      {
        if(gff_buffer[0].size() > 0)
          entry.append(gff_buffer[0]);
      }
      else
      {
        for(int i = 0; i < gff_buffer.length; i++)
        {
          if(gff_buffer[i].size() > 0)
            entry.append(gff_buffer[i]);
        }
      }

      // append the sequence as a FASTA section
      if(iBatis)
        getSequenceIbatis(entry, schema);
      else
        getSequence(conn, entry, schema);

      instream = new ByteArrayInputStream(entry.getBytes());
      return instream;
    }
    catch(java.sql.SQLException sqlExp)
    {
      System.out.println("Problems connecting...");
      sqlExp.printStackTrace();
    }
    return null;
  }
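  // The stream assembled above is a set of GFF feature lines followed by the
  // FASTA section appended by getSequence(), e.g. (illustrative values only):
  //
  //   chr1  chado  gene  100  900  .  +  .  ID=gene1;timelastmodified=...;note=...
  //   ##FASTA
  //   >chr1
  //   ATGCC...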
  /**
   *
* Called (by DatabaseEntrySource) to retrieve all the documents for each
* entry created.
*
*/
  public DatabaseDocument[] getGffDocuments(String location, String id,
                                            String schema)
  {
    int nentries = 0;
    for(int i = 1; i < gff_buffer.length; i++)
    {
      if(gff_buffer[i].size() > 0)
        nentries++;
    }

    DatabaseDocument[] new_docs = new DatabaseDocument[nentries];
    nentries = 0;

    for(int i = 1; i < gff_buffer.length; i++)
    {
      if(gff_buffer[i].size() == 0)
        continue;

      String name;
      if(i >= types.length)
        name = "other";
      else
        name = types[i];

      new_docs[nentries] = new DatabaseDocument(location, pfield, id, schema,
                                                gff_buffer[i], name);
      nentries++;
    }
    return new_docs;
  }
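  // Hypothetical caller sketch: after a first read with splitGFFEntry=true,
  // one further document is created per non-empty feature-type buffer
  // ("gene", "CDS", "transcript" ... plus "other"):
  //
  //   DatabaseDocument[] docs = doc.getGffDocuments(location, feature_id, schema);
  //   for(int i = 0; i < docs.length; i++)
  //     readEntry(docs[i].getInputStream());   // readEntry() is a hypothetical helper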
  /**
   *
* Return a feature name given the feature_id.
*
*/
  private String getFeatureNameJdbc(String feature_id, Connection conn,
                                    String schema) throws java.sql.SQLException
  {
    Statement st = conn.createStatement();
    String sql = "SELECT name FROM " + schema + ".feature WHERE feature_id= " +
                 feature_id;
    appendToLogFile(sql, sqlLog);
    ResultSet rs = st.executeQuery(sql);
    rs.next();
    return rs.getString("name");
  }
  private ByteBuffer[] getGFFiBatis(String parentFeatureID, String schema)
          throws java.sql.SQLException
  {
    final int srcfeature_id = Integer.parseInt(parentFeatureID);
    List featList = ConnectionIBatis.getGff(srcfeature_id, schema);

    ByteBuffer[] buffers = new ByteBuffer[types.length + 1];
    for(int i = 0; i < buffers.length; i++)
      buffers[i] = new ByteBuffer();

    String parentFeature = ConnectionIBatis.getFeatureName(srcfeature_id, schema);

    final int feature_size = featList.size();
    Hashtable id_store = new Hashtable(feature_size);

    // build feature name store
    for(int i = 0; i < feature_size; i++)
    {
      Feature feat = (Feature)featList.get(i);
      String name = feat.getUniquename();
      String feature_id = Integer.toString(feat.getId());
      id_store.put(feature_id, name);
    }

    for(int i = 0; i < feature_size; i++)
    {
      Feature feat = (Feature)featList.get(i);
      int fmin = feat.getFmin() + 1;
      int fmax = feat.getFmax();
      long type_id = feat.getType_id();
      long prop_type_id = feat.getProp_type_id();
      int strand = feat.getStrand();
      String name = feat.getUniquename();
      String typeName = getCvtermName(null, type_id);
      String propTypeName = getCvtermName(null, prop_type_id);
      // getObject_id()/getTimelastmodified() are assumed accessors on the
      // iBatis Feature bean, mirroring the columns of the JDBC query below
      String parent_id = Integer.toString(feat.getObject_id());
      String timelastmodified = feat.getTimelastmodified().toString();

      if(parent_id != null && id_store.containsKey(parent_id))
        parent_id = (String)id_store.get(parent_id);

      // select buffer by feature type
      ByteBuffer this_buff = buffers[types.length];
      for(int j = 0; j < types.length; j++)
      {
        if(types[j].equals(typeName))
          this_buff = buffers[j];
      }

      this_buff.append(parentFeature + "\t"); // seqid
      this_buff.append("chado\t");            // source
      this_buff.append(typeName + "\t");      // type
      this_buff.append(fmin + "\t");          // start
      this_buff.append(fmax + "\t");          // end
      this_buff.append(".\t");                // score
      if(strand == -1)                        // strand
        this_buff.append("-\t");
      else if(strand == 1)
        this_buff.append("+\t");
      else
        this_buff.append(".\t");
      this_buff.append(".\t");                // phase
      this_buff.append("ID=" + name + ";");
      this_buff.append("Parent=" + parent_id + ";");
      this_buff.append("timelastmodified=" + timelastmodified + ";");

      String value = "";
      if(feat.getValue() != null)
        value = GFFStreamFeature.encode(feat.getValue());
      this_buff.append(propTypeName + "=" + value); // attributes

      // is the next line part of the same feature, if so merge
      Feature featNext = null;
      if(i < feature_size - 1)
        featNext = (Feature)featList.get(i + 1);

      while(featNext != null && featNext.getUniquename().equals(name))
      {
        prop_type_id = featNext.getProp_type_id();
        propTypeName = getCvtermName(null, prop_type_id);
        value = GFFStreamFeature.encode(featNext.getValue());
        this_buff.append(";" + propTypeName + "=" + value);
        i++;
        if(i < feature_size - 1)
          featNext = (Feature)featList.get(i + 1);
        else
          featNext = null;
      }
      this_buff.append("\n");

      progress_listener.progressMade("Read from database: " + name);
    }
    return buffers;
  }
  /**
   *
* Given a parent (chromosome, contig, supercontig) retrieve the features in
* the form of a GFF stream.
*
*/
  private ByteBuffer[] getGFFJdbc(Connection conn, String parentFeatureID,
                                  String schema)
          throws java.sql.SQLException
  {
Statement st = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
ResultSet.CONCUR_UPDATABLE);
String sql = "SELECT timelastmodified, feature.feature_id, object_id, strand, fmin, fmax, uniquename, "
+ schema + ".feature.type_id, "
+ schema + ".featureprop.type_id AS prop_type_id, featureprop.value"
+ " FROM "
+ schema + ".featureloc, "
+ schema + ".feature"
+ " LEFT JOIN "
+ schema + ".feature_relationship ON "
+ schema + ".feature_relationship.subject_id="
+ schema + ".feature.feature_id"
+ " LEFT JOIN "
+ schema + ".featureprop ON "
+ schema + ".featureprop.feature_id="
+ schema + ".feature.feature_id"
+ " WHERE srcfeature_id = "
+ parentFeatureID + " AND "
+ schema + ".featureloc.feature_id="
+ schema + ".feature.feature_id"
+ " AND ("
+ schema + ".featureloc.rank="
+ schema + ".feature_relationship.rank OR "
+ schema + ".feature_relationship.rank IS NULL)"
+ " ORDER BY "
+ schema + ".feature.type_id, uniquename";
    ByteBuffer[] buffers = new ByteBuffer[types.length + 1];
    for(int i = 0; i < buffers.length; i++)
      buffers[i] = new ByteBuffer();

    String parentFeature = getFeatureNameJdbc(parentFeatureID, conn, schema);
    appendToLogFile(sql, sqlLog);
    ResultSet rs = st.executeQuery(sql);

    // build feature name store (the result set is scrollable so it can be
    // rewound for the main pass)
    Hashtable id_store = new Hashtable();
    while(rs.next())
      id_store.put(rs.getString("feature_id"), rs.getString("uniquename"));
    rs.beforeFirst();

    boolean rewind = false;
    while(rs.next())
    {
      String name = rs.getString("uniquename");
      String feature_id = rs.getString("feature_id");
      int fmin = rs.getInt("fmin") + 1;
      int fmax = rs.getInt("fmax");
      long type_id = rs.getLong("type_id");
      long prop_type_id = rs.getLong("prop_type_id");
      int strand = rs.getInt("strand");
      String typeName = getCvtermName(conn, type_id);
      String propTypeName = getCvtermName(conn, prop_type_id);
      String timelastmodified = rs.getString("timelastmodified");
      String parent_id = rs.getString("object_id");

      if(parent_id != null && id_store.containsKey(parent_id))
        parent_id = (String)id_store.get(parent_id);

      // select buffer
      ByteBuffer this_buff = buffers[types.length];
      for(int i = 0; i < types.length; i++)
      {
        if(types[i].equals(typeName))
          this_buff = buffers[i];
      }

      this_buff.append(parentFeature + "\t"); // seqid
      this_buff.append("chado\t");            // source
      this_buff.append(typeName + "\t");      // type
      this_buff.append(fmin + "\t");          // start
      this_buff.append(fmax + "\t");          // end
      this_buff.append(".\t");                // score
      if(strand == -1)                        // strand
        this_buff.append("-\t");
      else if(strand == 1)
        this_buff.append("+\t");
      else
        this_buff.append(".\t");
      this_buff.append(".\t");                // phase
      this_buff.append("ID=" + name + ";");
      if(parent_id != null)
        this_buff.append("Parent=" + parent_id + ";");
      this_buff.append("timelastmodified=" + timelastmodified + ";");

      String value = "";
      if(rs.getString("value") != null)
        value = GFFStreamFeature.encode(rs.getString("value"));
      this_buff.append(propTypeName + "=" + value); // attributes

      // is the next row part of the same feature, if so merge
      while((rewind = rs.next()) && rs.getString("uniquename").equals(name))
      {
        prop_type_id = rs.getLong("prop_type_id");
        propTypeName = getCvtermName(conn, prop_type_id);
        if(rs.getString("value") != null)
        {
          value = GFFStreamFeature.encode(rs.getString("value"));
          this_buff.append(";" + propTypeName + "=" + value);
        }
      }
      if(rewind)  // step back so the outer loop sees the next feature
        rs.previous();
      this_buff.append("\n");

      progress_listener.progressMade("Read from database: " + name);
    }

    return buffers;
  }
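  // Because rows for the same feature are adjacent in the ordered result set,
  // consecutive rows sharing a uniquename differ only in their featureprop
  // columns; the merge loop above folds them into one line, so two props on
  // gene1 become (illustrative): ID=gene1;...;note=hypothetical;colour=7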
  // reverse lookup: find the cvterm_id for a given cvterm name
  private Long getCvtermID(String name)
  {
    Enumeration enum_cvterm = cvterm.keys();
    while(enum_cvterm.hasMoreElements())
    {
      Long key = (Long)enum_cvterm.nextElement();
      if(name.equals(cvterm.get(key)))
        return key;
    }
    return null;
  }

  private String getCvtermName(Connection conn, long id)
  {
    if(cvterm == null)
      cvterm = iBatis ? getCvtermIbatis(null) : getCvterm(conn, null);
    return (String)cvterm.get(new Long(id));
  }
  /**
   *
   * Look up cvterm names and ids and return them in a Hashtable.
   *
   */
  private Hashtable getCvtermIbatis(String cv_name)
  {
    cvterm = new Hashtable();
    try
    {
      // getCvterm() is an assumed ConnectionIBatis accessor (declared to
      // throw SQLException), by analogy with the other accessors used here
      List cvterm_list = ConnectionIBatis.getCvterm();
      Iterator it = cvterm_list.iterator();
      while(it.hasNext())
      {
        Cvterm cv = (Cvterm)it.next();
        cvterm.put(new Long(cv.getId()), cv.getName());
      }
    }
    catch(java.sql.SQLException sqle)
    {
      System.err.println(this.getClass() + ": SQLException retrieving CvTerms");
      System.err.println(sqle);
    }
    return cvterm;
  }
  /**
   *
   * Look up cvterm names and ids and return them in a Hashtable.
   *
   */
  private Hashtable getCvterm(Connection conn, String cv_name)
  {
    String sql = "SELECT cvterm.cvterm_id, cvterm.name " +
                 "FROM cvterm, cv WHERE cv.cv_id = cvterm.cv_id";

    if(cv_name != null)
      sql = sql + " AND cv.name='" + cv_name + "'";

    appendToLogFile(sql, sqlLog);
    cvterm = new Hashtable();

    try
    {
      Statement s = conn.createStatement();
      ResultSet rs = s.executeQuery(sql);
      while(rs.next())
      {
        long id = rs.getLong("cvterm_id");
        String name = rs.getString("name");
        // the table maps id -> name, so duplicate names are found by value
        if(cvterm.containsValue(name))
          System.err.println(this.getClass()
              + ": WARNING - read multiple CvTerms with name = '" + name + "'");
        cvterm.put(new Long(id), name);
      }
    }
    catch(SQLException sqle)
    {
      System.err.println(this.getClass() + ": SQLException retrieving CvTerms");
      System.err.println(sqle);
    }
    return cvterm;
  }
  public ByteBuffer getSequenceIbatis(ByteBuffer buff, String schema)
  {
    Feature feature = ConnectionIBatis.getSequence(Integer.parseInt(feature_id),
                                                   schema);
    // getName()/getResidues() are assumed accessors on the iBatis Feature bean
    buff.append("##FASTA\n>" + feature.getName() + "\n" + feature.getResidues());
    return buff;
  }
  public ByteBuffer getSequence(Connection conn, ByteBuffer buff, String schema)
         throws java.sql.SQLException
  {
    Statement st = conn.createStatement();
    String sql = "SELECT name, residues from " + schema +
                 ".feature where feature_id = '" + feature_id + "'";
    appendToLogFile(sql, sqlLog);
    ResultSet rs = st.executeQuery(sql);
    rs.next();

    buff.append("##FASTA\n>");
    buff.append(rs.getBytes("name"));
    buff.append("\n");
    buff.append(rs.getBytes("residues"));
    return buff;
    // return "##FASTA\n>" + name + "\n" + rs.getString("residues");
  }
public Hashtable getDatabaseEntries()
{
if(iBatis)
return getDatabaseEntriesIbatis();
else
return getDatabaseEntriesJdbc();
}
public Hashtable getSchemaEntries()
{
return org2schema;
}
  /**
   *
   * Create a hashtable of the available entries.
   *
   */
  private Hashtable getDatabaseEntriesJdbc()
  {
    db = new Hashtable();
    organism = new Vector();
    org2schema = new Hashtable();
    try
    {
      Connection conn = getConnection();
      Statement st = conn.createStatement();
      String query = "SELECT schema_name FROM information_schema.schemata "+
                     "WHERE schema_name=schema_owner ORDER BY schema_name";
      appendToLogFile(query, sqlLog);
      ResultSet rs = st.executeQuery(query);
      Vector schemas = new Vector();
      while(rs.next())
        schemas.add(rs.getString("schema_name"));

      for(int i = 0; i < schemas.size(); i++)
      {
        String schema = (String)schemas.get(i);
        appendToLogFile(schema, sqlLog);

        String sql = "SELECT DISTINCT type_id FROM " + schema +
                     ".feature WHERE residues notnull";
        appendToLogFile(sql, sqlLog);

        Vector cvterm_id = new Vector();
        rs = st.executeQuery(sql);
        while(rs.next())
          cvterm_id.add(rs.getString("type_id"));

        if(cvterm_id.size() == 0) // no residues for this organism
          continue;
sql = new String(
"SELECT abbreviation, name, feature_id, type_id FROM organism, "+
schema + ".feature WHERE (");
for(int j = 0; j < cvterm_id.size(); j++)
{
sql = sql + " type_id = " + (String)cvterm_id.get(j);
if(j < cvterm_id.size() - 1)
sql = sql + " OR ";
}
sql = sql + ") and organism.organism_id=" + schema
+ ".feature.organism_id " + "and residues notnull "
+ "ORDER BY abbreviation";
appendToLogFile(sql, sqlLog);
rs = st.executeQuery(sql);
while(rs.next())
{
String org = rs.getString("abbreviation");
String typeName = getCvtermName(conn, rs.getLong("type_id"));
db.put(org + " - " + typeName + " - " + rs.getString("name"),
rs.getString("feature_id"));
if(!organism.contains(org))
organism.add(org);
if(!org2schema.containsKey(org))
{
org2schema.put(org, schema);
}
}
}
    }
    catch(java.sql.SQLException sqlExp)
    {
      JOptionPane.showMessageDialog(null, "SQL Problems...", "SQL Error",
                                    JOptionPane.ERROR_MESSAGE);
      sqlExp.printStackTrace();
    }
    catch(java.net.ConnectException conn)
    {
      JOptionPane.showMessageDialog(null, "Problems connecting...",
                                    "Database Connection Error - Check Server",
                                    JOptionPane.ERROR_MESSAGE);
      conn.printStackTrace();
    }
    return db;
  }
/**
*
* Create a hashtable of the available entries.
*
*/
private Hashtable getDatabaseEntriesIbatis()
{
db = new Hashtable();
organism = new Vector();
    org2schema = new Hashtable();
    try
    {
      // getSchema()/getResidueType() are assumed ConnectionIBatis accessors
      // (declared to throw SQLException), mirroring the JDBC version above
      List schemas = ConnectionIBatis.getSchema();
      Iterator it_schema = schemas.iterator();
      while(it_schema.hasNext())
      {
        String schema = (String)it_schema.next();
        List list = ConnectionIBatis.getResidueType(schema);
        if(list.size() == 0) // no residues for this organism
          continue;

        List list_residue_features = ConnectionIBatis.getResidueFeatures(list, schema);
        Iterator it_residue_features = list_residue_features.iterator();
        while(it_residue_features.hasNext())
        {
          Feature feature = (Feature)it_residue_features.next();
          String org = feature.getAbbreviation();
          String typeName = getCvtermName(null, feature.getType_id());

          db.put(org + " - " + typeName + " - " + feature.getName(),
                 Integer.toString(feature.getId()));
          if(!organism.contains(org))
            organism.add(org);
          if(!org2schema.containsKey(org))
            org2schema.put(org, schema);
        }
      }
    }
    catch(java.sql.SQLException sqlExp)
    {
      JOptionPane.showMessageDialog(null, "SQL Problems...", "SQL Error",
                                    JOptionPane.ERROR_MESSAGE);
      sqlExp.printStackTrace();
    }
    return db;
  }
public Vector getOrganism()
{
return organism;
}
  /**
   *
   * Make a connection with the database via JDBC, e.g.
   * jdbc:postgresql://localhost:13001/chadoCVS?user=es2
   *
   */
  public Connection getConnection() throws java.sql.SQLException,
                                           java.net.ConnectException
  {
    String location = (String)getLocation();
    // assume we have a password
    final int index = location.indexOf("?user=");
    return DriverManager.getConnection(location.substring(0, index),
                                       location.substring(index + 6),
                                       new String(pfield.getPassword()));
  }
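  // For example (hypothetical values), the location
  //   jdbc:postgresql://localhost:13001/chadoCVS?user=es2
  // splits at "?user=" into the JDBC URL "jdbc:postgresql://localhost:13001/chadoCVS"
  // and the user name "es2"; the password comes from the JPasswordField.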
  /**
   * Create a new OutputStream object from this Document. The contents of the
   * Document can be written with the stream.
*
* @exception IOException
* Thrown if the Document can't be written.
*/
  public OutputStream getOutputStream() throws IOException
  {
    System.out.println("DatabaseDocument - ReadOnlyException");
    throw new ReadOnlyException("this Database Document can not be written to");
  }
  /**
   *
* Appends a log entry to the log file
*
* @param logEntry
* entry to add to log file
* @param logFileName
* log file name
*
*/
  private void appendToLogFile(String logEntry, String logFileName)
  {
    BufferedWriter bw = null;
    try
    {
      String dat = new java.util.Date().toString();
      bw = new BufferedWriter(new FileWriter(logFileName, true));
      bw.write(dat + " " + logEntry);
      bw.newLine();
    }
    catch(IOException ioe)
    {
      System.out.println("Error writing to log file " + logFileName);
    }
    finally   // always close the log file
    {
      if(bw != null)
      {
        try
        {
          bw.close();
        }
        catch(IOException ioe) {}
      }
    }
  }

}