Update MongoDB Java driver to 2.11.1

This commit is contained in:
Olof Larsson 2013-05-01 11:06:39 +02:00
parent bc92d94f89
commit 41e581ee71
100 changed files with 7991 additions and 2555 deletions

View File

@ -14,7 +14,6 @@ import org.bukkit.inventory.PlayerInventory;
import com.massivecraft.mcore.adapter.InventoryAdapter; import com.massivecraft.mcore.adapter.InventoryAdapter;
import com.massivecraft.mcore.adapter.ItemStackAdapter; import com.massivecraft.mcore.adapter.ItemStackAdapter;
import com.massivecraft.mcore.adapter.MongoURIAdapter;
import com.massivecraft.mcore.adapter.ObjectIdAdapter; import com.massivecraft.mcore.adapter.ObjectIdAdapter;
import com.massivecraft.mcore.adapter.PlayerInventoryAdapter; import com.massivecraft.mcore.adapter.PlayerInventoryAdapter;
import com.massivecraft.mcore.adapter.UUIDAdapter; import com.massivecraft.mcore.adapter.UUIDAdapter;
@ -28,6 +27,7 @@ import com.massivecraft.mcore.ps.PS;
import com.massivecraft.mcore.ps.PSAdapter; import com.massivecraft.mcore.ps.PSAdapter;
import com.massivecraft.mcore.store.Coll; import com.massivecraft.mcore.store.Coll;
import com.massivecraft.mcore.store.Db; import com.massivecraft.mcore.store.Db;
import com.massivecraft.mcore.store.ExamineThread;
import com.massivecraft.mcore.store.MStore; import com.massivecraft.mcore.store.MStore;
import com.massivecraft.mcore.usys.Aspect; import com.massivecraft.mcore.usys.Aspect;
import com.massivecraft.mcore.usys.AspectColl; import com.massivecraft.mcore.usys.AspectColl;
@ -40,7 +40,6 @@ import com.massivecraft.mcore.util.TimeUnit;
import com.massivecraft.mcore.xlib.bson.types.ObjectId; import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.gson.Gson; import com.massivecraft.mcore.xlib.gson.Gson;
import com.massivecraft.mcore.xlib.gson.GsonBuilder; import com.massivecraft.mcore.xlib.gson.GsonBuilder;
import com.massivecraft.mcore.xlib.mongodb.MongoURI;
public class MCore extends MPlugin public class MCore extends MPlugin
{ {
@ -72,7 +71,6 @@ public class MCore extends MPlugin
.setPrettyPrinting() .setPrettyPrinting()
.disableHtmlEscaping() .disableHtmlEscaping()
.excludeFieldsWithModifiers(Modifier.TRANSIENT) .excludeFieldsWithModifiers(Modifier.TRANSIENT)
.registerTypeAdapter(MongoURI.class, MongoURIAdapter.get())
.registerTypeAdapter(ObjectId.class, ObjectIdAdapter.get()) .registerTypeAdapter(ObjectId.class, ObjectIdAdapter.get())
.registerTypeAdapter(UUID.class, UUIDAdapter.get()) .registerTypeAdapter(UUID.class, UUIDAdapter.get())
.registerTypeAdapter(ItemStack.class, ItemStackAdapter.get()) .registerTypeAdapter(ItemStack.class, ItemStackAdapter.get())
@ -121,6 +119,9 @@ public class MCore extends MPlugin
// Note this one must be before preEnable. dooh. // Note this one must be before preEnable. dooh.
Coll.instances.clear(); Coll.instances.clear();
// Start the examine thread
ExamineThread.get().start();
if ( ! preEnable()) return; if ( ! preEnable()) return;
// Load Server Config // Load Server Config
@ -217,4 +218,11 @@ public class MCore extends MPlugin
} }
} }
@Override
public void onDisable()
{
super.onDisable();
ExamineThread.get().interrupt();
}
} }

View File

@ -56,7 +56,6 @@ public abstract class MPlugin extends JavaPlugin implements Listener
for (Coll<?> coll : Coll.instances) for (Coll<?> coll : Coll.instances)
{ {
if (coll.getPlugin() != this) continue; if (coll.getPlugin() != this) continue;
coll.examineThread().interrupt();
coll.syncAll(); // TODO: Save outwards only? We may want to avoid loads at this stage... coll.syncAll(); // TODO: Save outwards only? We may want to avoid loads at this stage...
Coll.instances.remove(coll); Coll.instances.remove(coll);
} }

View File

@ -1,53 +0,0 @@
package com.massivecraft.mcore.adapter;
import java.lang.reflect.Type;
import com.massivecraft.mcore.xlib.gson.JsonDeserializationContext;
import com.massivecraft.mcore.xlib.gson.JsonDeserializer;
import com.massivecraft.mcore.xlib.gson.JsonElement;
import com.massivecraft.mcore.xlib.gson.JsonParseException;
import com.massivecraft.mcore.xlib.gson.JsonPrimitive;
import com.massivecraft.mcore.xlib.gson.JsonSerializationContext;
import com.massivecraft.mcore.xlib.gson.JsonSerializer;
import com.massivecraft.mcore.xlib.mongodb.MongoURI;
public class MongoURIAdapter implements JsonDeserializer<MongoURI>, JsonSerializer<MongoURI>
{
// -------------------------------------------- //
// INSTANCE & CONSTRUCT
// -------------------------------------------- //
protected static MongoURIAdapter i = new MongoURIAdapter();
public static MongoURIAdapter get() { return i; }
// -------------------------------------------- //
// OVERRIDE
// -------------------------------------------- //
@Override
public JsonElement serialize(MongoURI mongoURI, Type typeOfSrc, JsonSerializationContext context)
{
return serialize(mongoURI);
}
@Override
public MongoURI deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) throws JsonParseException
{
return deserialize(json);
}
// -------------------------------------------- //
// STATIC LOGIC
// -------------------------------------------- //
public static JsonElement serialize(MongoURI mongoURI)
{
return new JsonPrimitive(mongoURI.toString());
}
public static MongoURI deserialize(JsonElement json)
{
return new MongoURI(json.getAsString());
}
}

View File

@ -16,7 +16,6 @@ import org.bukkit.plugin.Plugin;
import com.massivecraft.mcore.MCore; import com.massivecraft.mcore.MCore;
import com.massivecraft.mcore.MPlugin; import com.massivecraft.mcore.MPlugin;
import com.massivecraft.mcore.NaturalOrderComparator;
import com.massivecraft.mcore.Predictate; import com.massivecraft.mcore.Predictate;
import com.massivecraft.mcore.store.accessor.Accessor; import com.massivecraft.mcore.store.accessor.Accessor;
import com.massivecraft.mcore.store.idstrategy.IdStrategy; import com.massivecraft.mcore.store.idstrategy.IdStrategy;
@ -599,9 +598,6 @@ public class Coll<E> implements CollInterface<E>
this.syncSuspects(); this.syncSuspects();
} }
protected ExamineThread<E> examineThread;
@Override public Thread examineThread() { return this.examineThread; }
// -------------------------------------------- // // -------------------------------------------- //
// CONSTRUCT // CONSTRUCT
// -------------------------------------------- // // -------------------------------------------- //
@ -637,11 +633,6 @@ public class Coll<E> implements CollInterface<E>
} }
this.collDriverObject = db.getCollDriverObject(this); this.collDriverObject = db.getCollDriverObject(this);
if (idComparator == null)
{
idComparator = NaturalOrderComparator.get();
}
// STORAGE // STORAGE
this.id2entity = new ConcurrentSkipListMap<String, E>(idComparator); this.id2entity = new ConcurrentSkipListMap<String, E>(idComparator);
this.entity2id = new ConcurrentSkipListMap<E, String>(entityComparator); this.entity2id = new ConcurrentSkipListMap<E, String>(entityComparator);
@ -678,8 +669,6 @@ public class Coll<E> implements CollInterface<E>
{ {
if (this.inited()) return; if (this.inited()) return;
this.syncAll(); this.syncAll();
this.examineThread = new ExamineThread<E>(this);
this.examineThread.start();
instances.add(this); instances.add(this);
} }

View File

@ -140,8 +140,6 @@ public interface CollInterface<E>
public Runnable getTickTask(); public Runnable getTickTask();
public void onTick(); public void onTick();
public Thread examineThread();
// -------------------------------------------- // // -------------------------------------------- //
// CONSTRUCT // CONSTRUCT
// -------------------------------------------- // // -------------------------------------------- //

View File

@ -107,8 +107,8 @@ public class DriverGson extends DriverAbstract<JsonElement>
// Scan the collection folder for .json files // Scan the collection folder for .json files
File collDir = getCollDir(coll); File collDir = getCollDir(coll);
if ( ! collDir.isDirectory()) return ret; if (!collDir.isDirectory()) return ret;
for(File file : collDir.listFiles(JsonFileFilter.get())) for (File file : collDir.listFiles(JsonFileFilter.get()))
{ {
ret.put(idFromFile(file), file.lastModified()); ret.put(idFromFile(file), file.lastModified());
} }

View File

@ -17,12 +17,13 @@ import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
 import com.massivecraft.mcore.xlib.mongodb.DB;
 import com.massivecraft.mcore.xlib.mongodb.DBCollection;
 import com.massivecraft.mcore.xlib.mongodb.DBCursor;
-import com.massivecraft.mcore.xlib.mongodb.MongoURI;
+import com.massivecraft.mcore.xlib.mongodb.MongoClient;
+import com.massivecraft.mcore.xlib.mongodb.MongoClientURI;
 public class DriverMongo extends DriverAbstract<BasicDBObject>
 {
 // -------------------------------------------- //
-// STATIC
+// CONSTANTS
 // -------------------------------------------- //
 public final static String ID_FIELD = "_id";
@ -33,6 +34,33 @@ public class DriverMongo extends DriverAbstract<BasicDBObject>
public final static BasicDBObject dboKeysMtime = new BasicDBObject().append(MTIME_FIELD, 1); public final static BasicDBObject dboKeysMtime = new BasicDBObject().append(MTIME_FIELD, 1);
public final static BasicDBObject dboKeysIdandMtime = new BasicDBObject().append(ID_FIELD, 1).append(MTIME_FIELD, 1); public final static BasicDBObject dboKeysIdandMtime = new BasicDBObject().append(ID_FIELD, 1).append(MTIME_FIELD, 1);
//----------------------------------------------//
// CONSTRUCT
//----------------------------------------------//
private DriverMongo()
{
super("mongodb");
}
// -------------------------------------------- //
// INSTANCE
// -------------------------------------------- //
protected static DriverMongo instance;
public static DriverMongo get()
{
return instance;
}
static
{
instance = new DriverMongo();
instance.registerIdStrategy(IdStrategyOid.get());
instance.registerIdStrategy(IdStrategyUuid.get());
}
// -------------------------------------------- // // -------------------------------------------- //
// IMPLEMENTATION // IMPLEMENTATION
// -------------------------------------------- // // -------------------------------------------- //
@ -184,11 +212,14 @@ public class DriverMongo extends DriverAbstract<BasicDBObject>
 protected DB getDbInner(String uri)
 {
-MongoURI muri = new MongoURI(uri);
+MongoClientURI muri = new MongoClientURI(uri);
 try
 {
-DB db = muri.connectDB();
+// TODO: Create one of these per collection? Really? Perhaps I should cache.
+MongoClient mongoClient = new MongoClient(muri);
+DB db = mongoClient.getDB(muri.getDatabase());
 if (muri.getUsername() == null) return db;
@ -207,29 +238,4 @@ public class DriverMongo extends DriverAbstract<BasicDBObject>
} }
} }
//----------------------------------------------//
// CONSTRUCTORS
//----------------------------------------------//
private DriverMongo()
{
super("mongodb");
}
// -------------------------------------------- //
// INSTANCE
// -------------------------------------------- //
protected static DriverMongo instance;
public static DriverMongo get()
{
return instance;
}
static
{
instance = new DriverMongo();
instance.registerIdStrategy(IdStrategyOid.get());
instance.registerIdStrategy(IdStrategyUuid.get());
}
} }
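The heart of this driver bump is the move from the older MongoURI.connectDB() call to the MongoClient API. A minimal sketch of the 2.11-style connection flow used in getDbInner() above, assuming the shaded xlib package layout and a placeholder URI that names a database:

import java.net.UnknownHostException;

import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.MongoClientURI;

public class MongoConnectSketch
{
	// Parse the connection string once, open a pooled client, and fetch the database
	// named in the URI. A single MongoClient per URI is enough; it keeps its own
	// connection pool, which is what the TODO in getDbInner() hints at caching.
	public static DB open(String uri) throws UnknownHostException
	{
		MongoClientURI muri = new MongoClientURI(uri);
		MongoClient mongoClient = new MongoClient(muri);
		return mongoClient.getDB(muri.getDatabase());
	}
}

The credential handling that follows in getDbInner() (the muri.getUsername() check) is untouched by this commit.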

View File

@ -1,31 +1,56 @@
 package com.massivecraft.mcore.store;
-public class ExamineThread<E> extends Thread
+public class ExamineThread extends Thread
 {
-protected Coll<E> coll;
-public ExamineThread(Coll<E> coll)
-{
-this.coll = coll;
-this.setName("ExamineThread for "+coll.getName());
-}
-// TODO: Implement logging and/or auto adjusting system for how long the sleep should be?
+// -------------------------------------------- //
+// INSTANCE
+// -------------------------------------------- //
+private static ExamineThread i = null;
+public static ExamineThread get()
+{
+if (i == null || !i.isAlive()) i = new ExamineThread();
+return i;
+}
+// -------------------------------------------- //
+// CONSTRUCT
+// -------------------------------------------- //
+public ExamineThread()
+{
+this.setName("MStore ExamineThread");
+}
+// -------------------------------------------- //
+// FIELDS
+// -------------------------------------------- //
+private long lastDurationMillis = 0;
+public long getLastDurationMillis() { return this.lastDurationMillis; }
+// -------------------------------------------- //
+// OVERRIDE
+// -------------------------------------------- //
 @Override
 public void run()
 {
-while(true)
+while (true)
 {
 try
 {
-//long before = System.currentTimeMillis();
-coll.findSuspects();
-//long after = System.currentTimeMillis();
-//coll.mplugin().log(this.getName()+ " complete. Took "+ (after-before) +"ms.");
+long before = System.currentTimeMillis();
+for (Coll<?> coll: Coll.instances)
+{
+coll.findSuspects();
+}
+long after = System.currentTimeMillis();
+long duration = after-before;
+this.lastDurationMillis = duration;
+//String message = Txt.parse("<i>ExamineThread iteration took <h>%dms<i>.", after-before);
+//MCore.get().log(message);
 Thread.sleep(5000);
 }
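The one-thread-per-collection design is gone: a single ExamineThread now walks Coll.instances. Because a java.lang.Thread cannot be started again once it has terminated, the get() accessor above recreates the instance when the previous thread is no longer alive, which lets MCore simply call get().start() on enable and get().interrupt() on disable across reloads. A rough sketch of that pattern in isolation (PeriodicWorker is an illustrative stand-in, not part of the codebase):

public class PeriodicWorker extends Thread
{
	private static PeriodicWorker i = null;

	// Return the live worker, or a fresh one if the old thread already finished;
	// calling start() on a terminated Thread would throw IllegalThreadStateException.
	public static synchronized PeriodicWorker get()
	{
		if (i == null || !i.isAlive()) i = new PeriodicWorker();
		return i;
	}

	@Override
	public void run()
	{
		while (!this.isInterrupted())
		{
			// do one scan pass over all registered collections, then sleep
			try { Thread.sleep(5000); } catch (InterruptedException e) { return; }
		}
	}
}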

View File

@ -28,6 +28,7 @@ import java.util.regex.Pattern;
import com.massivecraft.mcore.xlib.bson.util.ClassMap; import com.massivecraft.mcore.xlib.bson.util.ClassMap;
@SuppressWarnings({"rawtypes"})
public class BSON { public class BSON {
static final Logger LOGGER = Logger.getLogger( "org.bson.BSON" ); static final Logger LOGGER = Logger.getLogger( "org.bson.BSON" );
@ -173,8 +174,7 @@ public class BSON {
public static boolean hasDecodeHooks() { return _decodeHooks; } public static boolean hasDecodeHooks() { return _decodeHooks; }
@SuppressWarnings("rawtypes") public static void addEncodingHook( Class c , Transformer t ){
public static void addEncodingHook( Class c , Transformer t ){
_encodeHooks = true; _encodeHooks = true;
List<Transformer> l = _encodingHooks.get( c ); List<Transformer> l = _encodingHooks.get( c );
if ( l == null ){ if ( l == null ){
@ -184,8 +184,7 @@ public class BSON {
l.add( t ); l.add( t );
} }
@SuppressWarnings("rawtypes") public static void addDecodingHook( Class c , Transformer t ){
public static void addDecodingHook( Class c , Transformer t ){
_decodeHooks = true; _decodeHooks = true;
List<Transformer> l = _decodingHooks.get( c ); List<Transformer> l = _decodingHooks.get( c );
if ( l == null ){ if ( l == null ){
@ -223,8 +222,7 @@ public class BSON {
* Returns the encoding hook(s) associated with the specified class * Returns the encoding hook(s) associated with the specified class
* *
*/ */
@SuppressWarnings("rawtypes") public static List<Transformer> getEncodingHooks( Class c ){
public static List<Transformer> getEncodingHooks( Class c ){
return _encodingHooks.get( c ); return _encodingHooks.get( c );
} }
@ -239,24 +237,21 @@ public class BSON {
/** /**
* Remove all encoding hooks for a specific class. * Remove all encoding hooks for a specific class.
*/ */
@SuppressWarnings("rawtypes") public static void removeEncodingHooks( Class c ){
public static void removeEncodingHooks( Class c ){
_encodingHooks.remove( c ); _encodingHooks.remove( c );
} }
/** /**
* Remove a specific encoding hook for a specific class. * Remove a specific encoding hook for a specific class.
*/ */
@SuppressWarnings("rawtypes") public static void removeEncodingHook( Class c , Transformer t ){
public static void removeEncodingHook( Class c , Transformer t ){
getEncodingHooks( c ).remove( t ); getEncodingHooks( c ).remove( t );
} }
/** /**
* Returns the decoding hook(s) associated with the specific class * Returns the decoding hook(s) associated with the specific class
*/ */
@SuppressWarnings("rawtypes") public static List<Transformer> getDecodingHooks( Class c ){
public static List<Transformer> getDecodingHooks( Class c ){
return _decodingHooks.get( c ); return _decodingHooks.get( c );
} }
@ -271,16 +266,14 @@ public class BSON {
/** /**
* Remove all decoding hooks for a specific class. * Remove all decoding hooks for a specific class.
*/ */
@SuppressWarnings("rawtypes") public static void removeDecodingHooks( Class c ){
public static void removeDecodingHooks( Class c ){
_decodingHooks.remove( c ); _decodingHooks.remove( c );
} }
/** /**
* Remove a specific encoding hook for a specific class. * Remove a specific encoding hook for a specific class.
*/ */
@SuppressWarnings("rawtypes") public static void removeDecodingHook( Class c , Transformer t ){
public static void removeDecodingHook( Class c , Transformer t ){
getDecodingHooks( c ).remove( t ); getDecodingHooks( c ).remove( t );
} }

View File

@ -24,6 +24,7 @@ import java.util.Set;
/** /**
* A key-value map that can be saved to the database. * A key-value map that can be saved to the database.
*/ */
@SuppressWarnings({"rawtypes"})
public interface BSONObject { public interface BSONObject {
/** /**
@ -44,8 +45,7 @@ public interface BSONObject {
* Sets all key/value pairs from a map into this object * Sets all key/value pairs from a map into this object
* @param m the map * @param m the map
*/ */
@SuppressWarnings("rawtypes") public void putAll( Map m );
public void putAll( Map m );
/** /**
* Gets a field from this object by a given name. * Gets a field from this object by a given name.
@ -58,8 +58,7 @@ public interface BSONObject {
* Returns a map representing this BSONObject. * Returns a map representing this BSONObject.
* @return the map * @return the map
*/ */
@SuppressWarnings("rawtypes") public Map toMap();
public Map toMap();
/** /**
* Removes a field with a given name from this object. * Removes a field with a given name from this object.

View File

@ -26,6 +26,7 @@ import com.massivecraft.mcore.xlib.bson.types.ObjectId;
/** /**
* Basic implementation of BSONDecoder interface that creates BasicBSONObject instances * Basic implementation of BSONDecoder interface that creates BasicBSONObject instances
*/ */
@SuppressWarnings({"unused"})
public class BasicBSONDecoder implements BSONDecoder { public class BasicBSONDecoder implements BSONDecoder {
public BSONObject readObject( byte[] b ){ public BSONObject readObject( byte[] b ){
try { try {
@ -505,7 +506,6 @@ public class BasicBSONDecoder implements BSONDecoder {
private static final String DEFAULT_ENCODING = "UTF-8"; private static final String DEFAULT_ENCODING = "UTF-8";
@SuppressWarnings("unused")
private static final boolean _isAscii( final byte b ){ private static final boolean _isAscii( final byte b ){
return b >=0 && b <= 127; return b >=0 && b <= 127;
} }

View File

@ -72,7 +72,7 @@ import com.massivecraft.mcore.xlib.mongodb.DBRefBase;
* this is meant to be pooled or cached * this is meant to be pooled or cached
* there is some per instance memory for string conversion, etc... * there is some per instance memory for string conversion, etc...
*/ */
@SuppressWarnings("unchecked") @SuppressWarnings({"unchecked", "rawtypes", "unused"})
public class BasicBSONEncoder implements BSONEncoder { public class BasicBSONEncoder implements BSONEncoder {
static final boolean DEBUG = false; static final boolean DEBUG = false;
@ -123,7 +123,6 @@ public class BasicBSONEncoder implements BSONEncoder {
/** /**
* this is really for embedded objects * this is really for embedded objects
*/ */
@SuppressWarnings("rawtypes")
protected int putObject( String name , BSONObject o ){ protected int putObject( String name , BSONObject o ){
if ( o == null ) if ( o == null )
@ -196,7 +195,6 @@ public class BasicBSONEncoder implements BSONEncoder {
return _buf.getPosition() - start; return _buf.getPosition() - start;
} }
@SuppressWarnings("rawtypes")
protected void _putObjectField( String name , Object val ){ protected void _putObjectField( String name , Object val ){
if ( name.equals( "_transientFields" ) ) if ( name.equals( "_transientFields" ) )
@ -287,8 +285,7 @@ public class BasicBSONEncoder implements BSONEncoder {
_buf.writeInt( sizePos , _buf.getPosition() - sizePos ); _buf.writeInt( sizePos , _buf.getPosition() - sizePos );
} }
@SuppressWarnings("rawtypes") private void putIterable( String name , Iterable l ){
private void putIterable( String name , Iterable l ){
_put( ARRAY , name ); _put( ARRAY , name );
final int sizePos = _buf.getPosition(); final int sizePos = _buf.getPosition();
_buf.writeInt( 0 ); _buf.writeInt( 0 );
@ -304,8 +301,7 @@ public class BasicBSONEncoder implements BSONEncoder {
_buf.writeInt( sizePos , _buf.getPosition() - sizePos ); _buf.writeInt( sizePos , _buf.getPosition() - sizePos );
} }
@SuppressWarnings("rawtypes") private void putMap( String name , Map m ){
private void putMap( String name , Map m ){
_put( OBJECT , name ); _put( OBJECT , name );
final int sizePos = _buf.getPosition(); final int sizePos = _buf.getPosition();
_buf.writeInt( 0 ); _buf.writeInt( 0 );
@ -341,8 +337,7 @@ public class BasicBSONEncoder implements BSONEncoder {
_buf.writeInt( temp , _buf.getPosition() - temp ); _buf.writeInt( temp , _buf.getPosition() - temp );
} }
@SuppressWarnings("unused") protected void putCode( String name , Code code ){
protected void putCode( String name , Code code ){
_put( CODE , name ); _put( CODE , name );
int temp = _buf.getPosition(); int temp = _buf.getPosition();
_putValueString( code.getCode() ); _putValueString( code.getCode() );

View File

@ -36,6 +36,7 @@ import java.util.regex.Pattern;
* obj.put( "foo", "bar" ); * obj.put( "foo", "bar" );
* </pre></blockquote> * </pre></blockquote>
*/ */
@SuppressWarnings({"unchecked", "rawtypes"})
public class BasicBSONObject extends LinkedHashMap<String,Object> implements BSONObject { public class BasicBSONObject extends LinkedHashMap<String,Object> implements BSONObject {
private static final long serialVersionUID = -4415279469780082174L; private static final long serialVersionUID = -4415279469780082174L;
@ -63,7 +64,6 @@ public class BasicBSONObject extends LinkedHashMap<String,Object> implements BSO
* Creates a DBObject from a map. * Creates a DBObject from a map.
* @param m map to convert * @param m map to convert
*/ */
@SuppressWarnings({ "unchecked", "rawtypes" })
public BasicBSONObject(Map m) { public BasicBSONObject(Map m) {
super(m); super(m);
} }
@ -72,8 +72,7 @@ public class BasicBSONObject extends LinkedHashMap<String,Object> implements BSO
* Converts a DBObject to a map. * Converts a DBObject to a map.
* @return the DBObject * @return the DBObject
*/ */
@SuppressWarnings("rawtypes") public Map toMap() {
public Map toMap() {
return new LinkedHashMap<String,Object>(this); return new LinkedHashMap<String,Object>(this);
} }
@ -282,7 +281,6 @@ public class BasicBSONObject extends LinkedHashMap<String,Object> implements BSO
return super.put( key , val ); return super.put( key , val );
} }
@SuppressWarnings({ "unchecked", "rawtypes" })
public void putAll( Map m ){ public void putAll( Map m ){
for ( Map.Entry entry : (Set<Map.Entry>)m.entrySet() ){ for ( Map.Entry entry : (Set<Map.Entry>)m.entrySet() ){
put( entry.getKey().toString() , entry.getValue() ); put( entry.getKey().toString() , entry.getValue() );

View File

@ -25,6 +25,7 @@ import com.massivecraft.mcore.xlib.mongodb.LazyDBObject;
/** /**
* *
*/ */
@SuppressWarnings({"rawtypes", "unused"})
public class LazyBSONCallback extends EmptyBSONCallback { public class LazyBSONCallback extends EmptyBSONCallback {
public void objectStart(){ public void objectStart(){
@ -63,7 +64,6 @@ public class LazyBSONCallback extends EmptyBSONCallback {
return new LazyDBObject( data, offset, this ); return new LazyDBObject( data, offset, this );
} }
@SuppressWarnings("rawtypes")
public List createArray( byte[] data, int offset ){ public List createArray( byte[] data, int offset ){
return new LazyDBList( data, offset, this ); return new LazyDBList( data, offset, this );
} }
@ -83,6 +83,5 @@ public class LazyBSONCallback extends EmptyBSONCallback {
} }
}*/ }*/
private Object _root; private Object _root;
@SuppressWarnings("unused") private static final Logger log = Logger.getLogger( "org.bson.LazyBSONCallback" );
private static final Logger log = Logger.getLogger( "org.bson.LazyBSONCallback" );
} }

View File

@ -30,6 +30,7 @@ import java.util.regex.Pattern;
* @author scotthernandez * @author scotthernandez
* @author Kilroy Wuz Here * @author Kilroy Wuz Here
*/ */
@SuppressWarnings({"unchecked", "rawtypes", "unused"})
public class LazyBSONObject implements BSONObject { public class LazyBSONObject implements BSONObject {
public LazyBSONObject( byte[] data, LazyBSONCallback callback ){ public LazyBSONObject( byte[] data, LazyBSONCallback callback ){
@ -135,7 +136,6 @@ public class LazyBSONObject implements BSONObject {
return toArray(a); return toArray(a);
} }
@SuppressWarnings( "unchecked" )
@Override @Override
public <T> T[] toArray(T[] a) { public <T> T[] toArray(T[] a) {
int size = size(); int size = size();
@ -203,8 +203,7 @@ public class LazyBSONObject implements BSONObject {
throw new UnsupportedOperationException("Read only"); throw new UnsupportedOperationException("Read only");
} }
@SuppressWarnings("rawtypes") @Override
@Override
public boolean equals(Object o) { public boolean equals(Object o) {
if (!(o instanceof Map.Entry)) if (!(o instanceof Map.Entry))
return false; return false;
@ -269,14 +268,12 @@ public class LazyBSONObject implements BSONObject {
return new LazyBSONEntryIterator(); return new LazyBSONEntryIterator();
} }
@SuppressWarnings("rawtypes") @Override
@Override
public Object[] toArray() { public Object[] toArray() {
Map.Entry[] array = new Map.Entry[size()]; Map.Entry[] array = new Map.Entry[size()];
return toArray(array); return toArray(array);
} }
@SuppressWarnings( "unchecked" )
@Override @Override
public <T> T[] toArray(T[] a) { public <T> T[] toArray(T[] a) {
int size = size(); int size = size();
@ -340,8 +337,7 @@ public class LazyBSONObject implements BSONObject {
throw new UnsupportedOperationException( "Object is read only" ); throw new UnsupportedOperationException( "Object is read only" );
} }
@SuppressWarnings("rawtypes") public void putAll( Map m ){
public void putAll( Map m ){
throw new UnsupportedOperationException( "Object is read only" ); throw new UnsupportedOperationException( "Object is read only" );
} }
@ -401,8 +397,7 @@ public class LazyBSONObject implements BSONObject {
return elements; return elements;
} }
@SuppressWarnings("rawtypes") public Map toMap(){
public Map toMap(){
throw new UnsupportedOperationException( "Not Supported" ); throw new UnsupportedOperationException( "Not Supported" );
} }
@ -686,6 +681,5 @@ public class LazyBSONObject implements BSONObject {
protected final BSONByteBuffer _input; // TODO - Guard this with synchronicity? protected final BSONByteBuffer _input; // TODO - Guard this with synchronicity?
// callback is kept to create sub-objects on the fly // callback is kept to create sub-objects on the fly
protected final LazyBSONCallback _callback; protected final LazyBSONCallback _callback;
@SuppressWarnings("unused") private static final Logger log = Logger.getLogger( "org.bson.LazyBSONObject" );
private static final Logger log = Logger.getLogger( "org.bson.LazyBSONObject" );
} }

View File

@ -12,9 +12,9 @@ import com.massivecraft.mcore.xlib.mongodb.util.JSON;
* @author scotthernandez * @author scotthernandez
* *
*/ */
@SuppressWarnings({"unused"})
public class LazyDBList extends LazyBSONList implements DBObject { public class LazyDBList extends LazyBSONList implements DBObject {
@SuppressWarnings("unused") private static final long serialVersionUID = -4415279469780082174L;
private static final long serialVersionUID = -4415279469780082174L;
public LazyDBList(byte[] data, LazyBSONCallback callback) { super(data, callback); } public LazyDBList(byte[] data, LazyBSONCallback callback) { super(data, callback); }
public LazyDBList(byte[] data, int offset, LazyBSONCallback callback) { super(data, offset, callback); } public LazyDBList(byte[] data, int offset, LazyBSONCallback callback) { super(data, offset, callback); }

View File

@ -18,7 +18,6 @@
package com.massivecraft.mcore.xlib.bson.io; package com.massivecraft.mcore.xlib.bson.io;
import java.io.*; import java.io.*;
import java.util.*; import java.util.*;

View File

@ -27,7 +27,7 @@ import java.util.Date;
* <b>time</b> is seconds since epoch * <b>time</b> is seconds since epoch
* <b>inc<b> is an ordinal * <b>inc<b> is an ordinal
*/ */
-public class BSONTimestamp implements Serializable {
+public class BSONTimestamp implements Comparable<BSONTimestamp>, Serializable {
private static final long serialVersionUID = -3268482672267936464L; private static final long serialVersionUID = -3268482672267936464L;
@ -60,6 +60,25 @@ public class BSONTimestamp implements Serializable {
return "TS time:" + _time + " inc:" + _inc; return "TS time:" + _time + " inc:" + _inc;
} }
@Override
public int compareTo(BSONTimestamp ts) {
if(getTime() != ts.getTime()) {
return getTime() - ts.getTime();
}
else{
return getInc() - ts.getInc();
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + _inc;
result = prime * result + getTime();
return result;
}
@Override @Override
public boolean equals(Object obj) { public boolean equals(Object obj) {
if (obj == this) if (obj == this)
@ -73,4 +92,5 @@ public class BSONTimestamp implements Serializable {
final int _inc; final int _inc;
final Date _time; final Date _time;
} }
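BSONTimestamp becoming Comparable means oplog-style timestamps order naturally: by time first, then by the inc ordinal. A small sketch with made-up values:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import com.massivecraft.mcore.xlib.bson.types.BSONTimestamp;

public class TimestampSortSketch
{
	public static void main(String[] args)
	{
		// Two stamps in the same second plus one from an earlier second.
		List<BSONTimestamp> stamps = Arrays.asList(
			new BSONTimestamp(1367400000, 2),
			new BSONTimestamp(1367400000, 1),
			new BSONTimestamp(1367390000, 7)
		);

		Collections.sort(stamps); // earlier time first, then lower inc
		System.out.println(stamps);
	}
}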

View File

@ -48,6 +48,7 @@ import java.util.*;
* </pre></blockquote> * </pre></blockquote>
* </p> * </p>
*/ */
@SuppressWarnings({"unchecked", "rawtypes"})
public class BasicBSONList extends ArrayList<Object> implements BSONObject { public class BasicBSONList extends ArrayList<Object> implements BSONObject {
private static final long serialVersionUID = -4415279469780082174L; private static final long serialVersionUID = -4415279469780082174L;
@ -80,7 +81,6 @@ public class BasicBSONList extends ArrayList<Object> implements BSONObject {
return v; return v;
} }
@SuppressWarnings({ "unchecked", "rawtypes" })
public void putAll( Map m ){ public void putAll( Map m ){
for ( Map.Entry entry : (Set<Map.Entry>)m.entrySet() ){ for ( Map.Entry entry : (Set<Map.Entry>)m.entrySet() ){
put( entry.getKey().toString() , entry.getValue() ); put( entry.getKey().toString() , entry.getValue() );
@ -137,7 +137,6 @@ public class BasicBSONList extends ArrayList<Object> implements BSONObject {
return new StringRangeSet(size()); return new StringRangeSet(size());
} }
@SuppressWarnings({ "unchecked", "rawtypes" })
public Map toMap() { public Map toMap() {
Map m = new HashMap(); Map m = new HashMap();
Iterator i = this.keySet().iterator(); Iterator i = this.keySet().iterator();

View File

@ -24,9 +24,10 @@ package com.massivecraft.mcore.xlib.bson.types;
import com.massivecraft.mcore.xlib.bson.BSON; import com.massivecraft.mcore.xlib.bson.BSON;
import java.io.Serializable; import java.io.Serializable;
import java.util.Arrays;
/** /**
generic binary holder * generic binary holder
*/ */
public class Binary implements Serializable { public class Binary implements Serializable {
@ -34,34 +35,64 @@ public class Binary implements Serializable {
/** /**
* Creates a Binary object with the default binary type of 0 * Creates a Binary object with the default binary type of 0
*
* @param data raw data * @param data raw data
*/ */
public Binary( byte[] data ){ public Binary(byte[] data) {
this(BSON.B_GENERAL, data); this(BSON.B_GENERAL, data);
} }
/** /**
* Creates a Binary object * Creates a Binary object
*
* @param type type of the field as encoded in BSON * @param type type of the field as encoded in BSON
* @param data raw data * @param data raw data
*/ */
public Binary( byte type , byte[] data ){ public Binary(byte type, byte[] data) {
_type = type; _type = type;
_data = data; _data = data;
} }
public byte getType(){ public byte getType() {
return _type; return _type;
} }
public byte[] getData(){ public byte[] getData() {
return _data; return _data;
} }
public int length(){ public int length() {
return _data.length; return _data.length;
} }
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Binary)) {
return false;
}
Binary binary = (Binary) o;
if (_type != binary._type) {
return false;
}
if (!Arrays.equals(_data, binary._data)) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = (int) _type;
result = 31 * result + (_data != null ? Arrays.hashCode(_data) : 0);
return result;
}
final byte _type; final byte _type;
final byte[] _data; final byte[] _data;
} }
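The new equals() and hashCode() give Binary value semantics: two instances with the same subtype byte and the same payload now compare equal, so they behave sensibly as set members or map keys. A quick sketch:

import java.util.HashSet;
import java.util.Set;

import com.massivecraft.mcore.xlib.bson.types.Binary;

public class BinaryEqualitySketch
{
	public static void main(String[] args)
	{
		Binary a = new Binary(new byte[] {1, 2, 3});
		Binary b = new Binary(new byte[] {1, 2, 3});

		Set<Binary> set = new HashSet<Binary>();
		set.add(a);
		set.add(b);

		// Prints "true 1": equal payloads now collapse to one set entry.
		System.out.println(a.equals(b) + " " + set.size());
	}
}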

View File

@ -0,0 +1,58 @@
package com.massivecraft.mcore.xlib.mongodb;
public class AggregationOutput {
/**
* returns an iterator to the results of the aggregation
* @return
*/
public Iterable<DBObject> results() {
return _resultSet;
}
/**
* returns the command result of the aggregation
* @return
*/
public CommandResult getCommandResult(){
return _commandResult;
}
/**
* returns the original aggregation command
* @return
*/
public DBObject getCommand() {
return _cmd;
}
/**
* returns the address of the server used to execute the aggregation
* @return
*/
public ServerAddress getServerUsed() {
return _commandResult.getServerUsed();
}
/**
* string representation of the aggregation command
*/
public String toString(){
return _commandResult.toString();
}
@SuppressWarnings("unchecked")
public AggregationOutput(DBObject cmd, CommandResult raw) {
_commandResult = raw;
_cmd = cmd;
if(raw.containsField("result"))
_resultSet = (Iterable<DBObject>) raw.get( "result" );
else
throw new IllegalArgumentException("result undefined");
}
protected final CommandResult _commandResult;
protected final DBObject _cmd;
protected final Iterable<DBObject> _resultSet;
}
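AggregationOutput is the wrapper handed back by the aggregation helper on DBCollection in the 2.11 driver. A hedged usage sketch; the collection, the "world" field and the pipeline are placeholders, and it assumes the 2.11 DBCollection.aggregate(DBObject, DBObject...) signature:

import com.massivecraft.mcore.xlib.mongodb.AggregationOutput;
import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBCollection;
import com.massivecraft.mcore.xlib.mongodb.DBObject;

public class AggregateSketch
{
	// Group documents by a "world" field, count them, and walk the result rows.
	public static void printCountsPerWorld(DBCollection coll)
	{
		DBObject group = new BasicDBObject("$group",
			new BasicDBObject("_id", "$world").append("count", new BasicDBObject("$sum", 1)));

		AggregationOutput out = coll.aggregate(group);
		out.getCommandResult().throwOnError(); // surfaces a typed exception on failure
		for (DBObject row : out.results())
		{
			System.out.println(row);
		}
	}
}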

View File

@ -18,12 +18,12 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import java.util.Map;
import com.massivecraft.mcore.xlib.bson.BasicBSONObject; import com.massivecraft.mcore.xlib.bson.BasicBSONObject;
import com.massivecraft.mcore.xlib.mongodb.util.JSON; import com.massivecraft.mcore.xlib.mongodb.util.JSON;
import java.util.Map;
/** /**
* a basic implementation of bson object that is mongo specific. * a basic implementation of bson object that is mongo specific.
* A <code>DBObject</code> can be created as follows, using this class: * A <code>DBObject</code> can be created as follows, using this class:
@ -32,6 +32,7 @@ import com.massivecraft.mcore.xlib.mongodb.util.JSON;
* obj.put( "foo", "bar" ); * obj.put( "foo", "bar" );
* </pre></blockquote> * </pre></blockquote>
*/ */
@SuppressWarnings({"rawtypes"})
public class BasicDBObject extends BasicBSONObject implements DBObject { public class BasicDBObject extends BasicBSONObject implements DBObject {
private static final long serialVersionUID = -4415279469780082174L; private static final long serialVersionUID = -4415279469780082174L;
@ -63,8 +64,7 @@ public class BasicDBObject extends BasicBSONObject implements DBObject {
* Creates an object from a map. * Creates an object from a map.
* @param m map to convert * @param m map to convert
*/ */
@SuppressWarnings("rawtypes") public BasicDBObject(Map m) {
public BasicDBObject(Map m) {
super(m); super(m);
} }
@ -106,5 +106,5 @@ public class BasicDBObject extends BasicBSONObject implements DBObject {
return newobj; return newobj;
} }
private boolean _isPartialObject = false; private boolean _isPartialObject;
} }

View File

@ -27,6 +27,7 @@ import java.util.Map;
* example: * example:
* BasicDBObjectBuilder.start().add( "name" , "eliot" ).add( "number" , 17 ).get() * BasicDBObjectBuilder.start().add( "name" , "eliot" ).add( "number" , 17 ).get()
*/ */
@SuppressWarnings({"unchecked", "rawtypes"})
public class BasicDBObjectBuilder { public class BasicDBObjectBuilder {
/** /**
@ -59,7 +60,6 @@ public class BasicDBObjectBuilder {
* @param m map to use * @param m map to use
* @return the new builder * @return the new builder
*/ */
@SuppressWarnings({ "unchecked", "rawtypes" })
public static BasicDBObjectBuilder start(Map m){ public static BasicDBObjectBuilder start(Map m){
BasicDBObjectBuilder b = new BasicDBObjectBuilder(); BasicDBObjectBuilder b = new BasicDBObjectBuilder();
Iterator<Map.Entry> i = m.entrySet().iterator(); Iterator<Map.Entry> i = m.entrySet().iterator();

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* An exception indicating a failed command.
*/
public class CommandFailureException extends MongoException {
private static final long serialVersionUID = -1180715413196161037L;
private final CommandResult commandResult;
/**
* Construct a new instance with the CommandResult from a failed command
*
* @param commandResult the result
*/
public CommandFailureException(CommandResult commandResult){
super(ServerError.getCode(commandResult), commandResult.toString());
this.commandResult = commandResult;
}
/**
* Gets the getlasterror command result document.
*
* @return the command result
*/
public CommandResult getCommandResult() {
return commandResult;
}
}

View File

@ -25,14 +25,9 @@ package com.massivecraft.mcore.xlib.mongodb;
 public class CommandResult extends BasicDBObject {
 CommandResult(ServerAddress srv) {
-this(null, srv);
-}
-CommandResult(DBObject cmd, ServerAddress srv) {
 if (srv == null) {
 throw new IllegalArgumentException("server address is null");
 }
-_cmd = cmd;
 _host = srv;
 //so it is shown in toString/debug
 put("serverUsed", srv.toString());
@ -61,55 +56,36 @@ public class CommandResult extends BasicDBObject {
 * @return The error message or null
 */
 public String getErrorMessage(){
-Object foo = get( "errmsg" );
-if ( foo == null )
+Object errorMessage = get( "errmsg" );
+if ( errorMessage == null )
 return null;
-return foo.toString();
+return errorMessage.toString();
 }
 /**
 * utility method to create an exception with the command name
 * @return The mongo exception or null
 */
-public MongoException getException(){
-if ( !ok() ) {
-StringBuilder buf = new StringBuilder();
-String cmdName;
-if (_cmd != null) {
-cmdName = _cmd.keySet().iterator().next();
-buf.append( "command failed [" ).append( cmdName ).append( "]: " );
-} else {
-buf.append( "operation failed: ");
-}
-buf.append( toString() );
-return new CommandFailure( this , buf.toString() );
-} else {
-// GLE check
-if ( hasErr() ) {
-Object foo = get( "err" );
-int code = getCode();
-String s = foo.toString();
-if ( code == 11000 || code == 11001 || s.startsWith( "E11000" ) || s.startsWith( "E11001" ) )
-return new MongoException.DuplicateKey( code , s );
-return new MongoException( code , s );
-}
-}
-//all good, should never get here.
-return null;
-}
+public MongoException getException() {
+if ( !ok() ) { // check for command failure
+return new CommandFailureException( this );
+} else if ( hasErr() ) { // check for errors reported by getlasterror command
+if (getCode() == 11000 || getCode() == 11001 || getCode() == 12582) {
+return new MongoException.DuplicateKey(this);
+}
+else {
+return new WriteConcernException(this);
+}
+}
+return null;
+}
 /**
 * returns the "code" field, as an int
 * @return -1 if there is no code
 */
-private int getCode(){
+int getCode() {
 int code = -1;
 if ( get( "code" ) instanceof Number )
 code = ((Number)get("code")).intValue();
@ -129,7 +105,7 @@ public class CommandResult extends BasicDBObject {
 * throws an exception containing the cmd name, in case the command failed, or the "err/code" information
 * @throws MongoException
 */
-public void throwOnError() throws MongoException {
+public void throwOnError() {
 if ( !ok() || hasErr() ){
 throw getException();
 }
@ -139,15 +115,7 @@ public class CommandResult extends BasicDBObject {
return _host; return _host;
} }
private final DBObject _cmd;
private final ServerAddress _host; private final ServerAddress _host;
private static final long serialVersionUID = 1L; private static final long serialVersionUID = 1L;
static class CommandFailure extends MongoException {
private static final long serialVersionUID = 1L;
CommandFailure( CommandResult res , String msg ){
super( ServerError.getCode( res ) , msg );
}
}
} }
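getException() now maps problems onto typed exceptions instead of assembling message strings: CommandFailureException when the command itself did not succeed, and MongoException.DuplicateKey or WriteConcernException for errors reported through getlasterror. A sketch of what a caller can now distinguish ("ping" is just a convenient server command):

import com.massivecraft.mcore.xlib.mongodb.CommandFailureException;
import com.massivecraft.mcore.xlib.mongodb.CommandResult;
import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.MongoException;

public class CommandErrorSketch
{
	public static void pingOrReport(DB db)
	{
		CommandResult res = db.command("ping");
		try
		{
			res.throwOnError();
		}
		catch (CommandFailureException e)
		{
			// The command failed; the full result document travels with the exception.
			System.err.println("command failed: " + e.getCommandResult());
		}
		catch (MongoException.DuplicateKey e)
		{
			// Raised for getlasterror codes 11000, 11001 and 12582.
			System.err.println("duplicate key: " + e.getMessage());
		}
	}
}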

View File

@ -0,0 +1,252 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Base class for classes that manage connections to mongo instances as background tasks.
*/
abstract class ConnectionStatus {
ConnectionStatus(List<ServerAddress> mongosAddresses, Mongo mongo) {
_mongoOptions = mongoOptionsDefaults.copy();
_mongoOptions.socketFactory = mongo._options.socketFactory;
this._mongosAddresses = new ArrayList<ServerAddress>(mongosAddresses);
this._mongo = mongo;
}
protected BackgroundUpdater _updater;
protected final Mongo _mongo;
protected final List<ServerAddress> _mongosAddresses;
protected volatile boolean _closed;
protected final MongoOptions _mongoOptions;
protected static int updaterIntervalMS;
protected static int updaterIntervalNoMasterMS;
@SuppressWarnings("deprecation")
protected static final MongoOptions mongoOptionsDefaults = new MongoOptions();
protected static final float latencySmoothFactor;
protected static final DBObject isMasterCmd = new BasicDBObject("ismaster", 1);
/**
* Start the updater if there is one
*/
void start() {
if (_updater != null) {
_updater.start();
}
}
/**
* Stop the updater if there is one
*/
void close() {
_closed = true;
if (_updater != null) {
_updater.interrupt();
}
}
/**
* Gets the list of addresses for this connection.
*/
abstract List<ServerAddress> getServerAddressList();
/**
* Whether there is least one server up.
*/
abstract boolean hasServerUp();
/**
* Ensures that we have the current master, if there is one. If the current snapshot of the replica set
* has no master, this method waits one cycle to find a new master, and returns it if found, or null if not.
*
* @return address of the current master, or null if there is none
*/
abstract Node ensureMaster();
/**
* Whether this connection has been closed.
*/
void checkClosed() {
if (_closed)
throw new IllegalStateException("ReplicaSetStatus closed");
}
static {
updaterIntervalMS = Integer.parseInt(System.getProperty("com.mongodb.updaterIntervalMS", "5000"));
updaterIntervalNoMasterMS = Integer.parseInt(System.getProperty("com.mongodb.updaterIntervalNoMasterMS", "10"));
mongoOptionsDefaults.connectTimeout = Integer.parseInt(System.getProperty("com.mongodb.updaterConnectTimeoutMS", "20000"));
mongoOptionsDefaults.socketTimeout = Integer.parseInt(System.getProperty("com.mongodb.updaterSocketTimeoutMS", "20000"));
latencySmoothFactor = Float.parseFloat(System.getProperty("com.mongodb.latencySmoothFactor", "4"));
}
static class Node {
Node(float pingTime, ServerAddress addr, int maxBsonObjectSize, boolean ok) {
this._pingTime = pingTime;
this._addr = addr;
this._maxBsonObjectSize = maxBsonObjectSize;
this._ok = ok;
}
public boolean isOk() {
return _ok;
}
public int getMaxBsonObjectSize() {
return _maxBsonObjectSize;
}
public ServerAddress getServerAddress() {
return _addr;
}
protected final ServerAddress _addr;
protected final float _pingTime;
protected final boolean _ok;
protected final int _maxBsonObjectSize;
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final Node node = (Node) o;
if (_maxBsonObjectSize != node._maxBsonObjectSize) return false;
if (_ok != node._ok) return false;
if (Float.compare(node._pingTime, _pingTime) != 0) return false;
if (!_addr.equals(node._addr)) return false;
return true;
}
@Override
public int hashCode() {
int result = _addr.hashCode();
result = 31 * result + (_pingTime != +0.0f ? Float.floatToIntBits(_pingTime) : 0);
result = 31 * result + (_ok ? 1 : 0);
result = 31 * result + _maxBsonObjectSize;
return result;
}
public String toJSON() {
StringBuilder buf = new StringBuilder();
buf.append("{");
buf.append("address:'").append(_addr).append("', ");
buf.append("ok:").append(_ok).append(", ");
buf.append("ping:").append(_pingTime).append(", ");
buf.append("maxBsonObjectSize:").append(_maxBsonObjectSize).append(", ");
buf.append("}");
return buf.toString();
}
}
static class BackgroundUpdater extends Thread {
public BackgroundUpdater(final String name) {
super(name);
setDaemon(true);
}
}
static abstract class UpdatableNode {
UpdatableNode(final ServerAddress addr, Mongo mongo, MongoOptions mongoOptions) {
this._addr = addr;
this._mongo = mongo;
this._mongoOptions = mongoOptions;
this._port = new DBPort(addr, null, mongoOptions);
}
public CommandResult update() {
CommandResult res = null;
try {
long start = System.nanoTime();
res = _port.runCommand(_mongo.getDB("admin"), isMasterCmd);
long end = System.nanoTime();
float newPingMS = (end - start) / 1000000F;
if (!successfullyContacted)
_pingTimeMS = newPingMS;
else
_pingTimeMS = _pingTimeMS + ((newPingMS - _pingTimeMS) / latencySmoothFactor);
getLogger().log(Level.FINE, "Latency to " + _addr + " actual=" + newPingMS + " smoothed=" + _pingTimeMS);
successfullyContacted = true;
if (res == null) {
throw new MongoInternalException("Invalid null value returned from isMaster");
}
if (!_ok) {
getLogger().log(Level.INFO, "Server seen up: " + _addr);
}
_ok = true;
// max size was added in 1.8
if (res.containsField("maxBsonObjectSize")) {
_maxBsonObjectSize = (Integer) res.get("maxBsonObjectSize");
} else {
_maxBsonObjectSize = Bytes.MAX_OBJECT_SIZE;
}
} catch (Exception e) {
if (!((_ok) ? true : (Math.random() > 0.1))) {
return res;
}
final StringBuilder logError = (new StringBuilder("Server seen down: ")).append(_addr);
if (e instanceof IOException) {
logError.append(" - ").append(IOException.class.getName());
if (e.getMessage() != null) {
logError.append(" - message: ").append(e.getMessage());
}
getLogger().log(Level.WARNING, logError.toString());
} else {
getLogger().log(Level.WARNING, logError.toString(), e);
}
_ok = false;
}
return res;
}
protected abstract Logger getLogger();
final ServerAddress _addr;
final MongoOptions _mongoOptions;
final Mongo _mongo;
DBPort _port; // we have our own port so we can set different socket options and don't have to worry about the pool
boolean successfullyContacted = false;
boolean _ok = false;
float _pingTimeMS = 0;
int _maxBsonObjectSize;
}
}
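The ping-time bookkeeping in UpdatableNode.update() is an exponential moving average: each sample moves the stored latency by 1/latencySmoothFactor of the difference, with the factor defaulting to 4 and overridable through the com.mongodb.latencySmoothFactor system property. A tiny worked sketch of that update rule:

public class LatencySmoothingSketch
{
	public static void main(String[] args)
	{
		float latencySmoothFactor = 4F; // driver default
		float pingTimeMS = 20F;         // previously smoothed latency

		// A noisy 40ms sample only moves the estimate a quarter of the way.
		float newPingMS = 40F;
		pingTimeMS = pingTimeMS + ((newPingMS - pingTimeMS) / latencySmoothFactor);

		System.out.println(pingTimeMS); // 25.0
	}
}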

View File

@ -18,6 +18,11 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.mongodb.DBApiLayer.Result;
import com.massivecraft.mcore.xlib.mongodb.util.Util;
import java.io.ByteArrayOutputStream; import java.io.ByteArrayOutputStream;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
@ -28,15 +33,26 @@ import java.util.LinkedHashSet;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
import com.massivecraft.mcore.xlib.mongodb.DBApiLayer.Result;
import com.massivecraft.mcore.xlib.mongodb.util.Util;
/** /**
* an abstract class that represents a logical database on a server * an abstract class that represents a logical database on a server
* @dochub databases * @dochub databases
*/ */
public abstract class DB { public abstract class DB {
private static final Set<String> _obedientCommands = new HashSet<String>();
static {
_obedientCommands.add("group");
_obedientCommands.add("aggregate");
_obedientCommands.add("collstats");
_obedientCommands.add("dbstats");
_obedientCommands.add("count");
_obedientCommands.add("distinct");
_obedientCommands.add("geonear");
_obedientCommands.add("geosearch");
_obedientCommands.add("geowalk");
}
/** /**
* @param mongo the mongo instance * @param mongo the mongo instance
* @param name the database name * @param name the database name
@ -47,6 +63,44 @@ public abstract class DB {
_options = new Bytes.OptionHolder( _mongo._netOptions ); _options = new Bytes.OptionHolder( _mongo._netOptions );
} }
/**
* Determines the read preference that should be used for the given command.
* @param command the <code>DBObject</code> representing the command
* @param requestedPreference the preference requested by the client.
* @return the read preference to use for the given command. It will never return null.
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference
*/
ReadPreference getCommandReadPreference(DBObject command, ReadPreference requestedPreference){
String comString = command.keySet().iterator().next();
if (comString.equals("getnonce") || comString.equals("authenticate")) {
return ReadPreference.primaryPreferred();
}
boolean primaryRequired;
// explicitly check mapreduce commands are inline
if(comString.equals("mapreduce")) {
Object out = command.get("out");
if (out instanceof BSONObject ){
BSONObject outMap = (BSONObject) out;
primaryRequired = outMap.get("inline") == null;
}
else
primaryRequired = true;
} else {
primaryRequired = !_obedientCommands.contains(comString.toLowerCase());
}
if (primaryRequired) {
return ReadPreference.primary();
} else if (requestedPreference == null) {
return ReadPreference.primary();
} else {
return requestedPreference;
}
}
/** /**
* starts a new "consistent request". * starts a new "consistent request".
* Following this call and until requestDone() is called, all db operations should use the same underlying connection. * Following this call and until requestDone() is called, all db operations should use the same underlying connection.
@ -95,6 +149,7 @@ public abstract class DB {
* @param name the name of the collection to return * @param name the name of the collection to return
* @param options options * @param options options
* @return the collection * @return the collection
* @throws MongoException
*/ */
public DBCollection createCollection( String name, DBObject options ){ public DBCollection createCollection( String name, DBObject options ){
if ( options != null ){ if ( options != null ){
@ -140,21 +195,52 @@ public abstract class DB {
* @throws MongoException * @throws MongoException
* @dochub commands * @dochub commands
*/ */
public CommandResult command( DBObject cmd ) throws MongoException{ public CommandResult command( DBObject cmd ){
return command( cmd, 0 ); return command( cmd, 0 );
} }
public CommandResult command( DBObject cmd, DBEncoder encoder ) throws MongoException{
/**
* Executes a database command.
* This method calls {@link DB#command(com.massivecraft.mcore.xlib.mongodb.DBObject, int, com.massivecraft.mcore.xlib.mongodb.DBEncoder) } with 0 as query option.
* @see <a href="http://mongodb.onconfluence.com/display/DOCS/List+of+Database+Commands">List of Commands</a>
* @param cmd dbobject representing the command to execute
* @param encoder
* @return result of command from the database
* @throws MongoException
* @dochub commands
*/
public CommandResult command( DBObject cmd, DBEncoder encoder ){
return command( cmd, 0, encoder ); return command( cmd, 0, encoder );
} }
public CommandResult command( DBObject cmd , int options, DBEncoder encoder ) /**
throws MongoException { * Executes a database command.
return command(cmd, options, null, encoder); * This method calls {@link DB#command(com.massivecraft.mcore.xlib.mongodb.DBObject, int, com.massivecraft.mcore.xlib.mongodb.ReadPreference, com.massivecraft.mcore.xlib.mongodb.DBEncoder) } with a null readPrefs.
* @see <a href="http://mongodb.onconfluence.com/display/DOCS/List+of+Database+Commands">List of Commands</a>
* @param cmd dbobject representing the command to execute
* @param options query options to use
* @param encoder
* @return result of command from the database
* @throws MongoException
* @dochub commands
*/
public CommandResult command( DBObject cmd , int options, DBEncoder encoder ){
return command(cmd, options, getReadPreference(), encoder);
} }
public CommandResult command( DBObject cmd , int options, ReadPreference readPrefs ) /**
throws MongoException { * Executes a database command.
* This method calls {@link DB#command(com.massivecraft.mcore.xlib.mongodb.DBObject, int, com.massivecraft.mcore.xlib.mongodb.ReadPreference, com.massivecraft.mcore.xlib.mongodb.DBEncoder) } with a default encoder.
* @see <a href="http://mongodb.onconfluence.com/display/DOCS/List+of+Database+Commands">List of Commands</a>
* @param cmd dbobject representing the command to execute
* @param options query options to use
* @param readPrefs ReadPreferences for this command (nodes selection is the biggest part of this)
* @return result of command from the database
* @throws MongoException
* @dochub commands
*/
public CommandResult command( DBObject cmd , int options, ReadPreference readPrefs ){
return command(cmd, options, readPrefs, DefaultDBEncoder.FACTORY.create()); return command(cmd, options, readPrefs, DefaultDBEncoder.FACTORY.create());
} }
@ -164,12 +250,14 @@ public abstract class DB {
* @param cmd dbobject representing the command to execute * @param cmd dbobject representing the command to execute
* @param options query options to use * @param options query options to use
* @param readPrefs ReadPreferences for this command (nodes selection is the biggest part of this) * @param readPrefs ReadPreferences for this command (nodes selection is the biggest part of this)
* @param encoder
* @return result of command from the database * @return result of command from the database
* @dochub commands
* @throws MongoException * @throws MongoException
* @dochub commands
*/ */
public CommandResult command( DBObject cmd , int options, ReadPreference readPrefs, DBEncoder encoder ) public CommandResult command( DBObject cmd , int options, ReadPreference readPrefs, DBEncoder encoder ){
throws MongoException { readPrefs = getCommandReadPreference(cmd, readPrefs);
cmd = wrapCommand(cmd, readPrefs);
Iterator<DBObject> i = Iterator<DBObject> i =
getCollection("$cmd").__find(cmd, new BasicDBObject(), 0, -1, 0, options, readPrefs , getCollection("$cmd").__find(cmd, new BasicDBObject(), 0, -1, 0, options, readPrefs ,
@ -179,24 +267,40 @@ public abstract class DB {
DBObject res = i.next(); DBObject res = i.next();
ServerAddress sa = (i instanceof Result) ? ((Result) i).getServerAddress() : null; ServerAddress sa = (i instanceof Result) ? ((Result) i).getServerAddress() : null;
CommandResult cr = new CommandResult(cmd, sa); CommandResult cr = new CommandResult(sa);
cr.putAll( res ); cr.putAll( res );
return cr; return cr;
} }
// Only append $readPreference meta-operator if connected to a mongos, read preference is not primary
// or secondary preferred,
// and command is an instance of BasicDBObject. The last condition is unfortunate, but necessary in case
// the encoder is not capable of encoding a BasicDBObject
// Due to issues with compatibility between different versions of mongos, also wrap the command in a
// $query field, so that the $readPreference is not rejected
private DBObject wrapCommand(DBObject cmd, final ReadPreference readPrefs) {
if (getMongo().isMongosConnection() &&
!(ReadPreference.primary().equals(readPrefs) || ReadPreference.secondaryPreferred().equals(readPrefs)) &&
cmd instanceof BasicDBObject) {
cmd = new BasicDBObject("$query", cmd)
.append(QueryOpBuilder.READ_PREFERENCE_META_OPERATOR, readPrefs.toDBObject());
}
return cmd;
}
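As a rough illustration of the wrapping described in the comments above (hypothetical collection name; QueryOpBuilder.READ_PREFERENCE_META_OPERATOR names the $readPreference key), a count command routed through mongos with a non-primary read preference effectively becomes:

    // Illustration only, not part of this commit; assumes the imports used elsewhere in this file.
    DBObject cmd = new BasicDBObject("count", "visits");   // "visits" is an invented collection name
    DBObject wrapped = new BasicDBObject("$query", cmd)
            .append("$readPreference", ReadPreference.secondary().toDBObject());
    // wrapped renders roughly as:
    // { "$query" : { "count" : "visits" } , "$readPreference" : { "mode" : "secondary" } }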
/** /**
* Executes a database command. * Executes a database command.
* @see <a href="http://mongodb.onconfluence.com/display/DOCS/List+of+Database+Commands">List of Commands</a> * @see <a href="http://mongodb.onconfluence.com/display/DOCS/List+of+Database+Commands">List of Commands</a>
* @param cmd dbobject representing the command to execute * @param cmd dbobject representing the command to execute
* @param options query options to use * @param options query options to use
* @return result of command from the database * @return result of command from the database
* @dochub commands
* @throws MongoException * @throws MongoException
* @dochub commands
*/ */
public CommandResult command( DBObject cmd , int options ) public CommandResult command( DBObject cmd , int options ){
throws MongoException { return command(cmd, options, getReadPreference());
return command(cmd, options, getReadPreference());
} }
/** /**
* Executes a database command. * Executes a database command.
* This method constructs a simple dbobject and calls {@link DB#command(com.massivecraft.mcore.xlib.mongodb.DBObject) } * This method constructs a simple dbobject and calls {@link DB#command(com.massivecraft.mcore.xlib.mongodb.DBObject) }
@ -204,9 +308,9 @@ public abstract class DB {
* @param cmd command to execute * @param cmd command to execute
* @return result of command from the database * @return result of command from the database
* @throws MongoException * @throws MongoException
* @dochub commands
*/ */
public CommandResult command( String cmd ) public CommandResult command( String cmd ){
throws MongoException {
return command( new BasicDBObject( cmd , Boolean.TRUE ) ); return command( new BasicDBObject( cmd , Boolean.TRUE ) );
} }
@ -218,9 +322,9 @@ public abstract class DB {
* @param options query options to use * @param options query options to use
* @return result of command from the database * @return result of command from the database
* @throws MongoException * @throws MongoException
* @dochub commands
*/ */
public CommandResult command( String cmd, int options ) public CommandResult command( String cmd, int options ){
throws MongoException {
return command( new BasicDBObject( cmd , Boolean.TRUE ), options ); return command( new BasicDBObject( cmd , Boolean.TRUE ), options );
} }
@ -232,8 +336,7 @@ public abstract class DB {
* @return The command result * @return The command result
* @throws MongoException * @throws MongoException
*/ */
public CommandResult doEval( String code , Object ... args ) public CommandResult doEval( String code , Object ... args ){
throws MongoException {
return command( BasicDBObjectBuilder.start() return command( BasicDBObjectBuilder.start()
.add( "$eval" , code ) .add( "$eval" , code )
@ -250,8 +353,7 @@ public abstract class DB {
* @return The object * @return The object
* @throws MongoException * @throws MongoException
*/ */
public Object eval( String code , Object ... args ) public Object eval( String code , Object ... args ){
throws MongoException {
CommandResult res = doEval( code , args ); CommandResult res = doEval( code , args );
res.throwOnError(); res.throwOnError();
@ -261,6 +363,7 @@ public abstract class DB {
/** /**
* Returns the result of "dbstats" command * Returns the result of "dbstats" command
* @return * @return
* @throws MongoException
*/ */
public CommandResult getStats() { public CommandResult getStats() {
return command("dbstats"); return command("dbstats");
@ -288,8 +391,7 @@ public abstract class DB {
* @return the names of collections in this database * @return the names of collections in this database
* @throws MongoException * @throws MongoException
*/ */
public Set<String> getCollectionNames() public Set<String> getCollectionNames(){
throws MongoException {
DBCollection namespaces = getCollection("system.namespaces"); DBCollection namespaces = getCollection("system.namespaces");
if (namespaces == null) if (namespaces == null)
@ -330,6 +432,7 @@ public abstract class DB {
* Checks to see if a collection by name &lt;name&gt; exists. * Checks to see if a collection by name &lt;name&gt; exists.
* @param collectionName The collection to test for existence * @param collectionName The collection to test for existence
* @return false if no collection by that name exists, true if a match to an existing collection was found * @return false if no collection by that name exists, true if a match to an existing collection was found
* @throws MongoException
*/ */
public boolean collectionExists(String collectionName) public boolean collectionExists(String collectionName)
{ {
@ -376,8 +479,7 @@ public abstract class DB {
* @return DBObject with error and status information * @return DBObject with error and status information
* @throws MongoException * @throws MongoException
*/ */
public CommandResult getLastError() public CommandResult getLastError(){
throws MongoException {
return command(new BasicDBObject("getlasterror", 1)); return command(new BasicDBObject("getlasterror", 1));
} }
@ -387,8 +489,7 @@ public abstract class DB {
* @return * @return
* @throws MongoException * @throws MongoException
*/ */
public CommandResult getLastError( com.massivecraft.mcore.xlib.mongodb.WriteConcern concern ) public CommandResult getLastError( com.massivecraft.mcore.xlib.mongodb.WriteConcern concern ){
throws MongoException {
return command( concern.getCommand() ); return command( concern.getCommand() );
} }
@ -400,8 +501,7 @@ public abstract class DB {
* @return The command result * @return The command result
* @throws MongoException * @throws MongoException
*/ */
public CommandResult getLastError( int w , int wtimeout , boolean fsync ) public CommandResult getLastError( int w , int wtimeout , boolean fsync ){
throws MongoException {
return command( (new com.massivecraft.mcore.xlib.mongodb.WriteConcern( w, wtimeout , fsync )).getCommand() ); return command( (new com.massivecraft.mcore.xlib.mongodb.WriteConcern( w, wtimeout , fsync )).getCommand() );
} }
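The getLastError variants above are the explicit check used with unacknowledged writes. A sketch, assuming the db handle from the earlier example and an illustrative collection name; WriteConcern.UNACKNOWLEDGED is the 2.10+ name for a fire-and-forget write:

    DBCollection coll = db.getCollection("visits");
    coll.insert(new BasicDBObject("path", "/index"), WriteConcern.UNACKNOWLEDGED); // no ack requested
    CommandResult gle = db.getLastError(1, 0, false); // w=1, no wtimeout, no fsync
    gle.throwOnError(); // surfaces the error of the preceding write, if any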
@ -452,8 +552,7 @@ public abstract class DB {
* Drops this database. Removes all data on disk. Use with caution. * Drops this database. Removes all data on disk. Use with caution.
* @throws MongoException * @throws MongoException
*/ */
public void dropDatabase() public void dropDatabase(){
throws MongoException {
CommandResult res = command(new BasicDBObject("dropDatabase", 1)); CommandResult res = command(new BasicDBObject("dropDatabase", 1));
res.throwOnError(); res.throwOnError();
@ -467,102 +566,95 @@ public abstract class DB {
* @dochub authenticate * @dochub authenticate
*/ */
public boolean isAuthenticated() { public boolean isAuthenticated() {
return ( _username != null ); return getAuthenticationCredentials() != null;
} }
/** /**
* Authenticates to db with the given name and password * Authenticates to db with the given credentials. If this method (or {@code authenticateCommand}) has already been
* called with the same credentials and the authentication test succeeded, this method will return true. If this method
* has already been called with different credentials and the authentication test succeeded,
* this method will throw an {@code IllegalStateException}. If this method has already been called with any credentials
* and the authentication test failed, this method will re-try the authentication test with the
* given credentials.
* *
* @param username name of user for this database * @param username name of user for this database
* @param passwd password of user for this database * @param password password of user for this database
* @return true if authenticated, false otherwise * @return true if authenticated, false otherwise
* @throws MongoException * @throws MongoException if authentication failed due to invalid user/pass, or other exceptions like I/O
* @throws IllegalStateException if authentication test has already succeeded with different credentials
* @see #authenticateCommand(String, char[])
* @dochub authenticate * @dochub authenticate
*/ */
public boolean authenticate(String username, char[] passwd ) public boolean authenticate(String username, char[] password ){
throws MongoException { return authenticateCommandHelper(username, password).failure == null;
if ( username == null || passwd == null )
throw new NullPointerException( "username can't be null" );
if ( _username != null )
throw new IllegalStateException( "can't call authenticate twice on the same DBObject" );
String hash = _hash( username , passwd );
CommandResult res = _doauth( username , hash.getBytes() );
if ( !res.ok())
return false;
_username = username;
_authhash = hash.getBytes();
return true;
} }
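A short sketch of the caching behaviour the javadoc above describes; user name and password are placeholders and the server is assumed to have auth enabled:

    boolean ok = db.authenticate("appUser", "s3cret".toCharArray());
    // Same credentials again: the cached successful test result is returned.
    boolean stillOk = db.authenticate("appUser", "s3cret".toCharArray());
    // Different credentials after a successful test would throw IllegalStateException:
    // db.authenticate("otherUser", "other".toCharArray());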
/** /**
* Authenticates to db with the given name and password * Authenticates to db with the given credentials. If this method (or {@code authenticate}) has already been
* called with the same credentials and the authentication test succeeded, this method will return true. If this method
* has already been called with different credentials and the authentication test succeeded,
* this method will throw an {@code IllegalStateException}. If this method has already been called with any credentials
* and the authentication test failed, this method will re-try the authentication test with the
* given credentials.
*
* *
* @param username name of user for this database * @param username name of user for this database
* @param passwd password of user for this database * @param password password of user for this database
* @return the CommandResult from authenticate command * @return the CommandResult from authenticate command
* @throws MongoException if authentication failed due to invalid user/pass, or other exceptions like I/O * @throws MongoException if authentication failed due to invalid user/pass, or other exceptions like I/O
* @throws IllegalStateException if authentication test has already succeeded with different credentials
* @see #authenticate(String, char[])
* @dochub authenticate * @dochub authenticate
*/ */
public CommandResult authenticateCommand(String username, char[] passwd ) public synchronized CommandResult authenticateCommand(String username, char[] password ){
throws MongoException { CommandResultPair commandResultPair = authenticateCommandHelper(username, password);
if (commandResultPair.failure != null) {
if ( username == null || passwd == null ) throw commandResultPair.failure;
throw new NullPointerException( "username can't be null" ); }
return commandResultPair.result;
if ( _username != null )
throw new IllegalStateException( "can't call authenticate twice on the same DBObject" );
String hash = _hash( username , passwd );
CommandResult res = _doauth( username , hash.getBytes() );
res.throwOnError();
_username = username;
_authhash = hash.getBytes();
return res;
} }
/* private CommandResultPair authenticateCommandHelper(String username, char[] password) {
boolean reauth(){ MongoCredential credentials =
if ( _username == null || _authhash == null ) MongoCredential.createMongoCRCredential(username, getName(), password);
throw new IllegalStateException( "no auth info!" ); if (getAuthenticationCredentials() != null) {
return _doauth( _username , _authhash ); if (getAuthenticationCredentials().equals(credentials)) {
} if (authenticationTestCommandResult != null) {
*/ return new CommandResultPair(authenticationTestCommandResult);
}
} else {
throw new IllegalStateException("can't authenticate twice on the same database");
}
}
DBObject _authCommand( String nonce ){ try {
if ( _username == null || _authhash == null ) authenticationTestCommandResult = doAuthenticate(credentials);
throw new IllegalStateException( "no auth info!" ); return new CommandResultPair(authenticationTestCommandResult);
} catch (CommandFailureException commandFailureException) {
return _authCommand( nonce , _username , _authhash ); return new CommandResultPair(commandFailureException);
}
} }
static DBObject _authCommand( String nonce , String username , byte[] hash ){ class CommandResultPair {
String key = nonce + username + new String( hash ); CommandResult result;
CommandFailureException failure;
BasicDBObject cmd = new BasicDBObject(); public CommandResultPair(final CommandResult result) {
this.result = result;
}
cmd.put("authenticate", 1); public CommandResultPair(final CommandFailureException failure) {
cmd.put("user", username); this.failure = failure;
cmd.put("nonce", nonce); }
cmd.put("key", Util.hexMD5(key.getBytes()));
return cmd;
} }
private CommandResult _doauth( String username , byte[] hash ){ abstract CommandResult doAuthenticate(MongoCredential credentials);
CommandResult res = command(new BasicDBObject("getnonce", 1));
res.throwOnError();
DBObject cmd = _authCommand( res.getString( "nonce" ) , username , hash );
return command(cmd);
}
/** /**
* Adds a new user for this db * Adds a new user for this db
* @param username * @param username
* @param passwd * @param passwd
* @throws MongoException
*/ */
public WriteResult addUser( String username , char[] passwd ){ public WriteResult addUser( String username , char[] passwd ){
return addUser(username, passwd, false); return addUser(username, passwd, false);
@ -573,6 +665,7 @@ public abstract class DB {
* @param username * @param username
* @param passwd * @param passwd
* @param readOnly if true, user will only be able to read * @param readOnly if true, user will only be able to read
* @throws MongoException
*/ */
public WriteResult addUser( String username , char[] passwd, boolean readOnly ){ public WriteResult addUser( String username , char[] passwd, boolean readOnly ){
DBCollection c = getCollection( "system.users" ); DBCollection c = getCollection( "system.users" );
@ -587,6 +680,7 @@ public abstract class DB {
/** /**
* Removes a user for this db * Removes a user for this db
* @param username * @param username
* @throws MongoException
*/ */
public WriteResult removeUser( String username ){ public WriteResult removeUser( String username ){
DBCollection c = getCollection( "system.users" ); DBCollection c = getCollection( "system.users" );
@ -629,8 +723,7 @@ public abstract class DB {
* @return DBObject with error and status information * @return DBObject with error and status information
* @throws MongoException * @throws MongoException
*/ */
public CommandResult getPreviousError() public CommandResult getPreviousError(){
throws MongoException {
return command(new BasicDBObject("getpreverror", 1)); return command(new BasicDBObject("getpreverror", 1));
} }
@ -639,8 +732,7 @@ public abstract class DB {
* Used to clear all errors such that {@link DB#getPreviousError()} will return no error. * Used to clear all errors such that {@link DB#getPreviousError()} will return no error.
* @throws MongoException * @throws MongoException
*/ */
public void resetError() public void resetError(){
throws MongoException {
command(new BasicDBObject("reseterror", 1)); command(new BasicDBObject("reseterror", 1));
} }
@ -648,8 +740,7 @@ public abstract class DB {
* For testing purposes only - this method forces an error to help test error handling * For testing purposes only - this method forces an error to help test error handling
* @throws MongoException * @throws MongoException
*/ */
public void forceError() public void forceError(){
throws MongoException {
command(new BasicDBObject("forceerror", 1)); command(new BasicDBObject("forceerror", 1));
} }
@ -673,8 +764,8 @@ public abstract class DB {
/** /**
* Makes it possible to execute "read" queries on a slave node * Makes it possible to execute "read" queries on a slave node
* *
* @deprecated Replaced with ReadPreference.SECONDARY * @deprecated Replaced with {@code ReadPreference.secondaryPreferred()}
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference.SECONDARY * @see ReadPreference#secondaryPreferred()
*/ */
@Deprecated @Deprecated
public void slaveOk(){ public void slaveOk(){
@ -712,8 +803,11 @@ public abstract class DB {
return _options.get(); return _options.get();
} }
public abstract void cleanCursors( boolean force ) throws MongoException; public abstract void cleanCursors( boolean force );
MongoCredential getAuthenticationCredentials() {
return getMongo().getAuthority().getCredentialsStore().get(getName());
}
final Mongo _mongo; final Mongo _mongo;
final String _name; final String _name;
@ -723,7 +817,7 @@ public abstract class DB {
private com.massivecraft.mcore.xlib.mongodb.ReadPreference _readPref; private com.massivecraft.mcore.xlib.mongodb.ReadPreference _readPref;
final Bytes.OptionHolder _options; final Bytes.OptionHolder _options;
String _username; // cached authentication command result, to return in case of multiple calls to authenticateCommand with the
byte[] _authhash = null; // same credentials
private volatile CommandResult authenticationTestCommandResult;
} }

View File

@ -155,6 +155,7 @@ public class DBAddress extends ServerAddress {
* creates a DBAddress pointing to a different database on the same server * creates a DBAddress pointing to a different database on the same server
* @param name database name * @param name database name
* @return * @return
* @throws MongoException
*/ */
public DBAddress getSister( String name ){ public DBAddress getSister( String name ){
try { try {

View File

@ -18,31 +18,24 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import java.util.ArrayList; import com.massivecraft.mcore.xlib.bson.BSONObject;
import java.util.Collection; import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import java.util.Collections; import com.massivecraft.mcore.xlib.mongodb.util.JSON;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList; import java.util.*;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.logging.Level; import java.util.logging.Level;
import java.util.logging.Logger; import java.util.logging.Logger;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.mongodb.util.JSON;
/** Database API /** Database API
* This cannot be directly instantiated, but the functions are available * This cannot be directly instantiated, but the functions are available
* through instances of Mongo. * through instances of Mongo.
*/ */
public class DBApiLayer extends DB { public class DBApiLayer extends DB {
static final boolean D = Boolean.getBoolean( "DEBUG.DB" );
/** The maximum number of cursors allowed */ /** The maximum number of cursors allowed */
static final int NUM_CURSORS_BEFORE_KILL = 100; static final int NUM_CURSORS_BEFORE_KILL = 100;
static final int NUM_CURSORS_PER_BATCH = 20000; static final int NUM_CURSORS_PER_BATCH = 20000;
@ -52,18 +45,18 @@ public class DBApiLayer extends DB {
static final Logger TRACE_LOGGER = Logger.getLogger( "com.mongodb.TRACE" ); static final Logger TRACE_LOGGER = Logger.getLogger( "com.mongodb.TRACE" );
static final Level TRACE_LEVEL = Boolean.getBoolean( "DB.TRACE" ) ? Level.INFO : Level.FINEST; static final Level TRACE_LEVEL = Boolean.getBoolean( "DB.TRACE" ) ? Level.INFO : Level.FINEST;
static final boolean willTrace(){ static boolean willTrace(){
return TRACE_LOGGER.isLoggable( TRACE_LEVEL ); return TRACE_LOGGER.isLoggable( TRACE_LEVEL );
} }
static final void trace( String s ){ static void trace( String s ){
TRACE_LOGGER.log( TRACE_LEVEL , s ); TRACE_LOGGER.log( TRACE_LEVEL , s );
} }
static int chooseBatchSize(int batchSize, int limit, int fetched) { static int chooseBatchSize(int batchSize, int limit, int fetched) {
int bs = Math.abs(batchSize); int bs = Math.abs(batchSize);
int remaining = limit > 0 ? limit - fetched : 0; int remaining = limit > 0 ? limit - fetched : 0;
int res = 0; int res;
if (bs == 0 && remaining > 0) if (bs == 0 && remaining > 0)
res = remaining; res = remaining;
else if (bs > 0 && remaining == 0) else if (bs > 0 && remaining == 0)
@ -122,14 +115,12 @@ public class DBApiLayer extends DB {
return old != null ? old : c; return old != null ? old : c;
} }
String _removeRoot( String ns ){
if ( ! ns.startsWith( _rootPlusDot ) )
return ns;
return ns.substring( _root.length() + 1 );
}
public void cleanCursors( boolean force ) /**
throws MongoException { * @param force true if should clean regardless of number of dead cursors
* @throws MongoException
*/
public void cleanCursors( boolean force ){
int sz = _deadCursorIds.size(); int sz = _deadCursorIds.size();
@ -161,15 +152,11 @@ public class DBApiLayer extends DB {
} }
} }
void killCursors( ServerAddress addr , List<Long> all ) void killCursors( ServerAddress addr , List<Long> all ){
throws MongoException {
if ( all == null || all.size() == 0 ) if ( all == null || all.size() == 0 )
return; return;
OutMessage om = new OutMessage( _mongo , 2007 ); OutMessage om = OutMessage.killCursors(_mongo, Math.min( NUM_CURSORS_PER_BATCH , all.size()));
om.writeInt( 0 ); // reserved
om.writeInt( Math.min( NUM_CURSORS_PER_BATCH , all.size() ) );
int soFar = 0; int soFar = 0;
int totalSoFar = 0; int totalSoFar = 0;
@ -181,9 +168,7 @@ public class DBApiLayer extends DB {
if ( soFar >= NUM_CURSORS_PER_BATCH ){ if ( soFar >= NUM_CURSORS_PER_BATCH ){
_connector.say( this , om ,com.massivecraft.mcore.xlib.mongodb.WriteConcern.NONE ); _connector.say( this , om ,com.massivecraft.mcore.xlib.mongodb.WriteConcern.NONE );
om = new OutMessage( _mongo , 2007 ); om = OutMessage.killCursors(_mongo, Math.min( NUM_CURSORS_PER_BATCH , all.size() - totalSoFar));
om.writeInt( 0 ); // reserved
om.writeInt( Math.min( NUM_CURSORS_PER_BATCH , all.size() - totalSoFar ) );
soFar = 0; soFar = 0;
} }
} }
@ -191,6 +176,11 @@ public class DBApiLayer extends DB {
_connector.say( this , om ,com.massivecraft.mcore.xlib.mongodb.WriteConcern.NONE , addr ); _connector.say( this , om ,com.massivecraft.mcore.xlib.mongodb.WriteConcern.NONE , addr );
} }
@Override
CommandResult doAuthenticate(MongoCredential credentials) {
return _connector.authenticate(credentials);
}
class MyCollection extends DBCollection { class MyCollection extends DBCollection {
MyCollection( String name ){ MyCollection( String name ){
super( DBApiLayer.this , name ); super( DBApiLayer.this , name );
@ -201,36 +191,38 @@ public class DBApiLayer extends DB {
} }
@Override @Override
public void drop() throws MongoException { public void drop(){
_collections.remove(getName()); _collections.remove(getName());
super.drop(); super.drop();
} }
public WriteResult insert(DBObject[] arr, com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ) public WriteResult insert(List<DBObject> list, com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ){
throws MongoException {
return insert( arr, true, concern, encoder ); if (concern == null) {
throw new IllegalArgumentException("Write concern can not be null");
}
return insert(list, true, concern, encoder);
} }
protected WriteResult insert(DBObject[] arr, boolean shouldApply , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ) protected WriteResult insert(List<DBObject> list, boolean shouldApply , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ){
throws MongoException {
if (encoder == null) if (encoder == null)
encoder = DefaultDBEncoder.FACTORY.create(); encoder = DefaultDBEncoder.FACTORY.create();
if ( willTrace() ) { if ( willTrace() ) {
for (DBObject o : arr) { for (DBObject o : list) {
trace( "save: " + _fullNameSpace + " " + JSON.serialize( o ) ); trace( "save: " + _fullNameSpace + " " + JSON.serialize( o ) );
} }
} }
if ( shouldApply ){ if ( shouldApply ){
for ( int i=0; i<arr.length; i++ ){ for (DBObject o : list) {
DBObject o=arr[i]; apply(o);
apply( o ); _checkObject(o, false, false);
_checkObject( o , false , false ); Object id = o.get("_id");
Object id = o.get( "_id" ); if (id instanceof ObjectId) {
if ( id instanceof ObjectId ){ ((ObjectId) id).notNew();
((ObjectId)id).notNew();
} }
} }
} }
@ -239,16 +231,12 @@ public class DBApiLayer extends DB {
int cur = 0; int cur = 0;
int maxsize = _mongo.getMaxBsonObjectSize(); int maxsize = _mongo.getMaxBsonObjectSize();
while ( cur < arr.length ){ while ( cur < list.size() ) {
OutMessage om = new OutMessage( _mongo , 2002, encoder );
int flags = 0; OutMessage om = OutMessage.insert( this , encoder, concern );
if ( concern.getContinueOnErrorForInsert() ) flags |= 1;
om.writeInt( flags );
om.writeCString( _fullNameSpace );
for ( ; cur<arr.length; cur++ ){ for ( ; cur < list.size(); cur++ ){
DBObject o = arr[cur]; DBObject o = list.get(cur);
om.putObject( o ); om.putObject( o );
// limit for batch insert is 4 x maxbson on server, use 2 x to be safe // limit for batch insert is 4 x maxbson on server, use 2 x to be safe
@ -264,50 +252,38 @@ public class DBApiLayer extends DB {
return last; return last;
} }
public WriteResult remove( DBObject o , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ) public WriteResult remove( DBObject o , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ){
throws MongoException {
if (concern == null) {
throw new IllegalArgumentException("Write concern can not be null");
}
if (encoder == null) if (encoder == null)
encoder = DefaultDBEncoder.FACTORY.create(); encoder = DefaultDBEncoder.FACTORY.create();
if ( willTrace() ) trace( "remove: " + _fullNameSpace + " " + JSON.serialize( o ) ); if ( willTrace() ) trace( "remove: " + _fullNameSpace + " " + JSON.serialize( o ) );
OutMessage om = new OutMessage( _mongo , 2006, encoder ); OutMessage om = OutMessage.remove(this, encoder, o);
om.writeInt( 0 ); // reserved
om.writeCString( _fullNameSpace );
Collection<String> keys = o.keySet();
if ( keys.size() == 1 &&
keys.iterator().next().equals( "_id" ) &&
o.get( keys.iterator().next() ) instanceof ObjectId )
om.writeInt( 1 );
else
om.writeInt( 0 );
om.putObject( o );
return _connector.say( _db , om , concern ); return _connector.say( _db , om , concern );
} }
@Override @Override
Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize, int limit , int options, ReadPreference readPref, DBDecoder decoder ) Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize, int limit , int options, ReadPreference readPref, DBDecoder decoder ){
throws MongoException {
return __find(ref, fields, numToSkip, batchSize, limit, options, readPref, decoder, DefaultDBEncoder.FACTORY.create()); return __find(ref, fields, numToSkip, batchSize, limit, options, readPref, decoder, DefaultDBEncoder.FACTORY.create());
} }
@Override @Override
Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options, Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options,
ReadPreference readPref, DBDecoder decoder, DBEncoder encoder ) throws MongoException { ReadPreference readPref, DBDecoder decoder, DBEncoder encoder ){
if ( ref == null ) if ( ref == null )
ref = new BasicDBObject(); ref = new BasicDBObject();
if ( willTrace() ) trace( "find: " + _fullNameSpace + " " + JSON.serialize( ref ) ); if ( willTrace() ) trace( "find: " + _fullNameSpace + " " + JSON.serialize( ref ) );
OutMessage query = OutMessage.query( _mongo , options , _fullNameSpace , numToSkip , chooseBatchSize(batchSize, limit, 0) , ref , fields, readPref, OutMessage query = OutMessage.query( this , options , numToSkip , chooseBatchSize(batchSize, limit, 0) , ref , fields, readPref,
encoder); encoder);
Response res = _connector.call( _db , this , query , null , 2, readPref, decoder ); Response res = _connector.call( _db , this , query , null , 2, readPref, decoder );
@ -323,38 +299,36 @@ public class DBApiLayer extends DB {
} }
@Override @Override
public WriteResult update( DBObject query , DBObject o , boolean upsert , boolean multi , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ) public WriteResult update( DBObject query , DBObject o , boolean upsert , boolean multi , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ){
throws MongoException {
if (o == null) {
throw new IllegalArgumentException("update can not be null");
}
if (concern == null) {
throw new IllegalArgumentException("Write concern can not be null");
}
if (encoder == null) if (encoder == null)
encoder = DefaultDBEncoder.FACTORY.create(); encoder = DefaultDBEncoder.FACTORY.create();
if (o != null && !o.keySet().isEmpty()) { if (!o.keySet().isEmpty()) {
// if 1st key doesn't start with $, then object will be inserted as is, need to check it // if 1st key doesn't start with $, then object will be inserted as is, need to check it
String key = o.keySet().iterator().next(); String key = o.keySet().iterator().next();
if (!key.startsWith("$")) if (!key.startsWith("$"))
_checkObject(o, false, false); _checkObject(o, false, false);
} }
if ( willTrace() ) trace( "update: " + _fullNameSpace + " " + JSON.serialize( query ) + " " + JSON.serialize( o ) ); if ( willTrace() ) {
trace( "update: " + _fullNameSpace + " " + JSON.serialize( query ) + " " + JSON.serialize( o ) );
}
OutMessage om = new OutMessage( _mongo , 2001, encoder ); OutMessage om = OutMessage.update(this, encoder, upsert, multi, query, o);
om.writeInt( 0 ); // reserved
om.writeCString( _fullNameSpace );
int flags = 0;
if ( upsert ) flags |= 1;
if ( multi ) flags |= 2;
om.writeInt( flags );
om.putObject( query );
om.putObject( o );
return _connector.say( _db , om , concern ); return _connector.say( _db , om , concern );
} }
public void createIndex( final DBObject keys, final DBObject options, DBEncoder encoder ) public void createIndex( final DBObject keys, final DBObject options, DBEncoder encoder ){
throws MongoException {
if (encoder == null) if (encoder == null)
encoder = DefaultDBEncoder.FACTORY.create(); encoder = DefaultDBEncoder.FACTORY.create();
@ -367,7 +341,7 @@ public class DBApiLayer extends DB {
MyCollection idxs = DBApiLayer.this.doGetCollection( "system.indexes" ); MyCollection idxs = DBApiLayer.this.doGetCollection( "system.indexes" );
//query first, maybe we should do an update w/upsert? -- need to test performance and lock behavior //query first, maybe we should do an update w/upsert? -- need to test performance and lock behavior
if ( idxs.findOne( full ) == null ) if ( idxs.findOne( full ) == null )
idxs.insert( new DBObject[] { full }, false, WriteConcern.SAFE, encoder ); idxs.insert(Arrays.asList(full), false, WriteConcern.SAFE, encoder);
} }
final String _fullNameSpace; final String _fullNameSpace;
@ -383,19 +357,22 @@ public class DBApiLayer extends DB {
_host = res._host; _host = res._host;
_decoder = decoder; _decoder = decoder;
init( res ); init( res );
// Only enable finalizer if cursor finalization is enabled and there is actually a cursor that needs killing
_optionalFinalizer = _mongo.getMongoOptions().isCursorFinalizerEnabled() && res.cursor() != 0 ?
new OptionalFinalizer() : null;
} }
private void init( Response res ){ private void init( Response res ){
if ( ( res._flags & Bytes.RESULTFLAG_CURSORNOTFOUND ) > 0 ){
throw new MongoException.CursorNotFound(_curResult.cursor(), res.serverUsed());
}
_totalBytes += res._len; _totalBytes += res._len;
_curResult = res; _curResult = res;
_cur = res.iterator(); _cur = res.iterator();
_sizes.add( res.size() ); _sizes.add( res.size() );
_numFetched += res.size(); _numFetched += res.size();
if ( ( res._flags & Bytes.RESULTFLAG_CURSORNOTFOUND ) > 0 ){
throw new MongoException.CursorNotFound(res._cursor, res.serverUsed());
}
if (res._cursor != 0 && _limit > 0 && _limit - _numFetched <= 0) { if (res._cursor != 0 && _limit > 0 && _limit - _numFetched <= 0) {
// fetched all docs within limit, close cursor server-side // fetched all docs within limit, close cursor server-side
killCursor(); killCursor();
@ -408,7 +385,7 @@ public class DBApiLayer extends DB {
} }
if ( ! _curResult.hasGetMore( _options ) ) if ( ! _curResult.hasGetMore( _options ) )
throw new RuntimeException( "no more" ); throw new NoSuchElementException("no more");
_advance(); _advance();
return next(); return next();
@ -433,7 +410,8 @@ public class DBApiLayer extends DB {
if ((_curResult._flags & Bytes.RESULTFLAG_AWAITCAPABLE) == 0) { if ((_curResult._flags & Bytes.RESULTFLAG_AWAITCAPABLE) == 0) {
try { try {
Thread.sleep(500); Thread.sleep(500);
} catch (Exception e) { } catch (InterruptedException e) {
throw new MongoInterruptedException(e);
} }
} }
} }
@ -447,12 +425,8 @@ public class DBApiLayer extends DB {
if ( _curResult.cursor() <= 0 ) if ( _curResult.cursor() <= 0 )
throw new RuntimeException( "can't advance a cursor <= 0" ); throw new RuntimeException( "can't advance a cursor <= 0" );
OutMessage m = new OutMessage( _mongo , 2005 ); OutMessage m = OutMessage.getMore(_collection, _curResult.cursor(),
chooseBatchSize(_batchSize, _limit, _numFetched));
m.writeInt( 0 );
m.writeCString( _collection._fullNameSpace );
m.writeInt( chooseBatchSize(_batchSize, _limit, _numFetched) );
m.writeLong( _curResult.cursor() );
Response res = _connector.call( DBApiLayer.this , _collection , m , _host, _decoder ); Response res = _connector.call( DBApiLayer.this , _collection , m , _host, _decoder );
_numGetMores++; _numGetMores++;
@ -475,18 +449,6 @@ public class DBApiLayer extends DB {
return "DBCursor"; return "DBCursor";
} }
protected void finalize() throws Throwable {
if (_curResult != null) {
long curId = _curResult.cursor();
_curResult = null;
_cur = null;
if (curId != 0) {
_deadCursorIds.add(new DeadCursor(curId, _host));
}
}
super.finalize();
}
public long totalBytes(){ public long totalBytes(){
return _totalBytes; return _totalBytes;
} }
@ -537,6 +499,10 @@ public class DBApiLayer extends DB {
return _host; return _host;
} }
boolean hasFinalizer() {
return _optionalFinalizer != null;
}
Response _curResult; Response _curResult;
Iterator<DBObject> _cur; Iterator<DBObject> _cur;
int _batchSize; int _batchSize;
@ -551,6 +517,23 @@ public class DBApiLayer extends DB {
private List<Integer> _sizes = new ArrayList<Integer>(); private List<Integer> _sizes = new ArrayList<Integer>();
private int _numFetched = 0; private int _numFetched = 0;
// This allows us to easily enable/disable finalizer for cleaning up un-closed cursors
private final OptionalFinalizer _optionalFinalizer;
private class OptionalFinalizer {
@Override
protected void finalize() {
if (_curResult != null) {
long curId = _curResult.cursor();
_curResult = null;
_cur = null;
if (curId != 0) {
_deadCursorIds.add(new DeadCursor(curId, _host));
}
}
}
}
} // class Result } // class Result
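The finalizer above is only a safety net for cursors that were never closed; closing cursors explicitly keeps it from doing any work. A usage sketch with the handles from the earlier examples:

    DBCursor cursor = coll.find(new BasicDBObject("path", "/index"));
    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close(); // kills the server-side cursor right away instead of waiting for GC
    }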
static class DeadCursor { static class DeadCursor {
@ -571,5 +554,4 @@ public class DBApiLayer extends DB {
ConcurrentLinkedQueue<DeadCursor> _deadCursorIds = new ConcurrentLinkedQueue<DeadCursor>(); ConcurrentLinkedQueue<DeadCursor> _deadCursorIds = new ConcurrentLinkedQueue<DeadCursor>();
static final List<DBObject> EMPTY = Collections.unmodifiableList( new LinkedList<DBObject>() );
} }

View File

@ -19,7 +19,12 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
// Mongo // Mongo
import com.massivecraft.mcore.xlib.bson.LazyDBList;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
@ -28,18 +33,16 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
/** This class provides a skeleton implementation of a database collection. /** This class provides a skeleton implementation of a database collection.
* <p>A typical invocation sequence is thus * <p>A typical invocation sequence is thus
* <blockquote><pre> * <blockquote><pre>
* Mongo mongo = new Mongo( new DBAddress( "localhost", 127017 ) ); * MongoClient mongoClient = new MongoClient(new ServerAddress("localhost", 27017));
* DB db = mongo.getDB( "mydb" ); * DB db = mongoClient.getDB("mydb");
* DBCollection collection = db.getCollection( "test" ); * DBCollection collection = db.getCollection("test");
* </pre></blockquote> * </pre></blockquote>
* @dochub collections * @dochub collections
*/ */
@SuppressWarnings("unchecked") @SuppressWarnings({"unchecked", "rawtypes"})
public abstract class DBCollection { public abstract class DBCollection {
/** /**
@ -53,7 +56,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub insert * @dochub insert
*/ */
public WriteResult insert(DBObject[] arr , WriteConcern concern ) throws MongoException { public WriteResult insert(DBObject[] arr , WriteConcern concern ){
return insert( arr, concern, getDBEncoder()); return insert( arr, concern, getDBEncoder());
} }
@ -69,7 +72,9 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub insert * @dochub insert
*/ */
public abstract WriteResult insert(DBObject[] arr , WriteConcern concern, DBEncoder encoder) throws MongoException; public WriteResult insert(DBObject[] arr , WriteConcern concern, DBEncoder encoder) {
return insert(Arrays.asList(arr), concern, encoder);
}
/** /**
* Inserts a document into the database. * Inserts a document into the database.
@ -82,9 +87,8 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub insert * @dochub insert
*/ */
public WriteResult insert(DBObject o , WriteConcern concern ) public WriteResult insert(DBObject o , WriteConcern concern ){
throws MongoException { return insert( Arrays.asList(o) , concern );
return insert( new DBObject[]{ o } , concern );
} }
/** /**
@ -97,8 +101,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub insert * @dochub insert
*/ */
public WriteResult insert(DBObject ... arr) public WriteResult insert(DBObject ... arr){
throws MongoException {
return insert( arr , getWriteConcern() ); return insert( arr , getWriteConcern() );
} }
@ -112,8 +115,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub insert * @dochub insert
*/ */
public WriteResult insert(WriteConcern concern, DBObject ... arr) public WriteResult insert(WriteConcern concern, DBObject ... arr){
throws MongoException {
return insert( arr, concern ); return insert( arr, concern );
} }
@ -127,8 +129,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub insert * @dochub insert
*/ */
public WriteResult insert(List<DBObject> list ) public WriteResult insert(List<DBObject> list ){
throws MongoException {
return insert( list, getWriteConcern() ); return insert( list, getWriteConcern() );
} }
@ -143,11 +144,23 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub insert * @dochub insert
*/ */
public WriteResult insert(List<DBObject> list, WriteConcern concern ) public WriteResult insert(List<DBObject> list, WriteConcern concern ){
throws MongoException { return insert(list, concern, getDBEncoder() );
return insert( list.toArray( new DBObject[list.size()] ) , concern );
} }
/**
* Saves document(s) to the database.
* If a document does not have an _id field, one will be added;
* the generated _id can be read back from the document after the insert.
*
* @param list list of documents to save
* @param concern the write concern
* @param encoder the DBEncoder to use
* @return the result of the write operation
* @throws MongoException
* @dochub insert
*/
public abstract WriteResult insert(List<DBObject> list, WriteConcern concern, DBEncoder encoder);
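A sketch of the List-based insert and the _id behaviour described above (collection and field names are illustrative, java.util imports assumed; not part of this commit):

    List<DBObject> docs = new ArrayList<DBObject>();
    docs.add(new BasicDBObject("name", "alpha"));
    docs.add(new BasicDBObject("name", "beta"));
    coll.insert(docs, WriteConcern.SAFE);
    // The driver generated an ObjectId for each document client-side:
    System.out.println(docs.get(0).get("_id"));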
/** /**
* Performs an update operation. * Performs an update operation.
* @param q search query for old object to update * @param q search query for old object to update
@ -161,7 +174,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub update * @dochub update
*/ */
public WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi , WriteConcern concern ) throws MongoException { public WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi , WriteConcern concern ){
return update( q, o, upsert, multi, concern, getDBEncoder()); return update( q, o, upsert, multi, concern, getDBEncoder());
} }
@ -179,7 +192,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub update * @dochub update
*/ */
public abstract WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi , WriteConcern concern, DBEncoder encoder ) throws MongoException ; public abstract WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi , WriteConcern concern, DBEncoder encoder );
/** /**
* calls {@link DBCollection#update(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, boolean, boolean, com.massivecraft.mcore.xlib.mongodb.WriteConcern)} with default WriteConcern. * calls {@link DBCollection#update(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, boolean, boolean, com.massivecraft.mcore.xlib.mongodb.WriteConcern)} with default WriteConcern.
@ -192,8 +205,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub update * @dochub update
*/ */
public WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi ) public WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi ){
throws MongoException {
return update( q , o , upsert , multi , getWriteConcern() ); return update( q , o , upsert , multi , getWriteConcern() );
} }
@ -205,7 +217,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub update * @dochub update
*/ */
public WriteResult update( DBObject q , DBObject o ) throws MongoException { public WriteResult update( DBObject q , DBObject o ){
return update( q , o , false , false ); return update( q , o , false , false );
} }
@ -217,7 +229,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub update * @dochub update
*/ */
public WriteResult updateMulti( DBObject q , DBObject o ) throws MongoException { public WriteResult updateMulti( DBObject q , DBObject o ){
return update( q , o , false , true ); return update( q , o , false , true );
} }
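A sketch of the update overloads above: a $set modifier changes fields in place, while the upsert and multi flags control insert-on-miss and multi-document updates (values illustrative):

    DBObject query = new BasicDBObject("name", "alpha");
    DBObject change = new BasicDBObject("$set", new BasicDBObject("visits", 1));
    coll.update(query, change);              // first matching document only, no upsert
    coll.updateMulti(query, change);         // every matching document
    coll.update(query, change, true, false); // upsert: insert if nothing matches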
@ -235,7 +247,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub remove * @dochub remove
*/ */
public WriteResult remove( DBObject o , WriteConcern concern ) throws MongoException { public WriteResult remove( DBObject o , WriteConcern concern ){
return remove( o, concern, getDBEncoder()); return remove( o, concern, getDBEncoder());
} }
@ -248,7 +260,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub remove * @dochub remove
*/ */
public abstract WriteResult remove( DBObject o , WriteConcern concern, DBEncoder encoder ) throws MongoException ; public abstract WriteResult remove( DBObject o , WriteConcern concern, DBEncoder encoder );
/** /**
* calls {@link DBCollection#remove(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.WriteConcern)} with the default WriteConcern * calls {@link DBCollection#remove(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.WriteConcern)} with the default WriteConcern
@ -257,8 +269,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub remove * @dochub remove
*/ */
public WriteResult remove( DBObject o ) public WriteResult remove( DBObject o ){
throws MongoException {
return remove( o , getWriteConcern() ); return remove( o , getWriteConcern() );
} }
@ -266,10 +277,10 @@ public abstract class DBCollection {
/** /**
* Finds objects * Finds objects
*/ */
abstract Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options, ReadPreference readPref, DBDecoder decoder ) throws MongoException ; abstract Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options, ReadPreference readPref, DBDecoder decoder );
abstract Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options, abstract Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options,
ReadPreference readPref, DBDecoder decoder, DBEncoder encoder ) throws MongoException ; ReadPreference readPref, DBDecoder decoder, DBEncoder encoder );
/** /**
@ -284,7 +295,7 @@ public abstract class DBCollection {
* @dochub find * @dochub find
*/ */
@Deprecated @Deprecated
public DBCursor find( DBObject query , DBObject fields , int numToSkip , int batchSize , int options ) throws MongoException{ public DBCursor find( DBObject query , DBObject fields , int numToSkip , int batchSize , int options ){
return find(query, fields, numToSkip, batchSize).addOption(options); return find(query, fields, numToSkip, batchSize).addOption(options);
} }
@ -317,8 +328,7 @@ public abstract class DBCollection {
* @return the object, if found, otherwise <code>null</code> * @return the object, if found, otherwise <code>null</code>
* @throws MongoException * @throws MongoException
*/ */
public DBObject findOne( Object obj ) public DBObject findOne( Object obj ){
throws MongoException {
return findOne(obj, null); return findOne(obj, null);
} }
@ -330,9 +340,10 @@ public abstract class DBCollection {
* @param obj any valid object * @param obj any valid object
* @param fields fields to return * @param fields fields to return
* @return the object, if found, otherwise <code>null</code> * @return the object, if found, otherwise <code>null</code>
* @throws MongoException
* @dochub find * @dochub find
*/ */
public DBObject findOne( Object obj, DBObject fields ) { public DBObject findOne( Object obj, DBObject fields ){
Iterator<DBObject> iterator = __find( new BasicDBObject("_id", obj), fields, 0, -1, 0, getOptions(), getReadPreference(), getDecoder() ); Iterator<DBObject> iterator = __find( new BasicDBObject("_id", obj), fields, 0, -1, 0, getOptions(), getReadPreference(), getDecoder() );
return (iterator.hasNext() ? iterator.next() : null); return (iterator.hasNext() ? iterator.next() : null);
} }
@ -347,8 +358,9 @@ public abstract class DBCollection {
* @param returnNew if true, the updated document is returned, otherwise the old document is returned (or it would be lost forever) * @param returnNew if true, the updated document is returned, otherwise the old document is returned (or it would be lost forever)
* @param upsert do upsert (insert if document not present) * @param upsert do upsert (insert if document not present)
* @return the document * @return the document
* @throws MongoException
*/ */
public DBObject findAndModify(DBObject query, DBObject fields, DBObject sort, boolean remove, DBObject update, boolean returnNew, boolean upsert) { public DBObject findAndModify(DBObject query, DBObject fields, DBObject sort, boolean remove, DBObject update, boolean returnNew, boolean upsert){
BasicDBObject cmd = new BasicDBObject( "findandmodify", _name); BasicDBObject cmd = new BasicDBObject( "findandmodify", _name);
if (query != null && !query.keySet().isEmpty()) if (query != null && !query.keySet().isEmpty())
@ -362,7 +374,7 @@ public abstract class DBCollection {
cmd.append( "remove", remove ); cmd.append( "remove", remove );
else { else {
if (update != null && !update.keySet().isEmpty()) { if (update != null && !update.keySet().isEmpty()) {
// if 1st key doesnt start with $, then object will be inserted as is, need to check it // if 1st key doesn't start with $, then object will be inserted as is, need to check it
String key = update.keySet().iterator().next(); String key = update.keySet().iterator().next();
if (key.charAt(0) != '$') if (key.charAt(0) != '$')
_checkObject(update, false, false); _checkObject(update, false, false);
@ -423,8 +435,9 @@ public abstract class DBCollection {
* @param sort * @param sort
* @param update * @param update
* @return the old document * @return the old document
* @throws MongoException
*/ */
public DBObject findAndModify( DBObject query , DBObject sort , DBObject update){ public DBObject findAndModify( DBObject query , DBObject sort , DBObject update) {
return findAndModify( query, null, sort, false, update, false, false); return findAndModify( query, null, sort, false, update, false, false);
} }
@ -434,8 +447,9 @@ public abstract class DBCollection {
* @param query * @param query
* @param update * @param update
* @return the old document * @return the old document
* @throws MongoException
*/ */
public DBObject findAndModify( DBObject query , DBObject update ) { public DBObject findAndModify( DBObject query , DBObject update ){
return findAndModify( query, null, null, false, update, false, false ); return findAndModify( query, null, null, false, update, false, false );
} }
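A sketch of findAndModify used as an atomic read-and-increment, e.g. a counter document that returns the post-increment value (names illustrative):

    DBObject counter = coll.findAndModify(
            new BasicDBObject("_id", "pageViews"),                 // query
            null,                                                  // fields: return all
            null,                                                  // sort
            false,                                                 // remove
            new BasicDBObject("$inc", new BasicDBObject("n", 1)),  // update
            true,                                                  // returnNew: value after the increment
            true);                                                 // upsert: create the counter if missing
    long n = ((Number) counter.get("n")).longValue();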
@ -444,6 +458,7 @@ public abstract class DBCollection {
* with fields=null, sort=null, remove=true, returnNew=false, upsert=false * with fields=null, sort=null, remove=true, returnNew=false, upsert=false
* @param query * @param query
* @return the removed document * @return the removed document
* @throws MongoException
*/ */
public DBObject findAndRemove( DBObject query ) { public DBObject findAndRemove( DBObject query ) {
return findAndModify( query, null, null, true, null, false, false ); return findAndModify( query, null, null, true, null, false, false );
@ -456,8 +471,7 @@ public abstract class DBCollection {
* @param keys an object with a key set of the fields desired for the index * @param keys an object with a key set of the fields desired for the index
* @throws MongoException * @throws MongoException
*/ */
public void createIndex( final DBObject keys ) public void createIndex( final DBObject keys ){
throws MongoException {
createIndex( keys , defaultOptions( keys ) ); createIndex( keys , defaultOptions( keys ) );
} }
@ -467,7 +481,7 @@ public abstract class DBCollection {
* @param options * @param options
* @throws MongoException * @throws MongoException
*/ */
public void createIndex( DBObject keys , DBObject options ) throws MongoException { public void createIndex( DBObject keys , DBObject options ){
createIndex( keys, options, getDBEncoder()); createIndex( keys, options, getDBEncoder());
} }
@ -478,11 +492,12 @@ public abstract class DBCollection {
* @param encoder the DBEncoder to use * @param encoder the DBEncoder to use
* @throws MongoException * @throws MongoException
*/ */
public abstract void createIndex( DBObject keys , DBObject options, DBEncoder encoder ) throws MongoException; public abstract void createIndex( DBObject keys , DBObject options, DBEncoder encoder );
/** /**
* Creates an ascending index on a field with default options, if one does not already exist. * Creates an ascending index on a field with default options, if one does not already exist.
* @param name name of field to index on * @param name name of field to index on
* @throws MongoException
*/ */
public void ensureIndex( final String name ){ public void ensureIndex( final String name ){
ensureIndex( new BasicDBObject( name , 1 ) ); ensureIndex( new BasicDBObject( name , 1 ) );
@ -493,8 +508,7 @@ public abstract class DBCollection {
* @param keys an object with a key set of the fields desired for the index * @param keys an object with a key set of the fields desired for the index
* @throws MongoException * @throws MongoException
*/ */
public void ensureIndex( final DBObject keys ) public void ensureIndex( final DBObject keys ){
throws MongoException {
ensureIndex( keys , defaultOptions( keys ) ); ensureIndex( keys , defaultOptions( keys ) );
} }
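A sketch of the index helpers above; ensureIndex remembers key patterns it has already created in this process, so repeated calls are cheap (names illustrative):

    coll.ensureIndex(new BasicDBObject("path", 1));                        // ascending single-field index
    coll.ensureIndex(new BasicDBObject("path", 1).append("ts", -1),        // compound index with a name
                     "path_ts_idx");
    coll.ensureIndex(new BasicDBObject("email", 1), "email_unique", true); // unique index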
@ -505,8 +519,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub indexes * @dochub indexes
*/ */
public void ensureIndex( DBObject keys , String name ) public void ensureIndex( DBObject keys , String name ){
throws MongoException {
ensureIndex( keys , name , false ); ensureIndex( keys , name , false );
} }
@ -517,8 +530,7 @@ public abstract class DBCollection {
* @param unique if the index should be unique * @param unique if the index should be unique
* @throws MongoException * @throws MongoException
*/ */
public void ensureIndex( DBObject keys , String name , boolean unique ) public void ensureIndex( DBObject keys , String name , boolean unique ){
throws MongoException {
DBObject options = defaultOptions( keys ); DBObject options = defaultOptions( keys );
if (name != null && name.length()>0) if (name != null && name.length()>0)
options.put( "name" , name ); options.put( "name" , name );
@ -533,8 +545,7 @@ public abstract class DBCollection {
* @param optionsIN options for the index (name, unique, etc) * @param optionsIN options for the index (name, unique, etc)
* @throws MongoException * @throws MongoException
*/ */
public void ensureIndex( final DBObject keys , final DBObject optionsIN ) public void ensureIndex( final DBObject keys , final DBObject optionsIN ){
throws MongoException {
if ( checkReadOnly( false ) ) return; if ( checkReadOnly( false ) ) return;
@ -645,8 +656,7 @@ public abstract class DBCollection {
* @return the object found, or <code>null</code> if the collection is empty * @return the object found, or <code>null</code> if the collection is empty
* @throws MongoException * @throws MongoException
*/ */
public DBObject findOne() public DBObject findOne(){
throws MongoException {
return findOne( new BasicDBObject() ); return findOne( new BasicDBObject() );
} }
@ -656,9 +666,8 @@ public abstract class DBCollection {
* @return the object found, or <code>null</code> if no such object exists * @return the object found, or <code>null</code> if no such object exists
* @throws MongoException * @throws MongoException
*/ */
public DBObject findOne( DBObject o ) public DBObject findOne( DBObject o ){
throws MongoException { return findOne( o, null, null, getReadPreference());
return findOne( o, null, getReadPreference());
} }
/** /**
@ -666,20 +675,58 @@ public abstract class DBCollection {
* @param o the query object * @param o the query object
* @param fields fields to return * @param fields fields to return
* @return the object found, or <code>null</code> if no such object exists * @return the object found, or <code>null</code> if no such object exists
* @throws MongoException
* @dochub find * @dochub find
*/ */
public DBObject findOne( DBObject o, DBObject fields ) { public DBObject findOne( DBObject o, DBObject fields ) {
return findOne( o, fields, getReadPreference()); return findOne( o, fields, null, getReadPreference());
} }
/**
* Returns a single object from this collection matching the query.
* @param o the query object
* @param fields fields to return
* @param orderBy fields to order by
* @return the object found, or <code>null</code> if no such object exists
* @throws MongoException
* @dochub find
*/
public DBObject findOne( DBObject o, DBObject fields, DBObject orderBy){
return findOne(o, fields, orderBy, getReadPreference());
}
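A sketch of the new findOne overloads that take a sort order and, optionally, an explicit read preference (field and collection names illustrative):

    DBObject newest = coll.findOne(
            new BasicDBObject("path", "/index"),   // query
            new BasicDBObject("ts", 1),            // fields: project just the timestamp
            new BasicDBObject("ts", -1));          // orderBy: newest first
    DBObject fromSecondary = coll.findOne(new BasicDBObject(), null, null,
            ReadPreference.secondaryPreferred());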
/** /**
* Returns a single object from this collection matching the query. * Returns a single object from this collection matching the query.
* @param o the query object * @param o the query object
* @param fields fields to return * @param fields fields to return
* @param readPref the read preference to use for this query
* @return the object found, or <code>null</code> if no such object exists * @return the object found, or <code>null</code> if no such object exists
* @throws MongoException
* @dochub find * @dochub find
*/ */
public DBObject findOne( DBObject o, DBObject fields, ReadPreference readPref ) { public DBObject findOne( DBObject o, DBObject fields, ReadPreference readPref ){
Iterator<DBObject> i = __find( o , fields , 0 , -1 , 0, getOptions(), readPref, getDecoder() ); return findOne(o, fields, null, readPref);
}
/**
* Returns a single object from this collection matching the query.
* @param o the query object
* @param fields fields to return
* @param orderBy fields to order by
* @return the object found, or <code>null</code> if no such object exists
* @throws MongoException
* @dochub find
*/
public DBObject findOne( DBObject o, DBObject fields, DBObject orderBy, ReadPreference readPref ){
QueryOpBuilder queryOpBuilder = new QueryOpBuilder().addQuery(o).addOrderBy(orderBy);
if (getDB().getMongo().isMongosConnection()) {
queryOpBuilder.addReadPreference(readPref.toDBObject());
}
Iterator<DBObject> i = __find(queryOpBuilder.get(), fields , 0 , -1 , 0, getOptions(), readPref, getDecoder() );
DBObject obj = (i.hasNext() ? i.next() : null); DBObject obj = (i.hasNext() ? i.next() : null);
if ( obj != null && ( fields != null && fields.keySet().size() > 0 ) ){ if ( obj != null && ( fields != null && fields.keySet().size() > 0 ) ){
obj.markAsPartialObject(); obj.markAsPartialObject();
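The widened findOne family above accepts an orderBy document and a ReadPreference alongside the query and projection. A sketch assuming an existing DBCollection coll and illustrative field names:

    // Highest-scoring active document, projecting two fields, read from a secondary if allowed.
    DBObject query   = new BasicDBObject("active", true);
    DBObject fields  = new BasicDBObject("name", 1).append("score", 1);
    DBObject orderBy = new BasicDBObject("score", -1);

    DBObject best = coll.findOne(query, fields, orderBy, ReadPreference.secondaryPreferred());
    if (best != null) {
        System.out.println(best.get("name"));
    }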
@ -733,8 +780,9 @@ public abstract class DBCollection {
* @param jo the <code>DBObject</code> to save * @param jo the <code>DBObject</code> to save
* will add <code>_id</code> field to jo if needed * will add <code>_id</code> field to jo if needed
* @return * @return
* @throws MongoException
*/ */
public WriteResult save( DBObject jo ) { public WriteResult save( DBObject jo ){
return save(jo, getWriteConcern()); return save(jo, getWriteConcern());
} }
@ -745,8 +793,7 @@ public abstract class DBCollection {
* @return * @return
* @throws MongoException * @throws MongoException
*/ */
public WriteResult save( DBObject jo, WriteConcern concern ) public WriteResult save( DBObject jo, WriteConcern concern ){
throws MongoException {
if ( checkReadOnly( true ) ) if ( checkReadOnly( true ) )
return null; return null;
@ -777,8 +824,7 @@ public abstract class DBCollection {
* Drops all indices from this collection * Drops all indices from this collection
* @throws MongoException * @throws MongoException
*/ */
public void dropIndexes() public void dropIndexes(){
throws MongoException {
dropIndexes( "*" ); dropIndexes( "*" );
} }
@ -788,8 +834,7 @@ public abstract class DBCollection {
* @param name the index name * @param name the index name
* @throws MongoException * @throws MongoException
*/ */
public void dropIndexes( String name ) public void dropIndexes( String name ){
throws MongoException {
DBObject cmd = BasicDBObjectBuilder.start() DBObject cmd = BasicDBObjectBuilder.start()
.add( "deleteIndexes" , getName() ) .add( "deleteIndexes" , getName() )
.add( "index" , name ) .add( "index" , name )
@ -806,8 +851,7 @@ public abstract class DBCollection {
* Drops (deletes) this collection. Use with care. * Drops (deletes) this collection. Use with care.
* @throws MongoException * @throws MongoException
*/ */
public void drop() public void drop(){
throws MongoException {
resetIndexCache(); resetIndexCache();
CommandResult res =_db.command( BasicDBObjectBuilder.start().add( "drop" , getName() ).get() ); CommandResult res =_db.command( BasicDBObjectBuilder.start().add( "drop" , getName() ).get() );
if (res.ok() || res.getErrorMessage().equals( "ns not found" )) if (res.ok() || res.getErrorMessage().equals( "ns not found" ))
@ -820,8 +864,7 @@ public abstract class DBCollection {
* @return * @return
* @throws MongoException * @throws MongoException
*/ */
public long count() public long count(){
throws MongoException {
return getCount(new BasicDBObject(), null); return getCount(new BasicDBObject(), null);
} }
@ -831,45 +874,88 @@ public abstract class DBCollection {
* @return * @return
* @throws MongoException * @throws MongoException
*/ */
public long count(DBObject query) public long count(DBObject query){
throws MongoException {
return getCount(query, null); return getCount(query, null);
} }
/**
* returns the number of documents that match a query.
* @param query query to match
* @param readPrefs ReadPreferences for this query
* @return
* @throws MongoException
*/
public long count(DBObject query, ReadPreference readPrefs ){
return getCount(query, null, readPrefs);
}
/** /**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject)} with an empty query and null fields. * calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject)} with an empty query and null fields.
* @return number of documents that match query * @return number of documents that match query
* @throws MongoException * @throws MongoException
*/ */
public long getCount() public long getCount(){
throws MongoException {
return getCount(new BasicDBObject(), null); return getCount(new BasicDBObject(), null);
} }
/**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.ReadPreference)} with empty query and null fields.
* @param readPrefs ReadPreferences for this command
* @return number of documents that match query
* @throws MongoException
*/
public long getCount(ReadPreference readPrefs){
return getCount(new BasicDBObject(), null, readPrefs);
}
/** /**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject)} with null fields. * calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject)} with null fields.
* @param query query to match * @param query query to match
* @return * @return
* @throws MongoException * @throws MongoException
*/ */
public long getCount(DBObject query) public long getCount(DBObject query){
throws MongoException {
return getCount(query, null); return getCount(query, null);
} }
/** /**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, long, long)} with limit=0 and skip=0 * calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, long, long)} with limit=0 and skip=0
* @param query query to match * @param query query to match
* @param fields fields to return * @param fields fields to return
* @return * @return
* @throws MongoException * @throws MongoException
*/ */
public long getCount(DBObject query, DBObject fields) public long getCount(DBObject query, DBObject fields){
throws MongoException {
return getCount( query , fields , 0 , 0 ); return getCount( query , fields , 0 , 0 );
} }
/**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, long, long, com.massivecraft.mcore.xlib.mongodb.ReadPreference)} with limit=0 and skip=0
* @param query query to match
* @param fields fields to return
* @param readPrefs ReadPreferences for this command
* @return
* @throws MongoException
*/
public long getCount(DBObject query, DBObject fields, ReadPreference readPrefs){
return getCount( query , fields , 0 , 0, readPrefs );
}
/**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, long, long, com.massivecraft.mcore.xlib.mongodb.ReadPreference)} with the DBCollection's ReadPreference
* @param query query to match
* @param fields fields to return
* @param limit limit the count to this value
* @param skip skip number of entries to skip
* @return
* @throws MongoException
*/
public long getCount(DBObject query, DBObject fields, long limit, long skip){
return getCount(query, fields, limit, skip, getReadPreference());
}
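These count/getCount overloads layer limit, skip and a ReadPreference onto the server-side count command. A minimal sketch, again assuming an existing DBCollection coll and an illustrative query:

    DBObject query = new BasicDBObject("active", true);

    // Whole-collection match count, allowed to run on a secondary.
    long total = coll.count(query, ReadPreference.secondaryPreferred());

    // Windowed count: skip 50 matches, count at most 100.
    long windowed = coll.getCount(query, null, 100, 50, ReadPreference.secondaryPreferred());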
/** /**
* Returns the number of documents in the collection * Returns the number of documents in the collection
* that match the specified query * that match the specified query
@ -877,13 +963,13 @@ public abstract class DBCollection {
* @param query query to select documents to count * @param query query to select documents to count
* @param fields fields to return * @param fields fields to return
* @param limit limit the count to this value * @param limit limit the count to this value
* @param skip number of entries to skip * @param skip number of entries to skip
* @return number of documents that match query and fields * @param readPrefs ReadPreferences for this command
* @throws MongoException * @return number of documents that match query and fields
* @throws MongoException
*/ */
public long getCount(DBObject query, DBObject fields, long limit, long skip )
throws MongoException {
public long getCount(DBObject query, DBObject fields, long limit, long skip, ReadPreference readPrefs ){
BasicDBObject cmd = new BasicDBObject(); BasicDBObject cmd = new BasicDBObject();
cmd.put("count", getName()); cmd.put("count", getName());
cmd.put("query", query); cmd.put("query", query);
@ -896,8 +982,7 @@ public abstract class DBCollection {
if ( skip > 0 ) if ( skip > 0 )
cmd.put( "skip" , skip ); cmd.put( "skip" , skip );
CommandResult res = _db.command(cmd,getOptions()); CommandResult res = _db.command(cmd,getOptions(),readPrefs);
if ( ! res.ok() ){ if ( ! res.ok() ){
String errmsg = res.getErrorMessage(); String errmsg = res.getErrorMessage();
@ -913,14 +998,17 @@ public abstract class DBCollection {
return res.getLong("n"); return res.getLong("n");
} }
CommandResult command(DBObject cmd, int options, ReadPreference readPrefs){
return _db.command(cmd,getOptions(),readPrefs);
}
/** /**
* Calls {@link DBCollection#rename(java.lang.String, boolean)} with dropTarget=false * Calls {@link DBCollection#rename(java.lang.String, boolean)} with dropTarget=false
* @param newName new collection name (not a full namespace) * @param newName new collection name (not a full namespace)
* @return the new collection * @return the new collection
* @throws MongoException * @throws MongoException
*/ */
public DBCollection rename( String newName ) public DBCollection rename( String newName ){
throws MongoException {
return rename(newName, false); return rename(newName, false);
} }
@ -931,8 +1019,7 @@ public abstract class DBCollection {
* @return the new collection * @return the new collection
* @throws MongoException * @throws MongoException
*/ */
public DBCollection rename( String newName, boolean dropTarget ) public DBCollection rename( String newName, boolean dropTarget ){
throws MongoException {
CommandResult ret = CommandResult ret =
_db.getSisterDB( "admin" ) _db.getSisterDB( "admin" )
.command( BasicDBObjectBuilder.start() .command( BasicDBObjectBuilder.start()
@ -955,8 +1042,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a> * @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/ */
public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce ) public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce ){
throws MongoException {
return group( key , cond , initial , reduce , null ); return group( key , cond , initial , reduce , null );
} }
@ -971,12 +1057,28 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a> * @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/ */
public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce , String finalize ) public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce , String finalize ){
throws MongoException {
GroupCommand cmd = new GroupCommand(this, key, cond, initial, reduce, finalize); GroupCommand cmd = new GroupCommand(this, key, cond, initial, reduce, finalize);
return group( cmd ); return group( cmd );
} }
/**
* Applies a group operation
* @param key - { a : true }
* @param cond - optional condition on query
* @param reduce javascript reduce function
* @param initial initial value for first match on a key
* @param finalize An optional function that can operate on the result(s) of the reduce function.
* @param readPrefs ReadPreferences for this command
* @return
* @throws MongoException
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/
public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce , String finalize, ReadPreference readPrefs ){
GroupCommand cmd = new GroupCommand(this, key, cond, initial, reduce, finalize);
return group( cmd, readPrefs );
}
/** /**
* Applies a group operation * Applies a group operation
* @param cmd the group command * @param cmd the group command
@ -985,12 +1087,23 @@ public abstract class DBCollection {
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a> * @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/ */
public DBObject group( GroupCommand cmd ) { public DBObject group( GroupCommand cmd ) {
CommandResult res = _db.command( cmd.toDBObject(), getOptions() ); return group(cmd, getReadPreference());
}
/**
* Applies a group operation
* @param cmd the group command
* @param readPrefs ReadPreferences for this command
* @return
* @throws MongoException
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/
public DBObject group( GroupCommand cmd, ReadPreference readPrefs ) {
CommandResult res = _db.command( cmd.toDBObject(), getOptions(), readPrefs );
res.throwOnError(); res.throwOnError();
return (DBObject)res.get( "retval" ); return (DBObject)res.get( "retval" );
} }
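A sketch of the ReadPreference-aware group path above, driven through GroupCommand; the key, condition and reduce function are illustrative only:

    // Sum "score" per "team" for active documents.
    DBObject key     = new BasicDBObject("team", true);
    DBObject cond    = new BasicDBObject("active", true);
    DBObject initial = new BasicDBObject("total", 0);
    String reduce    = "function(doc, out) { out.total += doc.score; }";

    GroupCommand cmd = new GroupCommand(coll, key, cond, initial, reduce, null);
    DBObject result  = coll.group(cmd, ReadPreference.secondaryPreferred());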
/** /**
* @deprecated prefer the {@link DBCollection#group(com.massivecraft.mcore.xlib.mongodb.GroupCommand)} which is more standard * @deprecated prefer the {@link DBCollection#group(com.massivecraft.mcore.xlib.mongodb.GroupCommand)} which is more standard
* Applies a group operation * Applies a group operation
@ -1000,10 +1113,9 @@ public abstract class DBCollection {
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a> * @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/ */
@Deprecated @Deprecated
public DBObject group( DBObject args ) public DBObject group( DBObject args ){
throws MongoException {
args.put( "ns" , getName() ); args.put( "ns" , getName() );
CommandResult res = _db.command( new BasicDBObject( "group" , args ), getOptions() ); CommandResult res = _db.command( new BasicDBObject( "group" , args ), getOptions(), getReadPreference() );
res.throwOnError(); res.throwOnError();
return (DBObject)res.get( "retval" ); return (DBObject)res.get( "retval" );
} }
@ -1012,27 +1124,50 @@ public abstract class DBCollection {
* find distinct values for a key * find distinct values for a key
* @param key * @param key
* @return * @return
* @throws MongoException
*/ */
@SuppressWarnings("rawtypes") public List distinct( String key ){
public List distinct( String key ){
return distinct( key , new BasicDBObject() ); return distinct( key , new BasicDBObject() );
} }
/**
* find distinct values for a key
* @param key
* @param readPrefs
* @return
* @throws MongoException
*/
public List distinct( String key, ReadPreference readPrefs ){
return distinct( key , new BasicDBObject(), readPrefs );
}
/** /**
* find distinct values for a key * find distinct values for a key
* @param key * @param key
* @param query query to match * @param query query to match
* @return * @return
* @throws MongoException
*/ */
@SuppressWarnings("rawtypes") public List distinct( String key , DBObject query ){
public List distinct( String key , DBObject query ){ return distinct(key, query, getReadPreference());
}
/**
* find distinct values for a key
* @param key
* @param query query to match
* @param readPrefs
* @return
* @throws MongoException
*/
public List distinct( String key , DBObject query, ReadPreference readPrefs ){
DBObject c = BasicDBObjectBuilder.start() DBObject c = BasicDBObjectBuilder.start()
.add( "distinct" , getName() ) .add( "distinct" , getName() )
.add( "key" , key ) .add( "key" , key )
.add( "query" , query ) .add( "query" , query )
.get(); .get();
CommandResult res = _db.command( c, getOptions() ); CommandResult res = _db.command( c, getOptions(), readPrefs );
res.throwOnError(); res.throwOnError();
return (List)(res.get( "values" )); return (List)(res.get( "values" ));
} }
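A brief sketch of the new distinct overloads, assuming an existing DBCollection coll; the field name and filter are placeholders:

    // Distinct "team" values among active documents, readable from a secondary.
    List teams = coll.distinct("team", new BasicDBObject("active", true),
                               ReadPreference.secondaryPreferred());   // raw List, mirroring the signature above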
@ -1053,7 +1188,7 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub mapreduce * @dochub mapreduce
*/ */
public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , DBObject query ) throws MongoException{ public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , DBObject query ){
return mapReduce( new MapReduceCommand( this , map , reduce , outputTarget , MapReduceCommand.OutputType.REPLACE, query ) ); return mapReduce( new MapReduceCommand( this , map , reduce , outputTarget , MapReduceCommand.OutputType.REPLACE, query ) );
} }
@ -1080,11 +1215,41 @@ public abstract class DBCollection {
* @throws MongoException * @throws MongoException
* @dochub mapreduce * @dochub mapreduce
*/ */
public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , MapReduceCommand.OutputType outputType , DBObject query ) public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , MapReduceCommand.OutputType outputType , DBObject query ){
throws MongoException{
return mapReduce( new MapReduceCommand( this , map , reduce , outputTarget , outputType , query ) ); return mapReduce( new MapReduceCommand( this , map , reduce , outputTarget , outputType , query ) );
} }
/**
* performs a map reduce operation
* Specify an outputType to control job execution
* * INLINE - Return results inline
* * REPLACE - Replace the output collection with the job output
* * MERGE - Merge the job output with the existing contents of outputTarget
* * REDUCE - Reduce the job output with the existing contents of
* outputTarget
*
* @param map
* map function in javascript code
* @param outputTarget
* optional - leave null if you want to use a temp collection
* @param outputType
* set the type of job output
* @param reduce
* reduce function in javascript code
* @param query
* to match
* @param readPrefs
* ReadPreferences for this operation
* @return
* @throws MongoException
* @dochub mapreduce
*/
public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , MapReduceCommand.OutputType outputType , DBObject query, ReadPreference readPrefs ){
MapReduceCommand command = new MapReduceCommand( this , map , reduce , outputTarget , outputType , query );
command.setReadPreference(readPrefs);
return mapReduce( command );
}
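A sketch of an inline map/reduce through the new ReadPreference-aware overload; the JavaScript functions and query are illustrative, and INLINE output means no target collection is written:

    String map    = "function() { emit(this.team, this.score); }";
    String reduce = "function(key, values) { return Array.sum(values); }";

    MapReduceOutput out = coll.mapReduce(map, reduce, null /* no output collection */,
                                         MapReduceCommand.OutputType.INLINE,
                                         new BasicDBObject("active", true),
                                         ReadPreference.secondaryPreferred());
    for (DBObject doc : out.results()) {
        System.out.println(doc);
    }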
/** /**
* performs a map reduce operation * performs a map reduce operation
* *
@ -1093,14 +1258,10 @@ public abstract class DBCollection {
* @return * @return
* @throws MongoException * @throws MongoException
*/ */
public MapReduceOutput mapReduce( MapReduceCommand command ) throws MongoException{ public MapReduceOutput mapReduce( MapReduceCommand command ){
DBObject cmd = command.toDBObject(); DBObject cmd = command.toDBObject();
// if type in inline, then query options like slaveOk is fine // if type in inline, then query options like slaveOk is fine
CommandResult res = null; CommandResult res = _db.command( cmd, getOptions(), command.getReadPreference() != null ? command.getReadPreference() : getReadPreference() );
if (command.getOutputType() == MapReduceCommand.OutputType.INLINE)
res = _db.command( cmd, getOptions(), command.getReadPreference() != null ? command.getReadPreference() : getReadPreference() );
else
res = _db.command( cmd );
res.throwOnError(); res.throwOnError();
return new MapReduceOutput( this , cmd, res ); return new MapReduceOutput( this , cmd, res );
} }
@ -1113,19 +1274,47 @@ public abstract class DBCollection {
* @return * @return
* @throws MongoException * @throws MongoException
*/ */
public MapReduceOutput mapReduce( DBObject command ) throws MongoException{ public MapReduceOutput mapReduce( DBObject command ){
if ( command.get( "mapreduce" ) == null && command.get( "mapReduce" ) == null ) if ( command.get( "mapreduce" ) == null && command.get( "mapReduce" ) == null )
throw new IllegalArgumentException( "need mapreduce arg" ); throw new IllegalArgumentException( "need mapreduce arg" );
CommandResult res = _db.command( command ); CommandResult res = _db.command( command, getOptions(), getReadPreference() );
res.throwOnError(); res.throwOnError();
return new MapReduceOutput( this , command, res ); return new MapReduceOutput( this , command, res );
} }
/**
* performs an aggregation operation
*
* @param firstOp
* requisite first operation to be performed in the aggregation pipeline
*
* @param additionalOps
* additional operations to be performed in the aggregation pipeline
* @return The aggregation operation's result set
*
*/
public AggregationOutput aggregate( DBObject firstOp, DBObject ... additionalOps){
if (firstOp == null)
throw new IllegalArgumentException("aggregate can not accept null pipeline operation");
DBObject command = new BasicDBObject("aggregate", _name );
List<DBObject> pipelineOps = new ArrayList<DBObject>();
pipelineOps.add(firstOp);
Collections.addAll(pipelineOps, additionalOps);
command.put( "pipeline", pipelineOps );
CommandResult res = _db.command( command, getOptions(), getReadPreference() );
res.throwOnError();
return new AggregationOutput( command, res );
}
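The aggregate helper above requires at least one pipeline operator and forwards the rest as varargs. A hedged sketch of a $match plus $group pipeline with assumed field names:

    DBObject match = new BasicDBObject("$match", new BasicDBObject("active", true));
    DBObject group = new BasicDBObject("$group",
            new BasicDBObject("_id", "$team")
                    .append("total", new BasicDBObject("$sum", "$score")));

    AggregationOutput out = coll.aggregate(match, group);
    for (DBObject doc : out.results()) {
        System.out.println(doc);
    }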
/** /**
* Return a list of the indexes for this collection. Each object * Return a list of the indexes for this collection. Each object
* in the list is the "info document" from MongoDB * in the list is the "info document" from MongoDB
* *
* @return list of index documents * @return list of index documents
* @throws MongoException
*/ */
public List<DBObject> getIndexInfo() { public List<DBObject> getIndexInfo() {
BasicDBObject cmd = new BasicDBObject(); BasicDBObject cmd = new BasicDBObject();
@ -1147,8 +1336,7 @@ public abstract class DBCollection {
* @param keys keys of the index * @param keys keys of the index
* @throws MongoException * @throws MongoException
*/ */
public void dropIndex( DBObject keys ) public void dropIndex( DBObject keys ){
throws MongoException {
dropIndexes( genIndexName( keys ) ); dropIndexes( genIndexName( keys ) );
} }
@ -1157,27 +1345,28 @@ public abstract class DBCollection {
* @param name name of index to drop * @param name name of index to drop
* @throws MongoException * @throws MongoException
*/ */
public void dropIndex( String name ) public void dropIndex( String name ){
throws MongoException {
dropIndexes( name ); dropIndexes( name );
} }
/** /**
* gets the collections statistics ("collstats" command) * gets the collections statistics ("collstats" command)
* @return * @return
* @throws MongoException
*/ */
public CommandResult getStats() { public CommandResult getStats() {
return getDB().command(new BasicDBObject("collstats", getName()), getOptions()); return getDB().command(new BasicDBObject("collstats", getName()), getOptions(), getReadPreference());
} }
/** /**
* returns whether or not this is a capped collection * returns whether or not this is a capped collection
* @return * @return
* @throws MongoException
*/ */
public boolean isCapped() { public boolean isCapped() {
CommandResult stats = getStats(); CommandResult stats = getStats();
Object capped = stats.get("capped"); Object capped = stats.get("capped");
return(capped != null && (Integer)capped == 1); return(capped != null && ( capped.equals(1) || capped.equals(true) ) );
} }
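isCapped() now accepts the server reporting capped as either a boolean or the legacy numeric flag. An illustrative round trip, with collection name and size as assumptions:

    // Create a small capped collection and check the flag.
    DBObject opts = new BasicDBObject("capped", true).append("size", 1048576);
    DBCollection log = db.createCollection("eventlog", opts);
    boolean capped = log.isCapped();   // true whether the server returns true or 1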
// ------ // ------
@ -1214,6 +1403,9 @@ public abstract class DBCollection {
* Checks key strings for invalid characters. * Checks key strings for invalid characters.
*/ */
private void _checkKeys( DBObject o ) { private void _checkKeys( DBObject o ) {
if ( o instanceof LazyDBObject || o instanceof LazyDBList )
return;
for ( String s : o.keySet() ){ for ( String s : o.keySet() ){
validateKey ( s ); validateKey ( s );
Object inner = o.get( s ); Object inner = o.get( s );
@ -1329,8 +1521,7 @@ public abstract class DBCollection {
* @param c the class * @param c the class
* @throws IllegalArgumentException if <code>c</code> is not a DBObject * @throws IllegalArgumentException if <code>c</code> is not a DBObject
*/ */
@SuppressWarnings("rawtypes") public void setObjectClass( Class c ){
public void setObjectClass( Class c ){
if ( c == null ){ if ( c == null ){
// reset // reset
_wrapper = null; _wrapper = null;
@ -1351,8 +1542,7 @@ public abstract class DBCollection {
* Gets the default class for objects in the collection * Gets the default class for objects in the collection
* @return the class * @return the class
*/ */
@SuppressWarnings("rawtypes") public Class getObjectClass(){
public Class getObjectClass(){
return _objectClass; return _objectClass;
} }
@ -1361,8 +1551,7 @@ public abstract class DBCollection {
* @param path * @param path
* @param c * @param c
*/ */
@SuppressWarnings("rawtypes") public void setInternalClass( String path , Class c ){
public void setInternalClass( String path , Class c ){
_internalClass.put( path , c ); _internalClass.put( path , c );
} }
@ -1371,8 +1560,7 @@ public abstract class DBCollection {
* @param path * @param path
* @return * @return
*/ */
@SuppressWarnings("rawtypes") protected Class getInternalClass( String path ){
protected Class getInternalClass( String path ){
Class c = _internalClass.get( path ); Class c = _internalClass.get( path );
if ( c != null ) if ( c != null )
return c; return c;
@ -1427,8 +1615,8 @@ public abstract class DBCollection {
/** /**
* makes this query ok to run on a slave node * makes this query ok to run on a slave node
* *
* @deprecated Replaced with ReadPreference.SECONDARY * @deprecated Replaced with {@code ReadPreference.secondaryPreferred()}
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference.SECONDARY * @see com.massivecraft.mcore.xlib.mongodb.ReadPreference#secondaryPreferred()
*/ */
@Deprecated @Deprecated
public void slaveOk(){ public void slaveOk(){
@ -1512,10 +1700,8 @@ public abstract class DBCollection {
private DBEncoderFactory _encoderFactory; private DBEncoderFactory _encoderFactory;
final Bytes.OptionHolder _options; final Bytes.OptionHolder _options;
@SuppressWarnings("rawtypes") protected Class _objectClass = null;
protected Class _objectClass = null; private Map<String,Class> _internalClass = Collections.synchronizedMap( new HashMap<String,Class>() );
@SuppressWarnings("rawtypes")
private Map<String,Class> _internalClass = Collections.synchronizedMap( new HashMap<String,Class>() );
private ReflectionDBObject.JavaWrapper _wrapper = null; private ReflectionDBObject.JavaWrapper _wrapper = null;
final private Set<String> _createdIndexes = new HashSet<String>(); final private Set<String> _createdIndexes = new HashSet<String>();

View File

@ -47,7 +47,7 @@ public interface DBConnector {
* @return the write result * @return the write result
* @throws MongoException * @throws MongoException
*/ */
public WriteResult say( DB db , OutMessage m , WriteConcern concern ) throws MongoException; public WriteResult say( DB db , OutMessage m , WriteConcern concern );
/** /**
* does a write operation * does a write operation
* @param db the database * @param db the database
@ -57,7 +57,7 @@ public interface DBConnector {
* @return the write result * @return the write result
* @throws MongoException * @throws MongoException
*/ */
public WriteResult say( DB db , OutMessage m , WriteConcern concern , ServerAddress hostNeeded ) throws MongoException; public WriteResult say( DB db , OutMessage m , WriteConcern concern , ServerAddress hostNeeded );
/** /**
* does a read operation on the database * does a read operation on the database
@ -70,7 +70,7 @@ public interface DBConnector {
* @throws MongoException * @throws MongoException
*/ */
public Response call( DB db , DBCollection coll , OutMessage m , public Response call( DB db , DBCollection coll , OutMessage m ,
ServerAddress hostNeeded , DBDecoder decoder ) throws MongoException; ServerAddress hostNeeded , DBDecoder decoder );
/** /**
* *
* does a read operation on the database * does a read operation on the database
@ -82,7 +82,7 @@ public interface DBConnector {
* @return the read result * @return the read result
* @throws MongoException * @throws MongoException
*/ */
public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries ) throws MongoException; public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries );
/** /**
* does a read operation on the database * does a read operation on the database
@ -96,11 +96,21 @@ public interface DBConnector {
* @return the read result * @return the read result
* @throws MongoException * @throws MongoException
*/ */
public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries , ReadPreference readPref , DBDecoder decoder ) throws MongoException; public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries , ReadPreference readPref , DBDecoder decoder );
/** /**
* returns true if the connector is in a usable state * returns true if the connector is in a usable state
* @return * @return
*/ */
public boolean isOpen(); public boolean isOpen();
/**
* Authenticate using the given credentials.
*
* @param credentials the credentials.
* @return the result of the authentication command, if successful
* @throws CommandFailureException if the authentication failed
* @since 2.11.0
*/
public CommandResult authenticate(MongoCredential credentials);
} }
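The new authenticate(MongoCredential) hook on the connector pairs with the credential API added in driver 2.11.0; application code normally hands credentials to the client constructor rather than calling the connector. A hedged sketch, assuming the repackaged class names used throughout this commit, the usual java.util imports, and placeholder host/database/user/password values:

    MongoCredential cred = MongoCredential.createMongoCRCredential(
            "appUser", "mcore", "secret".toCharArray());
    try {
        MongoClient mongo = new MongoClient(
                new ServerAddress("localhost", 27017),   // ServerAddress may throw UnknownHostException in the 2.x driver
                Arrays.asList(cred));
        DB db = mongo.getDB("mcore");                    // connections authenticate lazily via checkAuth/authenticate
    } catch (java.net.UnknownHostException e) {
        throw new RuntimeException(e);
    }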

View File

@ -18,11 +18,14 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import java.io.Closeable;
import java.util.*;
import com.massivecraft.mcore.xlib.mongodb.DBApiLayer.Result; import com.massivecraft.mcore.xlib.mongodb.DBApiLayer.Result;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
/** An iterator over database results. /** An iterator over database results.
* Doing a <code>find()</code> query on a collection returns a * Doing a <code>find()</code> query on a collection returns a
@ -158,7 +161,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
/** /**
* Informs the database of an indexed field of the collection in order to improve performance. * Informs the database of an indexed field of the collection in order to improve performance.
* @param indexName the name of an index * @param indexName the name of an index
* @return same DBCursort for chaining operations * @return same DBCursor for chaining operations
*/ */
public DBCursor hint( String indexName ){ public DBCursor hint( String indexName ){
if ( _it != null ) if ( _it != null )
@ -195,6 +198,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* "n" : the number of records that the database returned * "n" : the number of records that the database returned
* "millis" : how long it took the database to execute the query * "millis" : how long it took the database to execute the query
* @return a <code>DBObject</code> * @return a <code>DBObject</code>
* @throws MongoException
* @dochub explain * @dochub explain
*/ */
public DBObject explain(){ public DBObject explain(){
@ -261,7 +265,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* Discards a given number of elements at the beginning of the cursor. * Discards a given number of elements at the beginning of the cursor.
* @param n the number of elements to skip * @param n the number of elements to skip
* @return a cursor pointing to the new first element of the results * @return a cursor pointing to the new first element of the results
* @throws RuntimeException if the cursor has started to be iterated through * @throws IllegalStateException if the cursor has started to be iterated through
*/ */
public DBCursor skip( int n ){ public DBCursor skip( int n ){
if ( _it != null ) if ( _it != null )
@ -294,8 +298,8 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* *
* @return a copy of the same cursor (for chaining) * @return a copy of the same cursor (for chaining)
* *
* @deprecated Replaced with ReadPreference.SECONDARY * @deprecated Replaced with {@code ReadPreference.secondaryPreferred()}
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference.SECONDARY * @see ReadPreference#secondaryPreferred()
*/ */
@Deprecated @Deprecated
public DBCursor slaveOk(){ public DBCursor slaveOk(){
@ -342,31 +346,27 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
// ---- internal stuff ------ // ---- internal stuff ------
private void _check() private void _check() {
throws MongoException { if (_it != null)
if ( _it != null )
return; return;
_lookForHints(); _lookForHints();
DBObject foo = _query; QueryOpBuilder builder = new QueryOpBuilder()
if (hasSpecialQueryFields()) { .addQuery(_query)
foo = _specialFields == null ? new BasicDBObject() : _specialFields; .addOrderBy(_orderBy)
.addHint(_hintDBObj)
.addHint(_hint)
.addExplain(_explain)
.addSnapshot(_snapshot)
.addSpecialFields(_specialFields);
_addToQueryObject(foo, "query", _query, true); if (_collection.getDB().getMongo().isMongosConnection()) {
_addToQueryObject(foo, "orderby", _orderBy, false); builder.addReadPreference(_readPref.toDBObject());
if (_hint != null)
_addToQueryObject(foo, "$hint", _hint);
if (_hintDBObj != null)
_addToQueryObject(foo, "$hint", _hintDBObj);
if (_explain)
foo.put("$explain", true);
if (_snapshot)
foo.put("$snapshot", true);
} }
_it = _collection.__find(foo, _keysWanted, _skip, _batchSize, _limit, _options, _readPref, getDecoder()); _it = _collection.__find(builder.get(), _keysWanted, _skip, _batchSize, _limit,
_options, _readPref, getDecoder());
} }
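With the modifier handling moved into QueryOpBuilder, the public cursor API is unchanged: sort, hint, snapshot and the read preference are still set on the DBCursor and folded into a single query op when iteration begins. A usage sketch with assumed field and index names:

    DBObject query  = new BasicDBObject("active", true);
    DBObject fields = new BasicDBObject("name", 1).append("score", 1);

    DBCursor cursor = coll.find(query, fields)
            .sort(new BasicDBObject("score", -1))   // contributes the "orderby" part of the query op
            .hint("score_-1")                       // assumed index name; contributes "$hint"
            .limit(10);
    cursor.setReadPreference(ReadPreference.secondaryPreferred()); // forwarded when routed through mongos
    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }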
// Only create a new decoder if there is a decoder factory explicitly set on the collection. Otherwise return null // Only create a new decoder if there is a decoder factory explicitly set on the collection. Otherwise return null
@ -400,37 +400,6 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
} }
} }
boolean hasSpecialQueryFields(){
if ( _specialFields != null )
return true;
if ( _orderBy != null && _orderBy.keySet().size() > 0 )
return true;
if ( _hint != null || _hintDBObj != null || _snapshot )
return true;
return _explain;
}
void _addToQueryObject( DBObject query , String field , DBObject thing , boolean sendEmpty ){
if ( thing == null )
return;
if ( ! sendEmpty && thing.keySet().size() == 0 )
return;
_addToQueryObject( query , field , thing );
}
void _addToQueryObject( DBObject query , String field , Object thing ){
if ( thing == null )
return;
query.put( field , thing );
}
void _checkType( CursorType type ){ void _checkType( CursorType type ){
if ( _cursorType == null ){ if ( _cursorType == null ){
_cursorType = type; _cursorType = type;
@ -443,8 +412,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
throw new IllegalArgumentException( "can't switch cursor access methods" ); throw new IllegalArgumentException( "can't switch cursor access methods" );
} }
private DBObject _next() private DBObject _next() {
throws MongoException {
if ( _cursorType == null ) if ( _cursorType == null )
_checkType( CursorType.ITERATOR ); _checkType( CursorType.ITERATOR );
@ -487,8 +455,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
throw new IllegalArgumentException("_it not a real result" ); throw new IllegalArgumentException("_it not a real result" );
} }
private boolean _hasNext() private boolean _hasNext() {
throws MongoException {
_check(); _check();
if ( _limit > 0 && _num >= _limit ) if ( _limit > 0 && _num >= _limit )
@ -512,7 +479,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return * @return
* @throws MongoException * @throws MongoException
*/ */
public boolean hasNext() throws MongoException { public boolean hasNext() {
_checkType( CursorType.ITERATOR ); _checkType( CursorType.ITERATOR );
return _hasNext(); return _hasNext();
} }
@ -522,14 +489,14 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return the next element * @return the next element
* @throws MongoException * @throws MongoException
*/ */
public DBObject next() throws MongoException { public DBObject next() {
_checkType( CursorType.ITERATOR ); _checkType( CursorType.ITERATOR );
return _next(); return _next();
} }
/** /**
* Returns the element the cursor is at. * Returns the element the cursor is at.
* @return the next element * @return the current element
*/ */
public DBObject curr(){ public DBObject curr(){
_checkType( CursorType.ITERATOR ); _checkType( CursorType.ITERATOR );
@ -546,8 +513,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
// ---- array api ----- // ---- array api -----
void _fill( int n ) void _fill( int n ){
throws MongoException {
_checkType( CursorType.ARRAY ); _checkType( CursorType.ARRAY );
while ( n >= _all.size() && _hasNext() ) while ( n >= _all.size() && _hasNext() )
_next(); _next();
@ -561,8 +527,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return the number of elements in the array * @return the number of elements in the array
* @throws MongoException * @throws MongoException
*/ */
public int length() public int length() {
throws MongoException {
_checkType( CursorType.ARRAY ); _checkType( CursorType.ARRAY );
_fill( Integer.MAX_VALUE ); _fill( Integer.MAX_VALUE );
return _all.size(); return _all.size();
@ -573,8 +538,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return an array of elements * @return an array of elements
* @throws MongoException * @throws MongoException
*/ */
public List<DBObject> toArray() public List<DBObject> toArray(){
throws MongoException {
return toArray( Integer.MAX_VALUE ); return toArray( Integer.MAX_VALUE );
} }
@ -584,8 +548,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return an array of objects * @return an array of objects
* @throws MongoException * @throws MongoException
*/ */
public List<DBObject> toArray( int max ) public List<DBObject> toArray( int max ) {
throws MongoException {
_checkType( CursorType.ARRAY ); _checkType( CursorType.ARRAY );
_fill( max - 1 ); _fill( max - 1 );
return _all; return _all;
@ -596,6 +559,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* Iterates cursor and counts objects * Iterates cursor and counts objects
* @see #count() * @see #count()
* @return num objects * @return num objects
* @throws MongoException
*/ */
public int itcount(){ public int itcount(){
int n = 0; int n = 0;
@ -613,14 +577,13 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return the number of objects * @return the number of objects
* @throws MongoException * @throws MongoException
*/ */
public int count() public int count() {
throws MongoException {
if ( _collection == null ) if ( _collection == null )
throw new IllegalArgumentException( "why is _collection null" ); throw new IllegalArgumentException( "why is _collection null" );
if ( _collection._db == null ) if ( _collection._db == null )
throw new IllegalArgumentException( "why is _collection._db null" ); throw new IllegalArgumentException( "why is _collection._db null" );
return (int)_collection.getCount(this._query, this._keysWanted); return (int)_collection.getCount(this._query, this._keysWanted, getReadPreference());
} }
/** /**
@ -630,14 +593,13 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return the number of objects * @return the number of objects
* @throws MongoException * @throws MongoException
*/ */
public int size() public int size() {
throws MongoException {
if ( _collection == null ) if ( _collection == null )
throw new IllegalArgumentException( "why is _collection null" ); throw new IllegalArgumentException( "why is _collection null" );
if ( _collection._db == null ) if ( _collection._db == null )
throw new IllegalArgumentException( "why is _collection._db null" ); throw new IllegalArgumentException( "why is _collection._db null" );
return (int)_collection.getCount(this._query, this._keysWanted, this._limit, this._skip ); return (int)_collection.getCount(this._query, this._keysWanted, this._limit, this._skip, getReadPreference() );
} }
@ -731,6 +693,13 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
return sb.toString(); return sb.toString();
} }
boolean hasFinalizer() {
if (_it == null || ! (_it instanceof Result)) {
return false;
}
return ((Result) _it).hasFinalizer();
}
// ---- query setup ---- // ---- query setup ----
private final DBCollection _collection; private final DBCollection _collection;
private final DBObject _query; private final DBObject _query;

View File

@ -20,14 +20,32 @@ package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.mongodb.util.ThreadUtil; import com.massivecraft.mcore.xlib.mongodb.util.ThreadUtil;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import java.io.BufferedInputStream; import java.io.BufferedInputStream;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.OutputStream; import java.io.OutputStream;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.Socket; import java.net.Socket;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map; import java.util.Map;
import java.util.concurrent.ConcurrentHashMap; import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level; import java.util.logging.Level;
import java.util.logging.Logger; import java.util.logging.Logger;
@ -36,6 +54,7 @@ import java.util.logging.Logger;
* Methods implemented at the port level should throw the raw exceptions like IOException, * Methods implemented at the port level should throw the raw exceptions like IOException,
* so that the connector above can make appropriate decisions on how to handle. * so that the connector above can make appropriate decisions on how to handle.
*/ */
@SuppressWarnings({"unused"})
public class DBPort { public class DBPort {
/** /**
@ -50,6 +69,7 @@ public class DBPort {
* creates a new DBPort * creates a new DBPort
* @param addr the server address * @param addr the server address
*/ */
@SuppressWarnings("deprecation")
public DBPort( ServerAddress addr ){ public DBPort( ServerAddress addr ){
this( addr , null , new MongoOptions() ); this( addr , null , new MongoOptions() );
} }
@ -70,12 +90,8 @@ public class DBPort {
return go( msg, coll ); return go( msg, coll );
} }
Response call( OutMessage msg , DBCollection coll , DBDecoder decoder) throws IOException{ Response call(OutMessage msg, DBCollection coll, DBDecoder decoder) throws IOException{
return go( msg, coll, false, null, decoder); return go( msg, coll, false, decoder);
}
Response call( OutMessage msg , DBCollection coll , ReadPreference readPref , DBDecoder decoder) throws IOException{
return go( msg, coll, false, readPref, decoder);
} }
void say( OutMessage msg ) void say( OutMessage msg )
@ -85,14 +101,14 @@ public class DBPort {
private synchronized Response go( OutMessage msg , DBCollection coll ) private synchronized Response go( OutMessage msg , DBCollection coll )
throws IOException { throws IOException {
return go( msg , coll , false, null, null ); return go( msg , coll , false, null );
} }
private synchronized Response go( OutMessage msg , DBCollection coll , DBDecoder decoder ) throws IOException{ private synchronized Response go( OutMessage msg , DBCollection coll , DBDecoder decoder ) throws IOException{
return go( msg, coll, false, null, decoder ); return go( msg, coll, false, decoder );
} }
private synchronized Response go( OutMessage msg , DBCollection coll , boolean forceReponse , ReadPreference readPref, DBDecoder decoder) private synchronized Response go(OutMessage msg, DBCollection coll, boolean forceResponse, DBDecoder decoder)
throws IOException { throws IOException {
if ( _processingResponse ){ if ( _processingResponse ){
@ -105,7 +121,7 @@ public class DBPort {
} }
} }
_calls++; _calls.incrementAndGet();
if ( _socket == null ) if ( _socket == null )
_open(); _open();
@ -115,12 +131,13 @@ public class DBPort {
try { try {
msg.prepare(); msg.prepare();
_activeState = new ActiveState(msg);
msg.pipe( _out ); msg.pipe( _out );
if ( _pool != null ) if ( _pool != null )
_pool._everWorked = true; _pool._everWorked = true;
if ( coll == null && ! forceReponse ) if ( coll == null && ! forceResponse )
return null; return null;
_processingResponse = true; _processingResponse = true;
@ -131,6 +148,7 @@ public class DBPort {
throw ioe; throw ioe;
} }
finally { finally {
_activeState = null;
_processingResponse = false; _processingResponse = false;
} }
} }
@ -141,20 +159,17 @@ public class DBPort {
} }
synchronized private Response findOne( DB db , String coll , DBObject q ) throws IOException { synchronized private Response findOne( DB db , String coll , DBObject q ) throws IOException {
OutMessage msg = OutMessage.query( db._mongo , 0 , db.getName() + "." + coll , 0 , -1 , q , null ); OutMessage msg = OutMessage.query( db.getCollection(coll) , 0 , 0 , -1 , q , null );
Response res = go( msg , db.getCollection( coll ) , null ); try {
return res; Response res = go( msg , db.getCollection( coll ) , null );
} return res;
} finally {
@SuppressWarnings("unused") msg.doneWithMessage();
synchronized private Response findOne( String ns , DBObject q ) throws IOException{ }
OutMessage msg = OutMessage.query( null , 0 , ns , 0 , -1 , q , null );
Response res = go( msg , null , true, null, null );
return res;
} }
synchronized CommandResult runCommand( DB db , DBObject cmd ) throws IOException { synchronized CommandResult runCommand( DB db , DBObject cmd ) throws IOException {
Response res = findOne( db , "$cmd" , cmd ); Response res = findOne(db, "$cmd", cmd);
return convertToCommandResult(cmd, res); return convertToCommandResult(cmd, res);
} }
@ -168,16 +183,16 @@ public class DBPort {
if ( data == null ) if ( data == null )
throw new MongoInternalException( "something is wrong, no command result" ); throw new MongoInternalException( "something is wrong, no command result" );
CommandResult cr = new CommandResult(cmd, res.serverUsed()); CommandResult cr = new CommandResult(res.serverUsed());
cr.putAll( data ); cr.putAll( data );
return cr; return cr;
} }
synchronized CommandResult tryGetLastError( DB db , long last, WriteConcern concern) throws IOException { synchronized CommandResult tryGetLastError( DB db , long last, WriteConcern concern) throws IOException {
if ( last != _calls ) if ( last != _calls.get() )
return null; return null;
return getLastError( db , concern ); return getLastError(db, concern);
} }
/** /**
@ -193,8 +208,7 @@ public class DBPort {
_open(); _open();
} }
boolean _open() void _open() throws IOException {
throws IOException {
long sleepTime = 100; long sleepTime = 100;
@ -203,11 +217,9 @@ public class DBPort {
maxAutoConnectRetryTime = _options.maxAutoConnectRetryTime; maxAutoConnectRetryTime = _options.maxAutoConnectRetryTime;
} }
boolean successfullyConnected = false;
final long start = System.currentTimeMillis(); final long start = System.currentTimeMillis();
while ( true ){ do {
IOException lastError = null;
try { try {
_socket = _options.socketFactory.createSocket(); _socket = _options.socketFactory.createSocket();
_socket.connect( _addr , _options.connectTimeout ); _socket.connect( _addr , _options.connectTimeout );
@ -217,30 +229,28 @@ public class DBPort {
_socket.setSoTimeout( _options.socketTimeout ); _socket.setSoTimeout( _options.socketTimeout );
_in = new BufferedInputStream( _socket.getInputStream() ); _in = new BufferedInputStream( _socket.getInputStream() );
_out = _socket.getOutputStream(); _out = _socket.getOutputStream();
return true; successfullyConnected = true;
} }
catch ( IOException ioe ){ catch ( IOException e ){
lastError = new IOException( "couldn't connect to [" + _addr + "] bc:" + ioe );
_logger.log( Level.INFO , "connect fail to : " + _addr , ioe );
close(); close();
if (!_options.autoConnectRetry || (_pool != null && !_pool._everWorked))
throw e;
long waitSoFar = System.currentTimeMillis() - start;
if (waitSoFar >= maxAutoConnectRetryTime)
throw e;
if (sleepTime + waitSoFar > maxAutoConnectRetryTime)
sleepTime = maxAutoConnectRetryTime - waitSoFar;
_logger.log(Level.WARNING, "Exception connecting to " + serverAddress().getHost() + ": " + e +
". Total wait time so far is " + waitSoFar + " ms. Will retry after sleeping for " + sleepTime + " ms.");
ThreadUtil.sleep(sleepTime);
sleepTime *= 2;
} }
} while (!successfullyConnected);
if ( ! _options.autoConnectRetry || ( _pool != null && ! _pool._everWorked ) )
throw lastError;
long sleptSoFar = System.currentTimeMillis() - start;
if ( sleptSoFar >= maxAutoConnectRetryTime )
throw lastError;
if ( sleepTime + sleptSoFar > maxAutoConnectRetryTime )
sleepTime = maxAutoConnectRetryTime - sleptSoFar;
_logger.severe( "going to sleep and retry. total sleep time after = " + ( sleptSoFar + sleptSoFar ) + "ms this time:" + sleepTime + "ms" );
ThreadUtil.sleep( sleepTime );
sleepTime *= 2;
}
} }
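The reworked _open() keeps the capped exponential backoff but restructures it as a do/while guarded by autoConnectRetry and maxAutoConnectRetryTime. The retry pattern in isolation, as an illustrative standalone sketch rather than driver code (openSocket() and the 15-second cap are assumptions):

    long sleepTime = 100;                              // initial delay in ms, as above
    final long maxAutoConnectRetryTime = 15000;        // assumed cap for this sketch
    final long start = System.currentTimeMillis();
    boolean connected = false;
    do {
        try {
            openSocket();                              // hypothetical connect step
            connected = true;
        } catch (IOException e) {
            long waited = System.currentTimeMillis() - start;
            if (waited >= maxAutoConnectRetryTime)
                throw e;
            if (sleepTime + waited > maxAutoConnectRetryTime)
                sleepTime = maxAutoConnectRetryTime - waited;
            ThreadUtil.sleep(sleepTime);               // same sleep helper the driver uses
            sleepTime *= 2;                            // double the delay each attempt
        }
    } while (!connected);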
@Override @Override
@ -274,11 +284,19 @@ public class DBPort {
close(); close();
} }
ActiveState getActiveState() {
return _activeState;
}
int getLocalPort() {
return _socket != null ? _socket.getLocalPort() : -1;
}
/** /**
* closes the underlying connection and streams * closes the underlying connection and streams
*/ */
protected void close(){ protected void close(){
_authed.clear(); authenticatedDatabases.clear();
if ( _socket != null ){ if ( _socket != null ){
try { try {
@ -294,35 +312,40 @@ public class DBPort {
_socket = null; _socket = null;
} }
void checkAuth( DB db ) throws IOException { CommandResult authenticate(Mongo mongo, final MongoCredential credentials) {
if ( db._username == null ){ Authenticator authenticator;
if ( db._name.equals( "admin" ) ) if (credentials.getMechanism().equals(MongoCredential.MONGODB_CR_MECHANISM)) {
return; authenticator = new NativeAuthenticator(mongo, credentials);
checkAuth( db._mongo.getDB( "admin" ) ); } else if (credentials.getMechanism().equals(MongoCredential.GSSAPI_MECHANISM)) {
return; authenticator = new GSSAPIAuthenticator(mongo, credentials);
} else {
throw new IllegalArgumentException("Unsupported authentication protocol: " + credentials.getMechanism());
} }
if ( _authed.containsKey( db ) ) CommandResult res = authenticator.authenticate();
return; authenticatedDatabases.add(credentials.getSource());
return res;
}
CommandResult res = runCommand( db , new BasicDBObject( "getnonce" , 1 ) ); void checkAuth(Mongo mongo) throws IOException {
res.throwOnError(); // get the difference between the set of credentialed databases and the set of authenticated databases on this connection
Set<String> unauthenticatedDatabases = new HashSet<String>(mongo.getAuthority().getCredentialsStore().getDatabases());
unauthenticatedDatabases.removeAll(authenticatedDatabases);
DBObject temp = db._authCommand( res.getString( "nonce" ) ); for (String databaseName : unauthenticatedDatabases) {
authenticate(mongo, mongo.getAuthority().getCredentialsStore().get(databaseName));
res = runCommand( db , temp ); }
res.throwOnError();
_authed.put( db , true );
} }
/** /**
* Gets the pool that this port belongs to * Gets the pool that this port belongs to.
* @return * @return the pool that this port belongs to.
*/ */
public DBPortPool getPool() { public DBPortPool getPool() {
return _pool; return _pool;
} }
private static Logger _rootLogger = Logger.getLogger( "com.mongodb.port" );
final int _hashCode; final int _hashCode;
final ServerAddress _sa; final ServerAddress _sa;
final InetSocketAddress _addr; final InetSocketAddress _addr;
@ -331,15 +354,217 @@ public class DBPort {
final Logger _logger; final Logger _logger;
final DBDecoder _decoder; final DBDecoder _decoder;
private Socket _socket; private volatile Socket _socket;
private InputStream _in; private volatile InputStream _in;
private OutputStream _out; private volatile OutputStream _out;
private boolean _processingResponse; private volatile boolean _processingResponse;
private Map<DB,Boolean> _authed = new ConcurrentHashMap<DB, Boolean>( ); // needs synchronization to ensure that modifications are published.
int _lastThread; final Set<String> authenticatedDatabases = Collections.synchronizedSet(new HashSet<String>());
long _calls = 0;
private static Logger _rootLogger = Logger.getLogger( "com.mongodb.port" ); volatile int _lastThread;
final AtomicLong _calls = new AtomicLong();
private volatile ActiveState _activeState;
private volatile Boolean useCRAMAuthenticationProtocol;
class ActiveState {
ActiveState(final OutMessage outMessage) {
this.outMessage = outMessage;
this.startTime = System.nanoTime();
this.threadName = Thread.currentThread().getName();
}
final OutMessage outMessage;
final long startTime;
final String threadName;
}
class GenericSaslAuthenticator extends SaslAuthenticator {
static final String CRAM_MD5 = "CRAM-MD5";
private final String mechanism;
GenericSaslAuthenticator(final Mongo mongo, MongoCredential credentials, String mechanism) {
super(mongo, credentials);
this.mechanism = mechanism;
}
@Override
protected SaslClient createSaslClient() {
try {
return Sasl.createSaslClient(new String[]{mechanism},
credential.getUserName(), MONGODB_PROTOCOL,
serverAddress().getHost(), null, new CredentialsHandlingCallbackHandler());
} catch (SaslException e) {
throw new MongoException("Exception initializing SASL client", e);
}
}
@Override
protected DB getDatabase() {
return mongo.getDB(credential.getSource());
}
@Override
public String getMechanismName() {
return mechanism;
}
class CredentialsHandlingCallbackHandler implements CallbackHandler {
public void handle(final Callback[] callbacks) throws IOException, UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
NameCallback nameCallback = (NameCallback) callback;
nameCallback.setName(credential.getUserName());
}
if (callback instanceof PasswordCallback) {
PasswordCallback passwordCallback = (PasswordCallback) callback;
String hashedPassword = new String(NativeAuthenticationHelper.createHash(
credential.getUserName(), credential.getPassword()));
passwordCallback.setPassword(hashedPassword.toCharArray());
}
}
}
}
}
class GSSAPIAuthenticator extends SaslAuthenticator {
public static final String GSSAPI_OID = "1.2.840.113554.1.2.2";
public static final String GSSAPI_MECHANISM = MongoCredential.GSSAPI_MECHANISM;
GSSAPIAuthenticator(final Mongo mongo, final MongoCredential credentials) {
super(mongo, credentials);
if (!this.credential.getMechanism().equals(MongoCredential.GSSAPI_MECHANISM)) {
throw new MongoException("Incorrect mechanism: " + this.credential.getMechanism());
}
}
@Override
protected SaslClient createSaslClient() {
try {
Map<String, Object> props = new HashMap<String, Object>();
props.put(Sasl.CREDENTIALS, getGSSCredential(credential.getUserName()));
return Sasl.createSaslClient(new String[]{GSSAPI_MECHANISM}, credential.getUserName(), MONGODB_PROTOCOL,
serverAddress().getHost(), props, null);
} catch (SaslException e) {
throw new MongoException("Exception initializing SASL client", e);
} catch (GSSException e) {
throw new MongoException("Exception initializing GSSAPI credentials", e);
}
}
@Override
protected DB getDatabase() {
return mongo.getDB(credential.getSource());
}
@Override
public String getMechanismName() {
return "GSSAPI";
}
private GSSCredential getGSSCredential(String userName) throws GSSException {
Oid krb5Mechanism = new Oid(GSSAPI_OID);
GSSManager manager = GSSManager.getInstance();
GSSName name = manager.createName(userName, GSSName.NT_USER_NAME);
return manager.createCredential(name, GSSCredential.INDEFINITE_LIFETIME,
krb5Mechanism, GSSCredential.INITIATE_ONLY);
}
}
abstract class SaslAuthenticator extends Authenticator {
public static final String MONGODB_PROTOCOL = "mongodb";
SaslAuthenticator(final Mongo mongo, MongoCredential credentials) {
super(mongo, credentials);
}
public CommandResult authenticate() {
SaslClient saslClient = createSaslClient();
try {
byte[] response = (saslClient.hasInitialResponse() ? saslClient.evaluateChallenge(new byte[0]) : null);
CommandResult res = sendSaslStart(response);
res.throwOnError();
int conversationId = (Integer) res.get("conversationId");
while (! (Boolean) res.get("done")) {
response = saslClient.evaluateChallenge((byte[]) res.get("payload"));
if (response == null) {
throw new MongoException("SASL protocol error: no client response to challenge");
}
res = sendSaslContinue(conversationId, response);
res.throwOnError();
}
return res;
} catch (IOException e) {
throw new MongoException.Network("IOException authenticating the connection", e);
} finally {
try {
saslClient.dispose();
} catch (SaslException e) {
// ignore
}
}
}
protected abstract SaslClient createSaslClient();
protected abstract DB getDatabase();
private CommandResult sendSaslStart(final byte[] outToken) throws IOException {
DBObject cmd = new BasicDBObject("saslStart", 1).
append("mechanism", getMechanismName())
.append("payload", outToken != null ? outToken : new byte[0]);
return runCommand(getDatabase(), cmd);
}
private CommandResult sendSaslContinue(final int conversationId, final byte[] outToken) throws IOException {
DB adminDB = getDatabase();
DBObject cmd = new BasicDBObject("saslContinue", 1).append("conversationId", conversationId).
append("payload", outToken);
return runCommand(adminDB, cmd);
}
public abstract String getMechanismName();
}
class NativeAuthenticator extends Authenticator {
NativeAuthenticator(Mongo mongo, MongoCredential credentials) {
super(mongo, credentials);
}
@Override
public CommandResult authenticate() {
try {
DB db = mongo.getDB(credential.getSource());
CommandResult res = runCommand(db, NativeAuthenticationHelper.getNonceCommand());
res.throwOnError();
res = runCommand(db, NativeAuthenticationHelper.getAuthCommand(credential.getUserName(),
credential.getPassword(), res.getString("nonce")));
res.throwOnError();
return res;
} catch (IOException e) {
throw new MongoException.Network("IOException authenticating the connection", e);
}
}
}
abstract class Authenticator {
protected final Mongo mongo;
protected final MongoCredential credential;
Authenticator(Mongo mongo, MongoCredential credential) {
this.mongo = mongo;
this.credential = credential;
}
abstract CommandResult authenticate();
}
} }
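The three authenticator flavours above (NativeAuthenticator for MONGODB-CR, GSSAPIAuthenticator and GenericSaslAuthenticator for SASL mechanisms) are driven by the credentials attached to the Mongo instance. As a rough, hedged sketch of how they get exercised from application code, assuming the repackaged driver keeps the stock 2.11.1 MongoClient/MongoCredential API (host, database, user name and password below are placeholders):

import java.util.Arrays;

import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.MongoCredential;
import com.massivecraft.mcore.xlib.mongodb.ServerAddress;

public class AuthSketch
{
    public static void main(String[] args) throws Exception
    {
        // MONGODB-CR credentials end up in NativeAuthenticator (getnonce + authenticate commands).
        MongoCredential cr = MongoCredential.createMongoCRCredential("user", "mydb", "secret".toCharArray());

        // Kerberos credentials would instead be handled by GSSAPIAuthenticator via a SASL conversation:
        // MongoCredential gssapi = MongoCredential.createGSSAPICredential("user@EXAMPLE.COM");

        MongoClient mongo = new MongoClient(new ServerAddress("localhost", 27017), Arrays.asList(cr));
        DB db = mongo.getDB("mydb");
        System.out.println(db.getCollectionNames()); // the first real operation triggers authentication
        mongo.close();
    }
}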

View File

@ -18,7 +18,12 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import java.lang.management.ManagementFactory; import com.massivecraft.mcore.xlib.mongodb.util.ConnectionPoolStatisticsBean;
import com.massivecraft.mcore.xlib.mongodb.util.SimplePool;
import com.massivecraft.mcore.xlib.mongodb.util.management.JMException;
import com.massivecraft.mcore.xlib.mongodb.util.management.MBeanServerFactory;
import java.io.InterruptedIOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
@ -28,29 +33,37 @@ import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level; import java.util.logging.Level;
import javax.management.JMException; /**
import javax.management.MBeanServer; * This class is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases.
import javax.management.MalformedObjectNameException; */
import javax.management.ObjectName;
import com.massivecraft.mcore.xlib.mongodb.util.SimplePool;
public class DBPortPool extends SimplePool<DBPort> { public class DBPortPool extends SimplePool<DBPort> {
public String getHost() {
return _addr.getHost();
}
public int getPort() {
return _addr.getPort();
}
public synchronized ConnectionPoolStatisticsBean getStatistics() {
return new ConnectionPoolStatisticsBean(getTotal(), getInUse(), getInUseConnections());
}
private InUseConnectionBean[] getInUseConnections() {
List<InUseConnectionBean> inUseConnectionInfoList = new ArrayList<InUseConnectionBean>();
long currentNanoTime = System.nanoTime();
for (DBPort port : _out) {
inUseConnectionInfoList.add(new InUseConnectionBean(port, currentNanoTime));
}
return inUseConnectionInfoList.toArray(new InUseConnectionBean[inUseConnectionInfoList.size()]);
}
static class Holder { static class Holder {
Holder( MongoOptions options ){ Holder( MongoOptions options ){
_options = options; _options = options;
{
MBeanServer temp = null;
try {
temp = ManagementFactory.getPlatformMBeanServer();
}
catch ( Throwable t ){
}
_server = temp;
}
} }
DBPortPool get( ServerAddress addr ){ DBPortPool get( ServerAddress addr ){
@ -66,40 +79,46 @@ public class DBPortPool extends SimplePool<DBPort> {
return p; return p;
} }
p = new DBPortPool( addr , _options ); p = createPool(addr);
_pools.put( addr , p); _pools.put( addr , p);
if ( _server != null ){ try {
try { String on = createObjectName(addr);
ObjectName on = createObjectName( addr ); if (MBeanServerFactory.getMBeanServer().isRegistered(on)) {
if ( _server.isRegistered( on ) ){ MBeanServerFactory.getMBeanServer().unregisterMBean(on);
_server.unregisterMBean( on ); Bytes.LOGGER.log(Level.INFO, "multiple Mongo instances for same host, jmx numbers might be off");
Bytes.LOGGER.log( Level.INFO , "multiple Mongo instances for same host, jmx numbers might be off" );
}
_server.registerMBean( p , on );
}
catch ( JMException e ){
Bytes.LOGGER.log( Level.WARNING , "jmx registration error: " + e + " continuing..." );
}
catch ( java.security.AccessControlException e ){
Bytes.LOGGER.log( Level.WARNING , "jmx registration error: " + e + " continuing..." );
} }
MBeanServerFactory.getMBeanServer().registerMBean(p, on);
} catch (JMException e) {
Bytes.LOGGER.log(Level.WARNING, "JMX registration error: " + e +
"\nConsider setting com.mongodb.MongoOptions.alwaysUseMBeans property to true." +
"\nContinuing...");
} catch (java.security.AccessControlException e) {
Bytes.LOGGER.log(Level.WARNING, "JMX registration error: " + e +
"\nContinuing...");
} }
} }
return p; return p;
} }
private DBPortPool createPool(final ServerAddress addr) {
if (isJava5 || _options.isAlwaysUseMBeans()) {
return new Java5MongoConnectionPool(addr, _options);
} else {
return new MongoConnectionPool(addr, _options);
}
}
void close(){ void close(){
synchronized ( _pools ){ synchronized ( _pools ){
for ( DBPortPool p : _pools.values() ){ for ( DBPortPool p : _pools.values() ){
p.close(); p.close();
try { try {
ObjectName on = createObjectName( p._addr ); String on = createObjectName( p._addr );
if ( _server.isRegistered( on ) ){ if ( MBeanServerFactory.getMBeanServer().isRegistered(on) ){
_server.unregisterMBean( on ); MBeanServerFactory.getMBeanServer().unregisterMBean(on);
} }
} catch ( JMException e ){ } catch ( JMException e ){
Bytes.LOGGER.log( Level.WARNING , "jmx de-registration error, continuing" , e ); Bytes.LOGGER.log( Level.WARNING , "jmx de-registration error, continuing" , e );
@ -108,20 +127,24 @@ public class DBPortPool extends SimplePool<DBPort> {
} }
} }
private ObjectName createObjectName( ServerAddress addr ) throws MalformedObjectNameException { private String createObjectName( ServerAddress addr ) {
String name = "com.mongodb:type=ConnectionPool,host=" + addr.toString().replace( ":" , ",port=" ) + ",instance=" + _serial; String name = "com.mongodb:type=ConnectionPool,host=" + addr.toString().replace( ":" , ",port=" ) + ",instance=" + _serial;
if ( _options.description != null ) if ( _options.description != null )
name += ",description=" + _options.description; name += ",description=" + _options.description;
return new ObjectName( name ); return name;
}
static {
isJava5 = System.getProperty("java.version").startsWith("1.5");
} }
final MongoOptions _options; final MongoOptions _options;
final Map<ServerAddress,DBPortPool> _pools = Collections.synchronizedMap( new HashMap<ServerAddress,DBPortPool>() ); final Map<ServerAddress,DBPortPool> _pools = Collections.synchronizedMap( new HashMap<ServerAddress,DBPortPool>() );
final MBeanServer _server;
final int _serial = nextSerial.incrementAndGet(); final int _serial = nextSerial.incrementAndGet();
// we use this to give each Holder a different mbean name // we use this to give each Holder a different mbean name
static AtomicInteger nextSerial = new AtomicInteger(0); static AtomicInteger nextSerial = new AtomicInteger(0);
static final boolean isJava5;
} }
// ---- // ----
@ -136,8 +159,13 @@ public class DBPortPool extends SimplePool<DBPort> {
public static class SemaphoresOut extends NoMoreConnection { public static class SemaphoresOut extends NoMoreConnection {
private static final long serialVersionUID = -4415279469780082174L; private static final long serialVersionUID = -4415279469780082174L;
private static final String message = "Concurrent requests for database connection have exceeded limit";
SemaphoresOut(){ SemaphoresOut(){
super( "Out of semaphores to get db connection" ); super( message );
}
SemaphoresOut(int numPermits){
super( message + " of " + numPermits);
} }
} }
@ -151,7 +179,7 @@ public class DBPortPool extends SimplePool<DBPort> {
// ---- // ----
DBPortPool( ServerAddress addr , MongoOptions options ){ DBPortPool( ServerAddress addr , MongoOptions options ){
super( "DBPortPool-" + addr.toString() + ", options = " + options.toString() , options.connectionsPerHost , options.connectionsPerHost ); super( "DBPortPool-" + addr.toString() + ", options = " + options.toString() , options.connectionsPerHost );
_options = options; _options = options;
_addr = addr; _addr = addr;
_waitingSem = new Semaphore( _options.connectionsPerHost * _options.threadsAllowedToBlockForConnectionMultiplier ); _waitingSem = new Semaphore( _options.connectionsPerHost * _options.threadsAllowedToBlockForConnectionMultiplier );
@ -161,50 +189,53 @@ public class DBPortPool extends SimplePool<DBPort> {
return 0; return 0;
} }
protected int pick( int iThink , boolean couldCreate ){ @Override
final int id = System.identityHashCode(Thread.currentThread()); protected int pick( int recommended, boolean couldCreate ){
final int s = _availSafe.size(); int id = System.identityHashCode(Thread.currentThread());
for ( int i=0; i<s; i++ ){ for (int i = _avail.size() - 1; i >= 0; i--){
DBPort p = _availSafe.get(i); if ( _avail.get(i)._lastThread == id )
if ( p._lastThread == id )
return i; return i;
} }
if ( couldCreate ) return couldCreate ? -1 : recommended;
return -1;
return iThink;
} }
public DBPort get(){ /**
* @return
* @throws MongoException
*/
@Override
public DBPort get() {
DBPort port = null; DBPort port = null;
if ( ! _waitingSem.tryAcquire() ) if ( ! _waitingSem.tryAcquire() )
throw new SemaphoresOut(); throw new SemaphoresOut(_options.connectionsPerHost * _options.threadsAllowedToBlockForConnectionMultiplier);
try { try {
port = get( _options.maxWaitTime ); port = get( _options.maxWaitTime );
} } catch (InterruptedException e) {
finally { throw new MongoInterruptedException(e);
} finally {
_waitingSem.release(); _waitingSem.release();
} }
if ( port == null ) if ( port == null )
throw new ConnectionWaitTimeOut( _options.maxWaitTime ); throw new ConnectionWaitTimeOut( _options.maxWaitTime );
port._lastThread = System.identityHashCode(Thread.currentThread()); port._lastThread = System.identityHashCode(Thread.currentThread());
return port; return port;
} }
void gotError( Exception e ){ // return true if the exception is recoverable
if ( e instanceof java.nio.channels.ClosedByInterruptException || boolean gotError( Exception e ){
e instanceof InterruptedException ){ if (e instanceof java.nio.channels.ClosedByInterruptException){
// this is probably a request that is taking too long // this is probably a request that is taking too long
// so usually doesn't mean there is a real db problem // so usually doesn't mean there is a real db problem
return; return true;
} }
if ( e instanceof java.net.SocketTimeoutException ){ if ( e instanceof InterruptedIOException){
// we don't want to clear the port pool for a connection timing out // we don't want to clear the port pool for a connection timing out or interrupted
return; return true;
} }
Bytes.LOGGER.log( Level.WARNING , "emptying DBPortPool to " + getServerAddress() + " b/c of error" , e ); Bytes.LOGGER.log( Level.WARNING , "emptying DBPortPool to " + getServerAddress() + " b/c of error" , e );
@ -212,10 +243,14 @@ public class DBPortPool extends SimplePool<DBPort> {
List<DBPort> all = new ArrayList<DBPort>(); List<DBPort> all = new ArrayList<DBPort>();
while ( true ){ while ( true ){
DBPort temp = get(0); try {
if ( temp == null ) DBPort temp = get(0);
break; if ( temp == null )
all.add( temp ); break;
all.add( temp );
} catch (InterruptedException interruptedException) {
throw new MongoInterruptedException(interruptedException);
}
} }
for ( DBPort p : all ){ for ( DBPort p : all ){
@ -223,20 +258,15 @@ public class DBPortPool extends SimplePool<DBPort> {
done(p); done(p);
} }
return false;
} }
void close(){ @Override
clear();
}
public void cleanup( DBPort p ){ public void cleanup( DBPort p ){
p.close(); p.close();
} }
public boolean ok( DBPort t ){ @Override
return _addr.getSocketAddress().equals( t._addr );
}
protected DBPort createNew(){ protected DBPort createNew(){
return new DBPort( _addr , this , _options ); return new DBPort( _addr , this , _options );
} }

View File

@ -52,6 +52,7 @@ public class DBRef extends DBRefBase {
* @param db the database * @param db the database
* @param ref the reference * @param ref the reference
* @return * @return
* @throws MongoException
*/ */
public static DBObject fetch(DB db, DBObject ref) { public static DBObject fetch(DB db, DBObject ref) {
String ns; String ns;

View File

@ -39,8 +39,9 @@ public class DBRefBase {
/** /**
* fetches the object referenced from the database * fetches the object referenced from the database
* @return * @return
* @throws MongoException
*/ */
public DBObject fetch() { public DBObject fetch() throws MongoException {
if (_loadedPointedTo) if (_loadedPointedTo)
return _pointedTo; return _pointedTo;
@ -84,16 +85,23 @@ public class DBRefBase {
} }
@Override @Override
public boolean equals(Object obj) { public boolean equals(final Object o) {
if (obj == this) if (this == o) return true;
return true; if (o == null || getClass() != o.getClass()) return false;
if (obj instanceof DBRefBase) { final DBRefBase dbRefBase = (DBRefBase) o;
DBRefBase ref = (DBRefBase) obj;
if (_ns.equals(ref.getRef()) && _id.equals(ref.getId())) if (_id != null ? !_id.equals(dbRefBase._id) : dbRefBase._id != null) return false;
return true; if (_ns != null ? !_ns.equals(dbRefBase._ns) : dbRefBase._ns != null) return false;
}
return false; return true;
}
@Override
public int hashCode() {
int result = _id != null ? _id.hashCode() : 0;
result = 31 * result + (_ns != null ? _ns.hashCode() : 0);
return result;
} }
final Object _id; final Object _id;
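The DBRefBase changes above only touch equality and lazy fetching; for context, this is roughly how a $ref/$id pair is written and resolved with the 2.x API (the collection names, database name and localhost address are placeholders, assuming the repackaged classes behave like the stock driver):

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.DBCollection;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.DBRef;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;

public class DBRefSketch
{
    public static void main(String[] args) throws Exception
    {
        MongoClient mongo = new MongoClient("localhost");
        DB db = mongo.getDB("mydb");
        DBCollection addresses = db.getCollection("addresses");

        DBObject address = new BasicDBObject("city", "Stockholm");
        addresses.insert(address); // the driver assigns an ObjectId to "_id"

        // Stored as { "$ref" : "addresses", "$id" : <ObjectId> } inside the person document.
        DBRef home = new DBRef(db, "addresses", address.get("_id"));
        db.getCollection("people").insert(new BasicDBObject("name", "Olof").append("home", home));

        // fetch() loads the referenced document once and caches it (_loadedPointedTo above).
        System.out.println(home.fetch().get("city"));
        mongo.close();
    }
}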

View File

@ -18,71 +18,39 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.mongodb.ReadPreference.TaggedReadPreference;
import java.io.IOException; import java.io.IOException;
import java.net.SocketTimeoutException; import java.net.SocketTimeoutException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level; import java.util.logging.Level;
import java.util.logging.Logger; import java.util.logging.Logger;
public class DBTCPConnector implements DBConnector { public class DBTCPConnector implements DBConnector {
static Logger _logger = Logger.getLogger( Bytes.LOGGER.getName() + ".tcp" ); static Logger _logger = Logger.getLogger( Bytes.LOGGER.getName() + ".tcp" );
static Logger _createLogger = Logger.getLogger( _logger.getName() + ".connect" );
public DBTCPConnector( Mongo m , ServerAddress addr ) /**
throws MongoException { * @param mongo the Mongo instance
_mongo = m; * @throws MongoException
_portHolder = new DBPortPool.Holder( m._options ); */
_checkAddress( addr ); public DBTCPConnector( Mongo mongo ) {
_mongo = mongo;
_createLogger.info( addr.toString() ); _portHolder = new DBPortPool.Holder( mongo._options );
MongoAuthority.Type type = mongo.getAuthority().getType();
setMasterAddress(addr); if (type == MongoAuthority.Type.Direct) {
_allHosts = null; setMasterAddress(mongo.getAuthority().getServerAddresses().get(0));
_rsStatus = null; } else if (type == MongoAuthority.Type.Set) {
_connectionStatus = new DynamicConnectionStatus(mongo, mongo.getAuthority().getServerAddresses());
} } else {
throw new IllegalArgumentException("Unsupported authority type: " + type);
public DBTCPConnector( Mongo m , ServerAddress ... all ) }
throws MongoException {
this( m , Arrays.asList( all ) );
}
public DBTCPConnector( Mongo m , List<ServerAddress> all )
throws MongoException {
_mongo = m;
_portHolder = new DBPortPool.Holder( m._options );
_checkAddress( all );
_allHosts = new ArrayList<ServerAddress>( all ); // make a copy so it can't be modified
_rsStatus = new ReplicaSetStatus( m, _allHosts );
_createLogger.info( all + " -> " + getAddress() );
} }
public void start() { public void start() {
if (_rsStatus != null) if (_connectionStatus != null) {
_rsStatus.start(); _connectionStatus.start();
} }
private static ServerAddress _checkAddress( ServerAddress addr ){
if ( addr == null )
throw new NullPointerException( "address can't be null" );
return addr;
}
private static ServerAddress _checkAddress( List<ServerAddress> addrs ){
if ( addrs == null )
throw new NullPointerException( "addresses can't be null" );
if ( addrs.size() == 0 )
throw new IllegalArgumentException( "need to specify at least 1 address" );
return addrs.get(0);
} }
/** /**
@ -98,7 +66,7 @@ public class DBTCPConnector implements DBConnector {
*/ */
@Override @Override
public void requestStart(){ public void requestStart(){
_myPort.get().requestStart(); _myPort.requestStart();
} }
/** /**
@ -110,12 +78,16 @@ public class DBTCPConnector implements DBConnector {
*/ */
@Override @Override
public void requestDone(){ public void requestDone(){
_myPort.get().requestDone(); _myPort.requestDone();
} }
/**
* @throws MongoException
*/
@Override @Override
public void requestEnsureConnection(){ public void requestEnsureConnection(){
_myPort.get().requestEnsureConnection(); checkMaster( false , true );
_myPort.requestEnsureConnection();
} }
void _checkClosed(){ void _checkClosed(){
@ -124,31 +96,47 @@ public class DBTCPConnector implements DBConnector {
} }
WriteResult _checkWriteError( DB db, DBPort port , WriteConcern concern ) WriteResult _checkWriteError( DB db, DBPort port , WriteConcern concern )
throws MongoException, IOException { throws IOException{
CommandResult e = port.runCommand( db , concern.getCommand() ); CommandResult e = port.runCommand( db , concern.getCommand() );
e.throwOnError(); e.throwOnError();
return new WriteResult( e , concern ); return new WriteResult( e , concern );
} }
/**
* @param db
* @param m
* @param concern
* @return
* @throws MongoException
*/
@Override @Override
public WriteResult say( DB db , OutMessage m , WriteConcern concern ) public WriteResult say( DB db , OutMessage m , WriteConcern concern ){
throws MongoException {
return say( db , m , concern , null ); return say( db , m , concern , null );
} }
/**
* @param db
* @param m
* @param concern
* @param hostNeeded
* @return
* @throws MongoException
*/
@Override @Override
public WriteResult say( DB db , OutMessage m , WriteConcern concern , ServerAddress hostNeeded ) public WriteResult say( DB db , OutMessage m , WriteConcern concern , ServerAddress hostNeeded ){
throws MongoException {
if (concern == null) {
throw new IllegalArgumentException("Write concern is null");
}
_checkClosed(); _checkClosed();
checkMaster( false , true ); checkMaster( false , true );
MyPort mp = _myPort.get(); DBPort port = _myPort.get(true, ReadPreference.primary(), hostNeeded);
DBPort port = mp.get( true , ReadPreference.PRIMARY, hostNeeded );
try { try {
port.checkAuth( db ); port.checkAuth( db.getMongo() );
port.say( m ); port.say( m );
if ( concern.callGetLastError() ){ if ( concern.callGetLastError() ){
return _checkWriteError( db , port , concern ); return _checkWriteError( db , port , concern );
@ -158,11 +146,11 @@ public class DBTCPConnector implements DBConnector {
} }
} }
catch ( IOException ioe ){ catch ( IOException ioe ){
mp.error( port , ioe ); _myPort.error(port, ioe);
_error( ioe, false ); _error( ioe, false );
if ( concern.raiseNetworkErrors() ) if ( concern.raiseNetworkErrors() )
throw new MongoException.Network( "can't say something" , ioe ); throw new MongoException.Network("Write operation to server " + port.host() + " failed on database " + db , ioe );
CommandResult res = new CommandResult(port.serverAddress()); CommandResult res = new CommandResult(port.serverAddress());
res.put( "ok" , false ); res.put( "ok" , false );
@ -173,69 +161,107 @@ public class DBTCPConnector implements DBConnector {
throw me; throw me;
} }
catch ( RuntimeException re ){ catch ( RuntimeException re ){
mp.error( port , re ); _myPort.error(port, re);
throw re; throw re;
} }
finally { finally {
mp.done( port ); _myPort.done(port);
m.doneWithMessage(); m.doneWithMessage();
} }
} }
/**
* @param db
* @param coll
* @param m
* @param hostNeeded
* @param decoder
* @return
* @throws MongoException
*/
@Override @Override
public Response call( DB db , DBCollection coll , OutMessage m, ServerAddress hostNeeded, DBDecoder decoder ) public Response call( DB db , DBCollection coll , OutMessage m, ServerAddress hostNeeded, DBDecoder decoder ){
throws MongoException {
return call( db , coll , m , hostNeeded , 2, null, decoder ); return call( db , coll , m , hostNeeded , 2, null, decoder );
} }
/**
public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries ) throws MongoException { * @param db
* @param coll
* @param m
* @param hostNeeded
* @param retries
* @return
* @throws MongoException
*/
@Override
public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries ){
return call( db, coll, m, hostNeeded, retries, null, null); return call( db, coll, m, hostNeeded, retries, null, null);
} }
/**
* @param db
* @param coll
* @param m
* @param hostNeeded
* @param readPref
* @param decoder
* @return
* @throws MongoException
*/
@Override @Override
public Response call( DB db, DBCollection coll, OutMessage m, ServerAddress hostNeeded, int retries, ReadPreference readPref, DBDecoder decoder ) throws MongoException{ public Response call( DB db, DBCollection coll, OutMessage m, ServerAddress hostNeeded, int retries,
ReadPreference readPref, DBDecoder decoder ){
try {
return innerCall(db, coll, m, hostNeeded, retries, readPref, decoder);
} finally {
m.doneWithMessage();
}
}
// This method is recursive. It calls itself to implement query retry logic.
private Response innerCall(final DB db, final DBCollection coll, final OutMessage m, final ServerAddress hostNeeded,
final int retries, ReadPreference readPref, final DBDecoder decoder) {
if (readPref == null) if (readPref == null)
readPref = ReadPreference.PRIMARY; readPref = ReadPreference.primary();
if (readPref == ReadPreference.PRIMARY && m.hasOption( Bytes.QUERYOPTION_SLAVEOK )) if (readPref == ReadPreference.primary() && m.hasOption( Bytes.QUERYOPTION_SLAVEOK ))
readPref = ReadPreference.SECONDARY; readPref = ReadPreference.secondaryPreferred();
boolean secondaryOk = !(readPref == ReadPreference.PRIMARY); boolean secondaryOk = !(readPref == ReadPreference.primary());
_checkClosed(); _checkClosed();
checkMaster( false, !secondaryOk ); // Don't check master on secondary reads unless connected to a replica set
if (!secondaryOk || getReplicaSetStatus() == null)
checkMaster( false, !secondaryOk );
final MyPort mp = _myPort.get(); final DBPort port = _myPort.get(false, readPref, hostNeeded);
final DBPort port = mp.get( false , readPref, hostNeeded );
Response res = null; Response res = null;
boolean retry = false; boolean retry = false;
try { try {
port.checkAuth( db ); port.checkAuth( db.getMongo() );
res = port.call( m , coll, readPref, decoder ); res = port.call( m , coll, decoder );
if ( res._responseTo != m.getId() ) if ( res._responseTo != m.getId() )
throw new MongoException( "ids don't match" ); throw new MongoException( "ids don't match" );
} }
catch ( IOException ioe ){ catch ( IOException ioe ){
mp.error( port , ioe ); _myPort.error(port, ioe);
retry = retries > 0 && !coll._name.equals( "$cmd" ) retry = retries > 0 && !coll._name.equals( "$cmd" )
&& !(ioe instanceof SocketTimeoutException) && _error( ioe, secondaryOk ); && !(ioe instanceof SocketTimeoutException) && _error( ioe, secondaryOk );
if ( !retry ){ if ( !retry ){
throw new MongoException.Network( "can't call something : " + port.host() + "/" + db, throw new MongoException.Network("Read operation to server " + port.host() + " failed on database " + db , ioe );
ioe );
} }
} }
catch ( RuntimeException re ){ catch ( RuntimeException re ){
mp.error( port , re ); _myPort.error(port, re);
throw re; throw re;
} finally { } finally {
mp.done( port ); _myPort.done(port);
} }
if (retry) if (retry)
return call( db , coll , m , hostNeeded , retries - 1 , readPref, decoder ); return innerCall( db , coll , m , hostNeeded , retries - 1 , readPref, decoder );
ServerError err = res.getError(); ServerError err = res.getError();
@ -244,10 +270,9 @@ public class DBTCPConnector implements DBConnector {
if ( retries <= 0 ){ if ( retries <= 0 ){
throw new MongoException( "not talking to master and retries used up" ); throw new MongoException( "not talking to master and retries used up" );
} }
return call( db , coll , m , hostNeeded , retries -1, readPref, decoder ); return innerCall( db , coll , m , hostNeeded , retries -1, readPref, decoder );
} }
m.doneWithMessage();
return res; return res;
} }
@ -261,17 +286,18 @@ public class DBTCPConnector implements DBConnector {
* @return * @return
*/ */
public List<ServerAddress> getAllAddress() { public List<ServerAddress> getAllAddress() {
return _allHosts; return _mongo._authority.getServerAddresses();
} }
/** /**
* Gets the list of server addresses currently seen by the connector. * Gets the list of server addresses currently seen by the connector.
* This includes addresses auto-discovered from a replica set. * This includes addresses auto-discovered from a replica set.
* @return * @return
* @throws MongoException
*/ */
public List<ServerAddress> getServerAddressList() { public List<ServerAddress> getServerAddressList() {
if (_rsStatus != null) { if (_connectionStatus != null) {
return _rsStatus.getServerAddressList(); return _connectionStatus.getServerAddressList();
} }
ServerAddress master = getAddress(); ServerAddress master = getAddress();
@ -285,7 +311,30 @@ public class DBTCPConnector implements DBConnector {
} }
public ReplicaSetStatus getReplicaSetStatus() { public ReplicaSetStatus getReplicaSetStatus() {
return _rsStatus; if (_connectionStatus instanceof ReplicaSetStatus) {
return (ReplicaSetStatus) _connectionStatus;
} else if (_connectionStatus instanceof DynamicConnectionStatus) {
return ((DynamicConnectionStatus) _connectionStatus).asReplicaSetStatus();
} else {
return null;
}
}
// This call can block if it's not yet known.
// Be careful when modifying this method, as it relies on the fact that _isMongosDirectConnection
// is of type Boolean and is null when uninitialized.
boolean isMongosConnection() {
if (_connectionStatus instanceof MongosStatus) {
return true;
} else if (_connectionStatus instanceof DynamicConnectionStatus) {
return ((DynamicConnectionStatus) _connectionStatus).asMongosStatus() != null;
}
if (_isMongosDirectConnection == null) {
initDirectConnection();
}
return _isMongosDirectConnection != null ? _isMongosDirectConnection : false;
} }
public String getConnectPoint(){ public String getConnectPoint(){
@ -301,9 +350,8 @@ public class DBTCPConnector implements DBConnector {
* @return true if the request should be retried, false otherwise * @return true if the request should be retried, false otherwise
* @throws MongoException * @throws MongoException
*/ */
boolean _error( Throwable t, boolean secondaryOk ) boolean _error( Throwable t, boolean secondaryOk ){
throws MongoException { if (_connectionStatus == null) {
if (_rsStatus == null) {
// single server, no need to retry // single server, no need to retry
return false; return false;
} }
@ -311,127 +359,145 @@ public class DBTCPConnector implements DBConnector {
// the replset has at least 1 server up, try to see if should switch master // if no server is up, we won't retry until the updater thread finds one
// if no server is up, we won't retry until the updater thread finds one // this is to cut down the volume of requests/errors when all servers are down
// this is to cut down the volume of requests/errors when all servers are down // this is to cut down the volume of requests/errors when all servers are down
if ( _rsStatus.hasServerUp() ){ if ( _connectionStatus.hasServerUp() ){
checkMaster( true , !secondaryOk ); checkMaster( true , !secondaryOk );
} }
return _rsStatus.hasServerUp(); return _connectionStatus.hasServerUp();
} }
class MyPort { class MyPort {
DBPort get( boolean keep , ReadPreference readPref, ServerAddress hostNeeded ){ DBPort get( boolean keep , ReadPreference readPref, ServerAddress hostNeeded ){
if ( hostNeeded != null ){ DBPort pinnedRequestPort = getPinnedRequestPortForThread();
if (_requestPort != null && _requestPort.serverAddress().equals(hostNeeded)) {
return _requestPort; if ( hostNeeded != null ) {
if (pinnedRequestPort != null && pinnedRequestPort.serverAddress().equals(hostNeeded)) {
return pinnedRequestPort;
} }
// asked for a specific host // asked for a specific host
return _portHolder.get( hostNeeded ).get(); return _portHolder.get( hostNeeded ).get();
} }
if ( _requestPort != null ){ if ( pinnedRequestPort != null ){
// we are within a request, and have a port, should stick to it // we are within a request, and have a port, should stick to it
if ( _requestPort.getPool() == _masterPortPool || !keep ) { if ( pinnedRequestPort.getPool() == _masterPortPool || !keep ) {
// if keep is false, it's a read, so we use port even if master changed // if keep is false, it's a read, so we use port even if master changed
return _requestPort; return pinnedRequestPort;
} }
// it's write and master has changed // it's write and master has changed
// we fall back on new master and try to go on with request // we fall back on new master and try to go on with request
// this may not be best behavior if spec of request is to stick with same server // this may not be best behavior if spec of request is to stick with same server
_requestPort.getPool().done(_requestPort); pinnedRequestPort.getPool().done(pinnedRequestPort);
_requestPort = null; setPinnedRequestPortForThread(null);
} }
if ( !(readPref == ReadPreference.PRIMARY) && _rsStatus != null ){ DBPort port;
// if not a primary read set, try to use a secondary if (getReplicaSetStatus() == null){
// Do they want a Secondary, or a specific tag set? if (_masterPortPool == null) {
if (readPref == ReadPreference.SECONDARY) { // this should only happen in rare case that no master was ever found
ServerAddress slave = _rsStatus.getASecondary(); // may get here at startup if it's a read, slaveOk=true, and ALL servers are down
if ( slave != null ){ throw new MongoException("Rare case where master=null, probably all servers are down");
return _portHolder.get( slave ).get();
}
} else if (readPref instanceof ReadPreference.TaggedReadPreference) {
// Tag based read
ServerAddress secondary = _rsStatus.getASecondary( ( (TaggedReadPreference) readPref ).getTags() );
if (secondary != null)
return _portHolder.get( secondary ).get();
else
throw new MongoException( "Could not find any valid secondaries with the supplied tags ('" +
( (TaggedReadPreference) readPref ).getTags() + "'");
} }
port = _masterPortPool.get();
}
else {
ReplicaSetStatus.ReplicaSet replicaSet = getReplicaSetStatus()._replicaSetHolder.get();
ConnectionStatus.Node node = readPref.getNode(replicaSet);
if (node == null)
throw new MongoException("No replica set members available in " + replicaSet + " for " + readPref.toDBObject().toString());
port = _portHolder.get(node.getServerAddress()).get();
} }
if (_masterPortPool == null) { // if within request, remember port to stick to same server
// this should only happen in rare case that no master was ever found if (threadHasPinnedRequest()) {
// may get here at startup if it's a read, slaveOk=true, and ALL servers are down setPinnedRequestPortForThread(port);
throw new MongoException("Rare case where master=null, probably all servers are down");
} }
// use master return port;
DBPort p = _masterPortPool.get();
if ( _inRequest ) {
// if within request, remember port to stick to same server
_requestPort = p;
}
return p;
} }
void done( DBPort p ){ void done( DBPort port ) {
DBPort requestPort = getPinnedRequestPortForThread();
// keep request port // keep request port
if ( p != _requestPort ){ if (port != requestPort) {
p.getPool().done(p); port.getPool().done(port);
} }
} }
/** /**
* call this method when there is an IOException or other low level error on port. * call this method when there is an IOException or other low level error on port.
* @param p * @param port
* @param e * @param e
*/ */
void error( DBPort p , Exception e ){ void error( DBPort port , Exception e ){
p.close(); port.close();
_requestPort = null; pinnedRequestStatusThreadLocal.remove();
// _logger.log( Level.SEVERE , "MyPort.error called" , e );
// depending on type of error, may need to close other connections in pool // depending on type of error, may need to close other connections in pool
p.getPool().gotError(e); boolean recoverable = port.getPool().gotError(e);
if (!recoverable && _connectionStatus != null && _masterPortPool._addr.equals(port.serverAddress())) {
ConnectionStatus.Node newMaster = _connectionStatus.ensureMaster();
if (newMaster != null) {
setMaster(newMaster);
}
}
} }
void requestEnsureConnection(){ void requestEnsureConnection(){
if ( ! _inRequest ) if ( !threadHasPinnedRequest() )
return; return;
if ( _requestPort != null ) if ( getPinnedRequestPortForThread() != null )
return; return;
_requestPort = _masterPortPool.get(); setPinnedRequestPortForThread(_masterPortPool.get());
} }
void requestStart(){ void requestStart(){
_inRequest = true; pinnedRequestStatusThreadLocal.set(new PinnedRequestStatus());
} }
void requestDone(){ void requestDone(){
if ( _requestPort != null ) DBPort requestPort = getPinnedRequestPortForThread();
_requestPort.getPool().done( _requestPort ); if ( requestPort != null )
_requestPort = null; requestPort.getPool().done( requestPort );
_inRequest = false; pinnedRequestStatusThreadLocal.remove();
} }
DBPort _requestPort; PinnedRequestStatus getPinnedRequestStatusForThread() {
// DBPortPool _requestPool; return pinnedRequestStatusThreadLocal.get();
boolean _inRequest; }
boolean threadHasPinnedRequest() {
return pinnedRequestStatusThreadLocal.get() != null;
}
DBPort getPinnedRequestPortForThread() {
return threadHasPinnedRequest() ? pinnedRequestStatusThreadLocal.get().requestPort : null;
}
void setPinnedRequestPortForThread(final DBPort port) {
pinnedRequestStatusThreadLocal.get().requestPort = port;
}
private final ThreadLocal<PinnedRequestStatus> pinnedRequestStatusThreadLocal = new ThreadLocal<PinnedRequestStatus>();
} }
void checkMaster( boolean force , boolean failIfNoMaster ) static class PinnedRequestStatus {
throws MongoException { DBPort requestPort;
}
if ( _rsStatus != null ){ void checkMaster( boolean force , boolean failIfNoMaster ){
if ( _connectionStatus != null ){
if ( _masterPortPool == null || force ){ if ( _masterPortPool == null || force ){
ReplicaSetStatus.Node master = _rsStatus.ensureMaster(); ConnectionStatus.Node master = _connectionStatus.ensureMaster();
if ( master == null ){ if ( master == null ){
if ( failIfNoMaster ) if ( failIfNoMaster )
throw new MongoException( "can't find a master" ); throw new MongoException( "can't find a master" );
@ -442,42 +508,43 @@ public class DBTCPConnector implements DBConnector {
} }
} else { } else {
// single server, may have to obtain max bson size // single server, may have to obtain max bson size
if (_maxBsonObjectSize.get() == 0) if (_maxBsonObjectSize == 0)
fetchMaxBsonObjectSize(); initDirectConnection();
} }
} }
synchronized void setMaster(ReplicaSetStatus.Node master) { synchronized void setMaster(ConnectionStatus.Node master) {
if (_closed.get()) { if (_closed.get()) {
return; return;
} }
setMasterAddress(master.getServerAddress()); setMasterAddress(master.getServerAddress());
_maxBsonObjectSize.set(master.getMaxBsonObjectSize()); _maxBsonObjectSize = master.getMaxBsonObjectSize();
} }
/** /**
* Fetches the maximum size for a BSON object from the current master server * Fetches the maximum size for a BSON object from the current master server
* @return the size, or 0 if it could not be obtained * @return the size, or 0 if it could not be obtained
*/ */
int fetchMaxBsonObjectSize() { void initDirectConnection() {
if (_masterPortPool == null) if (_masterPortPool == null)
return 0; return;
DBPort port = _masterPortPool.get(); DBPort port = _masterPortPool.get();
try { try {
CommandResult res = port.runCommand(_mongo.getDB("admin"), new BasicDBObject("isMaster", 1)); CommandResult res = port.runCommand(_mongo.getDB("admin"), new BasicDBObject("isMaster", 1));
// max size was added in 1.8 // max size was added in 1.8
if (res.containsField("maxBsonObjectSize")) { if (res.containsField("maxBsonObjectSize")) {
_maxBsonObjectSize.set(((Integer) res.get("maxBsonObjectSize")).intValue()); _maxBsonObjectSize = (Integer) res.get("maxBsonObjectSize");
} else { } else {
_maxBsonObjectSize.set(Bytes.MAX_OBJECT_SIZE); _maxBsonObjectSize = Bytes.MAX_OBJECT_SIZE;
} }
String msg = res.getString("msg");
_isMongosDirectConnection = msg != null && msg.equals("isdbgrid");
} catch (Exception e) { } catch (Exception e) {
_logger.log(Level.WARNING, "Exception determining maxBSONObjectSize ", e); _logger.log(Level.WARNING, "Exception executing isMaster command on " + port.serverAddress(), e);
} finally { } finally {
port.getPool().done(port); port.getPool().done(port);
} }
return _maxBsonObjectSize.get();
} }
@ -488,15 +555,15 @@ public class DBTCPConnector implements DBConnector {
return false; return false;
if ( _masterPortPool != null ) if ( _masterPortPool != null )
_logger.log(Level.WARNING, "Master switching from " + _masterPortPool.getServerAddress() + " to " + addr); _logger.log(Level.WARNING, "Primary switching from " + _masterPortPool.getServerAddress() + " to " + addr);
_masterPortPool = newPool; _masterPortPool = newPool;
return true; return true;
} }
public String debugString(){ public String debugString(){
StringBuilder buf = new StringBuilder( "DBTCPConnector: " ); StringBuilder buf = new StringBuilder( "DBTCPConnector: " );
if ( _rsStatus != null ) { if ( _connectionStatus != null ) {
buf.append( "replica set : " ).append( _allHosts ); buf.append( "set : " ).append( _mongo._authority.getServerAddresses() );
} else { } else {
ServerAddress master = getAddress(); ServerAddress master = getAddress();
buf.append( master ).append( " " ).append( master != null ? master.getSocketAddress() : null ); buf.append( master ).append( " " ).append( master != null ? master.getSocketAddress() : null );
@ -513,16 +580,12 @@ public class DBTCPConnector implements DBConnector {
_portHolder = null; _portHolder = null;
} catch (final Throwable t) { /* nada */ } } catch (final Throwable t) { /* nada */ }
} }
if ( _rsStatus != null ) { if ( _connectionStatus != null ) {
try { try {
_rsStatus.close(); _connectionStatus.close();
_rsStatus = null; _connectionStatus = null;
} catch (final Throwable t) { /* nada */ } } catch (final Throwable t) { /* nada */ }
} }
// below this will remove the myport for this thread only
// client using thread pool in web framework may need to call close() from all threads
_myPort.remove();
} }
/** /**
@ -549,33 +612,43 @@ public class DBTCPConnector implements DBConnector {
return ! _closed.get(); return ! _closed.get();
} }
@Override
public CommandResult authenticate(MongoCredential credentials) {
checkMaster(false, true);
final DBPort port = _myPort.get(false, ReadPreference.primaryPreferred(), null);
try {
CommandResult result = port.authenticate(_mongo, credentials);
_mongo.getAuthority().getCredentialsStore().add(credentials);
return result;
} finally {
_myPort.done(port);
}
}
/** /**
* Gets the maximum size for a BSON object supported by the current master server. * Gets the maximum size for a BSON object supported by the current master server.
* Note that this value may change over time depending on which server is master. * Note that this value may change over time depending on which server is master.
* @return the maximum size, or 0 if not obtained from servers yet. * @return the maximum size, or 0 if not obtained from servers yet.
*/ */
public int getMaxBsonObjectSize() { public int getMaxBsonObjectSize() {
return _maxBsonObjectSize.get(); return _maxBsonObjectSize;
} }
// expose for unit testing // expose for unit testing
MyPort getMyPort() { MyPort getMyPort() {
return _myPort.get(); return _myPort;
} }
private volatile DBPortPool _masterPortPool; private volatile DBPortPool _masterPortPool;
private final Mongo _mongo; private final Mongo _mongo;
private DBPortPool.Holder _portHolder; private DBPortPool.Holder _portHolder;
private final List<ServerAddress> _allHosts; private ConnectionStatus _connectionStatus;
private ReplicaSetStatus _rsStatus;
private final AtomicBoolean _closed = new AtomicBoolean(false); private final AtomicBoolean _closed = new AtomicBoolean(false);
private final AtomicInteger _maxBsonObjectSize = new AtomicInteger(0); private volatile int _maxBsonObjectSize;
private volatile Boolean _isMongosDirectConnection;
private ThreadLocal<MyPort> _myPort = new ThreadLocal<MyPort>(){
protected MyPort initialValue(){
return new MyPort();
}
};
MyPort _myPort = new MyPort();
} }
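Most of the connector rewrite above is about routing reads by ReadPreference and pinning request ports per thread through a ThreadLocal<PinnedRequestStatus> instead of a ThreadLocal<MyPort>. From the application side that routing is driven by the read preference set on the cursor (or on the DB/collection); a hedged sketch against the stock 2.11 API, with the replica-set hostnames below being placeholders:

import java.util.Arrays;

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBCollection;
import com.massivecraft.mcore.xlib.mongodb.DBCursor;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.ReadPreference;
import com.massivecraft.mcore.xlib.mongodb.ServerAddress;

public class ReadPreferenceSketch
{
    public static void main(String[] args) throws Exception
    {
        // A seed list gives MongoAuthority.Type.Set, so DBTCPConnector starts a DynamicConnectionStatus.
        MongoClient mongo = new MongoClient(Arrays.asList(
                new ServerAddress("rs1.example.com", 27017),
                new ServerAddress("rs2.example.com", 27017)));

        DBCollection events = mongo.getDB("mydb").getCollection("events");

        // secondaryPreferred() lets MyPort.get(false, readPref, null) pick a secondary node;
        // writes still go through the master port pool.
        DBCursor cursor = events.find(new BasicDBObject("type", "login"));
        cursor.setReadPreference(ReadPreference.secondaryPreferred());
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
        mongo.close();
    }
}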

View File

@ -19,19 +19,21 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
// Bson // Bson
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import com.massivecraft.mcore.xlib.bson.BSONObject; import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.BasicBSONCallback; import com.massivecraft.mcore.xlib.bson.BasicBSONCallback;
import com.massivecraft.mcore.xlib.bson.types.ObjectId; import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import java.util.LinkedList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
/** /**
* This class overrides BasicBSONCallback to implement some extra features specific to the Database. * This class overrides BasicBSONCallback to implement some extra features specific to the Database.
* For example DBRef type. * For example DBRef type.
* @author antoine * @author antoine
*/ */
@SuppressWarnings({"rawtypes"})
public class DefaultDBCallback extends BasicBSONCallback implements DBCallback { public class DefaultDBCallback extends BasicBSONCallback implements DBCallback {
static class DefaultFactory implements DBCallbackFactory { static class DefaultFactory implements DBCallbackFactory {
@ -59,27 +61,41 @@ public class DefaultDBCallback extends BasicBSONCallback implements DBCallback {
@Override @Override
public void objectStart(boolean array, String name){ public void objectStart(boolean array, String name){
_lastName = name; _nameStack.addLast(name);
super.objectStart( array , name ); super.objectStart( array , name );
} }
@Override @Override
public Object objectDone(){ public Object objectDone(){
BSONObject o = (BSONObject)super.objectDone(); BSONObject o = (BSONObject)super.objectDone();
if ( ! ( o instanceof List ) && String lastName = null;
if ( _nameStack.size() > 0 ){
lastName = _nameStack.removeLast();
}
if ( ! ( o instanceof List ) && lastName != null &&
o.containsField( "$ref" ) && o.containsField( "$ref" ) &&
o.containsField( "$id" ) ){ o.containsField( "$id" ) ){
return cur().put( _lastName , new DBRef( _db, o ) ); return cur().put(lastName, new DBRef( _db, o ) );
} }
return o; return o;
} }
/**
* @return
* @throws MongoException
*/
@Override @Override
public BSONObject create(){ public BSONObject create(){
return _create( null ); return _create( null );
} }
/**
* @param array
* @param path
* @return
* @throws MongoException
*/
@Override @Override
public BSONObject create( boolean array , List<String> path ){ public BSONObject create( boolean array , List<String> path ){
if ( array ) if ( array )
@ -87,8 +103,7 @@ public class DefaultDBCallback extends BasicBSONCallback implements DBCallback {
return _create( path ); return _create( path );
} }
@SuppressWarnings("rawtypes") private DBObject _create( List<String> path ){
private DBObject _create( List<String> path ){
Class c = null; Class c = null;
@ -131,11 +146,11 @@ public class DefaultDBCallback extends BasicBSONCallback implements DBCallback {
@Override @Override
public void reset(){ public void reset(){
_lastName = null; _nameStack = new LinkedList<String>();
super.reset(); super.reset();
} }
private String _lastName; private LinkedList<String> _nameStack;
final DBCollection _collection; final DBCollection _collection;
final DB _db; final DB _db;
static final Logger LOGGER = Logger.getLogger( "com.mongo.DECODING" ); static final Logger LOGGER = Logger.getLogger( "com.mongo.DECODING" );

View File

@ -15,11 +15,11 @@
*/ */
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.BasicBSONDecoder;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import com.massivecraft.mcore.xlib.bson.BasicBSONDecoder;
/** /**
* *
* @author antoine * @author antoine
@ -31,6 +31,11 @@ public class DefaultDBDecoder extends BasicBSONDecoder implements DBDecoder {
public DBDecoder create( ){ public DBDecoder create( ){
return new DefaultDBDecoder( ); return new DefaultDBDecoder( );
} }
@Override
public String toString() {
return "DefaultDBDecoder.DefaultFactory";
}
} }
public static DBDecoderFactory FACTORY = new DefaultFactory(); public static DBDecoderFactory FACTORY = new DefaultFactory();
@ -57,4 +62,8 @@ public class DefaultDBDecoder extends BasicBSONDecoder implements DBDecoder {
return (DBObject) cbk.get(); return (DBObject) cbk.get();
} }
@Override
public String toString() {
return "DefaultDBDecoder";
}
} }

View File

@ -12,13 +12,12 @@
*/ */
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import static com.massivecraft.mcore.xlib.bson.BSON.EOO; import com.massivecraft.mcore.xlib.bson.BSONObject;
import static com.massivecraft.mcore.xlib.bson.BSON.OBJECT; import com.massivecraft.mcore.xlib.bson.BasicBSONEncoder;
import static com.massivecraft.mcore.xlib.bson.BSON.REF; import com.massivecraft.mcore.xlib.bson.io.OutputBuffer;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.bson.*; import static com.massivecraft.mcore.xlib.bson.BSON.*;
import com.massivecraft.mcore.xlib.bson.io.*;
import com.massivecraft.mcore.xlib.bson.types.*;
public class DefaultDBEncoder extends BasicBSONEncoder implements DBEncoder { public class DefaultDBEncoder extends BasicBSONEncoder implements DBEncoder {
@ -35,6 +34,12 @@ public class DefaultDBEncoder extends BasicBSONEncoder implements DBEncoder {
public DBEncoder create( ){ public DBEncoder create( ){
return new DefaultDBEncoder( ); return new DefaultDBEncoder( );
} }
@Override
public String toString() {
return "DefaultDBEncoder.DefaultFactory";
}
} }
@SuppressWarnings("deprecation") @SuppressWarnings("deprecation")
@ -80,4 +85,9 @@ public class DefaultDBEncoder extends BasicBSONEncoder implements DBEncoder {
public DefaultDBEncoder( ){ public DefaultDBEncoder( ){
} }
@Override
public String toString() {
return "DefaultDBEncoder";
}
} }

View File

@ -0,0 +1,195 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Responsible for dynamically determining whether the list of server addresses represents a set of mongos servers or
* a replica set. It starts threads that call the ismaster command on every server in the seed list, and as soon as it
* reaches one it determines what type of server it is. It then creates the appropriate ConnectionStatus implementation
* and forwards all calls to it.
*/
class DynamicConnectionStatus extends ConnectionStatus {
private static final Logger logger = Logger.getLogger("com.mongodb.DynamicConnectionStatus");
DynamicConnectionStatus(Mongo mongo, List<ServerAddress> mongosAddresses) {
super(mongosAddresses, mongo);
}
@Override
void start() {
super.start();
executorService = Executors.newFixedThreadPool(_mongosAddresses.size());
initExecutorService();
}
@Override
void close() {
if (connectionStatus != null) {
connectionStatus.close();
}
if (executorService != null) {
executorService.shutdownNow();
}
super.close();
}
ReplicaSetStatus asReplicaSetStatus() {
ConnectionStatus connectionStatus = getConnectionStatus();
if (connectionStatus instanceof ReplicaSetStatus) {
return (ReplicaSetStatus) connectionStatus;
}
return null;
}
MongosStatus asMongosStatus() {
ConnectionStatus connectionStatus = getConnectionStatus();
if (connectionStatus instanceof MongosStatus) {
return (MongosStatus) connectionStatus;
}
return null;
}
@Override
List<ServerAddress> getServerAddressList() {
if (connectionStatus != null) {
return connectionStatus.getServerAddressList();
} else {
return new ArrayList<ServerAddress>(_mongosAddresses);
}
}
@Override
boolean hasServerUp() {
ConnectionStatus connectionStatus = getConnectionStatus();
if (connectionStatus != null) {
return connectionStatus.hasServerUp();
} else {
return false;
}
}
@Override
Node ensureMaster() {
ConnectionStatus connectionStatus = getConnectionStatus();
if (connectionStatus != null) {
return connectionStatus.ensureMaster();
} else {
return null;
}
}
void initExecutorService() {
try {
for (final ServerAddress cur : _mongosAddresses) {
executorService.submit(new Runnable() {
@Override
public void run() {
DynamicNode node = new DynamicNode(cur, _mongo, _mongoOptions);
try {
while (!Thread.interrupted()) {
try {
node.update();
if (node._ok) {
notifyOfOkNode(node);
return;
}
} catch (Exception e) {
logger.log(Level.WARNING, "couldn't reach " + node._addr, e);
}
int sleepTime = updaterIntervalNoMasterMS;
Thread.sleep(sleepTime);
}
} catch (InterruptedException e) {
// fall through
}
}
});
}
} catch (RejectedExecutionException e) {
// Ignore, as this can happen if a good node is found before all jobs are submitted and the service has
// been shut down.
}
}
private void notifyOfOkNode(DynamicNode node) {
synchronized (this) {
if (connectionStatus != null) {
return;
}
if (node.isMongos) {
connectionStatus = new MongosStatus(_mongo, _mongosAddresses);
} else {
connectionStatus = new ReplicaSetStatus(_mongo, _mongosAddresses);
}
notifyAll();
}
connectionStatus.start();
executorService.shutdownNow();
}
static class DynamicNode extends UpdatableNode {
DynamicNode(final ServerAddress addr, Mongo mongo, MongoOptions mongoOptions) {
super(addr, mongo, mongoOptions);
}
@Override
protected Logger getLogger() {
return logger;
}
@Override
public CommandResult update() {
CommandResult res = super.update();
if (res != null) {
String msg = res.getString("msg");
if (msg != null && msg.equals("isdbgrid")) {
isMongos = true;
}
}
return res;
}
private boolean isMongos;
}
private synchronized ConnectionStatus getConnectionStatus() {
if (connectionStatus == null) {
try {
wait(_mongo.getMongoOptions().getConnectTimeout());
} catch (InterruptedException e) {
throw new MongoInterruptedException("Interrupted while waiting for next update to dynamic status", e);
}
}
return connectionStatus;
}
private volatile ConnectionStatus connectionStatus;
private ExecutorService executorService;
}
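The classification trick DynamicNode.update() relies on is the "msg" field of the ismaster reply. A small, hedged probe showing the same check done by hand (localhost is a placeholder; assumes the stock DB.command API is available in the repackaged driver):

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.CommandResult;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;

public class IsMasterProbe
{
    public static void main(String[] args) throws Exception
    {
        MongoClient mongo = new MongoClient("localhost");

        // The same command UpdatableNode/DynamicNode issue against every seed address.
        CommandResult res = mongo.getDB("admin").command(new BasicDBObject("ismaster", 1));

        // A mongos router answers with msg == "isdbgrid"; a plain mongod does not.
        boolean isMongos = "isdbgrid".equals(res.getString("msg"));
        System.out.println(isMongos ? "mongos -> MongosStatus" : "mongod -> ReplicaSetStatus");
        mongo.close();
    }
}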

View File

@ -0,0 +1,81 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
import java.util.concurrent.TimeUnit;
/**
* This class is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases.
*/
public class InUseConnectionBean {
InUseConnectionBean(final DBPort port, long currentNanoTime) {
DBPort.ActiveState activeState = port.getActiveState();
if (activeState == null) {
durationMS = 0;
namespace = null;
opCode = null;
query = null;
threadName = null;
numDocuments = 0;
}
else {
durationMS = TimeUnit.NANOSECONDS.toMillis(currentNanoTime - activeState.startTime);
namespace = activeState.outMessage.getNamespace();
opCode = activeState.outMessage.getOpCode();
query = activeState.outMessage.getQuery() != null ? activeState.outMessage.getQuery().toString() : null;
threadName = activeState.threadName;
numDocuments = activeState.outMessage.getNumDocuments();
}
localPort = port.getLocalPort();
}
public String getNamespace() {
return namespace;
}
public OutMessage.OpCode getOpCode() {
return opCode;
}
public String getQuery() {
return query;
}
public int getLocalPort() {
return localPort;
}
public long getDurationMS() {
return durationMS;
}
public String getThreadName() {
return threadName;
}
public int getNumDocuments() {
return numDocuments;
}
private final String namespace;
private final OutMessage.OpCode opCode;
private final String query;
private final int localPort;
private final long durationMS;
private final String threadName;
private final int numDocuments;
}

View File

@ -0,0 +1,29 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* This class exists only so that on Java 5 the driver can create instances of a standard MBean,
* thereby keeping compatibility with the JMX implementation in the Java 5 JMX class libraries.
*/
class Java5MongoConnectionPool extends DBPortPool implements Java5MongoConnectionPoolMBean {
Java5MongoConnectionPool(ServerAddress addr, MongoOptions options) {
super(addr, options);
}
}

View File

@ -0,0 +1,66 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* A standard MBean interface for a Mongo connection pool, for use on Java 5 virtual machines.
* <p>
* This interface is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases.
*/
public interface Java5MongoConnectionPoolMBean {
/**
* Gets the name of the pool.
*
* @return the name of the pool
*/
String getName();
/**
* Gets the host that this connection pool is connecting to.
*
* @return the host
*/
String getHost();
/**
* Gets the port that this connection pool is connecting to.
*
* @return the port
*/
int getPort();
/**
* Gets the total number of pool members, including idle and in-use members.
*
* @return total number of members
*/
int getTotal();
/**
* Gets the number of pool members that are currently in use.
*
* @return number of in-use members
*/
int getInUse();
/**
* Gets the maximum allowed size of the pool, including idle and in-use members.
*
* @return the maximum size
*/
int getMaxSize();
}
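
Because the interface above is a standard MBean, its attributes can be read back through the platform MBean server once the driver has registered a pool. A minimal sketch, assuming a type=ConnectionPool key in the registered ObjectName; that pattern is an assumption and should be verified (for example with jconsole) against the names the driver actually registers:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class PoolStatsDump {
    public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // ObjectName pattern is an assumption; attribute names follow the getters
        // declared on Java5MongoConnectionPoolMBean (getInUse -> "InUse", and so on).
        for (ObjectName name : server.queryNames(new ObjectName("*:type=ConnectionPool,*"), null)) {
            System.out.println(name
                    + " host=" + server.getAttribute(name, "Host")
                    + " port=" + server.getAttribute(name, "Port")
                    + " inUse=" + server.getAttribute(name, "InUse")
                    + " total=" + server.getAttribute(name, "Total")
                    + " maxSize=" + server.getAttribute(name, "MaxSize"));
        }
    }
}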

View File

@ -24,6 +24,7 @@ import com.massivecraft.mcore.xlib.bson.types.ObjectId;
/** /**
* *
*/ */
@SuppressWarnings({"rawtypes", "unused"})
public class LazyDBCallback extends LazyBSONCallback implements DBCallback { public class LazyDBCallback extends LazyBSONCallback implements DBCallback {
public LazyDBCallback( DBCollection coll ){ public LazyDBCallback( DBCollection coll ){
@ -31,8 +32,7 @@ public class LazyDBCallback extends LazyBSONCallback implements DBCallback {
_db = _collection == null ? null : _collection.getDB(); _db = _collection == null ? null : _collection.getDB();
} }
@SuppressWarnings("rawtypes") @Override
@Override
public Object createObject( byte[] data, int offset ){ public Object createObject( byte[] data, int offset ){
LazyDBObject o = new LazyDBObject( data, offset, this ); LazyDBObject o = new LazyDBObject( data, offset, this );
//log.info("Created inner BSONObject: " + o); //log.info("Created inner BSONObject: " + o);
@ -52,6 +52,5 @@ public class LazyDBCallback extends LazyBSONCallback implements DBCallback {
final DBCollection _collection; final DBCollection _collection;
final DB _db; final DB _db;
@SuppressWarnings("unused") private static final Logger log = Logger.getLogger( LazyDBCallback.class.getName() );
private static final Logger log = Logger.getLogger( LazyDBCallback.class.getName() );
} }

View File

@ -25,6 +25,13 @@ import java.io.IOException;
* Encoder that only knows how to encode BSONObject instances of type LazyDBObject. * Encoder that only knows how to encode BSONObject instances of type LazyDBObject.
*/ */
public class LazyDBEncoder implements DBEncoder { public class LazyDBEncoder implements DBEncoder {
/**
* @param buf
* @param o
* @return
* @throws MongoException
*/
@Override @Override
public int writeObject(final OutputBuffer buf, BSONObject o) { public int writeObject(final OutputBuffer buf, BSONObject o) {
if (!(o instanceof LazyDBObject)) { if (!(o instanceof LazyDBObject)) {

View File

@ -21,14 +21,14 @@ import java.util.logging.Logger;
/** /**
* *
*/ */
@SuppressWarnings({"rawtypes", "unused"})
public class LazyWriteableDBCallback extends LazyDBCallback { public class LazyWriteableDBCallback extends LazyDBCallback {
public LazyWriteableDBCallback( DBCollection coll ){ public LazyWriteableDBCallback( DBCollection coll ){
super(coll); super(coll);
} }
@SuppressWarnings("rawtypes") @Override
@Override
public Object createObject( byte[] data, int offset ){ public Object createObject( byte[] data, int offset ){
LazyWriteableDBObject o = new LazyWriteableDBObject( data, offset, this ); LazyWriteableDBObject o = new LazyWriteableDBObject( data, offset, this );
//log.info("Created inner BSONObject: " + o); //log.info("Created inner BSONObject: " + o);
@ -42,6 +42,5 @@ public class LazyWriteableDBCallback extends LazyDBCallback {
return o; return o;
} }
@SuppressWarnings("unused") private static final Logger log = Logger.getLogger( LazyWriteableDBCallback.class.getName() );
private static final Logger log = Logger.getLogger( LazyWriteableDBCallback.class.getName() );
} }

View File

@ -24,6 +24,7 @@ import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.LazyBSONCallback; import com.massivecraft.mcore.xlib.bson.LazyBSONCallback;
import com.massivecraft.mcore.xlib.bson.io.BSONByteBuffer; import com.massivecraft.mcore.xlib.bson.io.BSONByteBuffer;
@SuppressWarnings({"unchecked", "rawtypes"})
public class LazyWriteableDBObject extends LazyDBObject { public class LazyWriteableDBObject extends LazyDBObject {
public LazyWriteableDBObject(BSONByteBuffer buff, LazyBSONCallback cbk){ public LazyWriteableDBObject(BSONByteBuffer buff, LazyBSONCallback cbk){
@ -64,7 +65,6 @@ public class LazyWriteableDBObject extends LazyDBObject {
/* (non-Javadoc) /* (non-Javadoc)
* @see org.bson.LazyBSONObject#putAll(java.util.Map) * @see org.bson.LazyBSONObject#putAll(java.util.Map)
*/ */
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override @Override
public void putAll(Map m) { public void putAll(Map m) {
writeable.putAll(m); writeable.putAll(m);

View File

@ -52,10 +52,9 @@ public class MapReduceCommand {
* @param query * @param query
* the query to use on input * the query to use on input
* @return * @return
* @throws MongoException
* @dochub mapreduce * @dochub mapreduce
*/ */
public MapReduceCommand(DBCollection inputCollection , String map , String reduce , String outputCollection, OutputType type, DBObject query) throws MongoException { public MapReduceCommand(DBCollection inputCollection , String map , String reduce , String outputCollection, OutputType type, DBObject query) {
_input = inputCollection.getName(); _input = inputCollection.getName();
_map = map; _map = map;
_reduce = reduce; _reduce = reduce;
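
A minimal sketch of the constructor whose signature changes above (the checked MongoException is dropped). The db handle, the collection name and the map/reduce functions are placeholders; OutputType.INLINE is used so no output collection is needed:

DBCollection events = db.getCollection("events"); // db is an existing DB handle (assumption)
String map = "function() { emit(this.type, 1); }";
String reduce = "function(key, values) { return Array.sum(values); }";
MapReduceCommand cmd = new MapReduceCommand(
        events, map, reduce, null, MapReduceCommand.OutputType.INLINE, null);
MapReduceOutput out = events.mapReduce(cmd); // run it and iterate the inline results
for (DBObject doc : out.results()) {
    System.out.println(doc);
}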

View File

@ -65,6 +65,7 @@ public class MapReduceOutput {
/** /**
* drops the collection that holds the results * drops the collection that holds the results
* @throws MongoException
*/ */
public void drop(){ public void drop(){
if ( _coll != null) if ( _coll != null)

View File

@ -18,6 +18,8 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.io.PoolOutputBuffer;
import java.net.UnknownHostException; import java.net.UnknownHostException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
@ -25,54 +27,56 @@ import java.util.Collection;
import java.util.List; import java.util.List;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentMap;
import java.util.logging.Logger;
import com.massivecraft.mcore.xlib.bson.io.PoolOutputBuffer;
/** /**
* A database connection with internal pooling. * A database connection with internal connection pooling. For most applications, you should have one Mongo instance
* For most application, you should have 1 Mongo instance for the entire JVM. * for the entire JVM.
* * <p>
* The following are equivalent, and all connect to the * The following are equivalent, and all connect to the local database running on the default port:
* local database running on the default port: * <pre>
* * Mongo mongo1 = new Mongo();
* <blockquote><pre> * Mongo mongo1 = new Mongo("localhost");
* Mongo mongo1 = new Mongo( "127.0.0.1" ); * Mongo mongo2 = new Mongo("localhost", 27017);
* Mongo mongo2 = new Mongo( "127.0.0.1", 27017 ); * Mongo mongo4 = new Mongo(new ServerAddress("localhost"));
* Mongo mongo3 = new Mongo( new DBAddress( "127.0.0.1", 27017, "test" ) ); * </pre>
* Mongo mongo4 = new Mongo( new ServerAddress( "127.0.0.1") );
* </pre></blockquote>
*
* Mongo instances have connection pooling built in - see the requestStart
* and requestDone methods for more information.
* http://www.mongodb.org/display/DOCS/Java+Driver+Concurrency
*
* <h3>Connecting to a Replica Set</h3>
* <p> * <p>
* You can connect to a * You can connect to a
* <a href="http://www.mongodb.org/display/DOCS/Replica+Sets">replica set</a> * <a href="http://www.mongodb.org/display/DOCS/Replica+Sets">replica set</a> using the Java driver by passing
* using the Java driver by passing several a list if ServerAddress to the * a ServerAddress list to the Mongo constructor. For example:
* Mongo constructor. * <pre>
* For example: * Mongo mongo = new Mongo(Arrays.asList(
* </p> * new ServerAddress("localhost", 27017),
* <blockquote><pre> * new ServerAddress("localhost", 27018),
* List<ServerAddress> addrs = new ArrayList<ServerAddress>(); * new ServerAddress("localhost", 27019)));
* addrs.add( new ServerAddress( "127.0.0.1" , 27017 ) ); * </pre>
* addrs.add( new ServerAddress( "127.0.0.1" , 27018 ) ); * You can connect to a sharded cluster using the same constructor. Mongo will auto-detect whether the servers are
* addrs.add( new ServerAddress( "127.0.0.1" , 27019 ) ); * a list of replica set members or a list of mongos servers.
*
* Mongo mongo = new Mongo( addrs );
* </pre></blockquote>
*
* <p> * <p>
* By default, all read and write operations will be made on the master. * By default, all read and write operations will be made on the primary,
* But it's possible to read from the slave(s) by using slaveOk: * but it's possible to read from secondaries by changing the read preference:
* </p> * <p>
* <blockquote><pre> * <pre>
* mongo.slaveOk(); * mongo.setReadPreference(ReadPreference.secondary());
* </pre></blockquote> * </pre>
* By default, write operations will not throw exceptions on failure, but that is easily changed too:
* <p>
* <pre>
* mongo.setWriteConcern(WriteConcern.SAFE);
* </pre>
*
* Note: This class has been superseded by {@code MongoClient}, and may be deprecated in a future release.
*
* @see MongoClient
* @see ReadPreference
* @see WriteConcern
*/ */
@SuppressWarnings({"rawtypes"})
public class Mongo { public class Mongo {
static Logger logger = Logger.getLogger(Bytes.LOGGER.getName() + ".Mongo");
// Make sure you don't change the format of these two static variables. A preprocessing regexp // Make sure you don't change the format of these two static variables. A preprocessing regexp
// is applied and updates the version based on configuration in build.properties. // is applied and updates the version based on configuration in build.properties.
@ -86,11 +90,14 @@ public class Mongo {
* @deprecated Replaced by <code>Mongo.getMinorVersion()</code> * @deprecated Replaced by <code>Mongo.getMinorVersion()</code>
*/ */
@Deprecated @Deprecated
public static final int MINOR_VERSION = 8; public static final int MINOR_VERSION = 11;
private static final String FULL_VERSION = "2.8.0"; private static final String FULL_VERSION = "2.11.1";
static int cleanerIntervalMS; static int cleanerIntervalMS;
private static final String ADMIN_DATABASE_NAME = "admin";
static { static {
cleanerIntervalMS = Integer.parseInt(System.getProperty("com.mongodb.cleanerIntervalMS", "1000")); cleanerIntervalMS = Integer.parseInt(System.getProperty("com.mongodb.cleanerIntervalMS", "1000"));
} }
@ -115,6 +122,7 @@ public class Mongo {
* returns a database object * returns a database object
* @param addr the database address * @param addr the database address
* @return * @return
* @throws MongoException
*/ */
public static DB connect( DBAddress addr ){ public static DB connect( DBAddress addr ){
return new Mongo( addr ).getDB( addr.getDBName() ); return new Mongo( addr ).getDB( addr.getDBName() );
@ -124,9 +132,13 @@ public class Mongo {
* Creates a Mongo instance based on a (single) mongodb node (localhost, default port) * Creates a Mongo instance based on a (single) mongodb node (localhost, default port)
* @throws UnknownHostException * @throws UnknownHostException
* @throws MongoException * @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient()}
*
*/ */
@Deprecated
public Mongo() public Mongo()
throws UnknownHostException , MongoException { throws UnknownHostException {
this( new ServerAddress() ); this( new ServerAddress() );
} }
@ -135,9 +147,13 @@ public class Mongo {
* @param host server to connect to * @param host server to connect to
* @throws UnknownHostException if the database host cannot be resolved * @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException * @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(String)}
*
*/ */
@Deprecated
public Mongo( String host ) public Mongo( String host )
throws UnknownHostException , MongoException { throws UnknownHostException{
this( new ServerAddress( host ) ); this( new ServerAddress( host ) );
} }
@ -147,9 +163,13 @@ public class Mongo {
* @param options default query options * @param options default query options
* @throws UnknownHostException if the database host cannot be resolved * @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException * @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(String, MongoClientOptions)}
*
*/ */
@Deprecated
public Mongo( String host , MongoOptions options ) public Mongo( String host , MongoOptions options )
throws UnknownHostException , MongoException { throws UnknownHostException {
this( new ServerAddress( host ) , options ); this( new ServerAddress( host ) , options );
} }
@ -159,9 +179,13 @@ public class Mongo {
* @param port the port on which the database is running * @param port the port on which the database is running
* @throws UnknownHostException if the database host cannot be resolved * @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException * @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(String, int)}
*
*/ */
@Deprecated
public Mongo( String host , int port ) public Mongo( String host , int port )
throws UnknownHostException , MongoException { throws UnknownHostException {
this( new ServerAddress( host , port ) ); this( new ServerAddress( host , port ) );
} }
@ -170,10 +194,13 @@ public class Mongo {
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress * @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @param addr the database address * @param addr the database address
* @throws MongoException * @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(ServerAddress)}
*
*/ */
public Mongo( ServerAddress addr ) @Deprecated
throws MongoException { public Mongo( ServerAddress addr ) {
this( addr , new MongoOptions() ); this(addr, new MongoOptions());
} }
/** /**
@ -182,17 +209,13 @@ public class Mongo {
* @param addr the database address * @param addr the database address
* @param options default query options * @param options default query options
* @throws MongoException * @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(ServerAddress, MongoClientOptions)}
*
*/ */
public Mongo( ServerAddress addr , MongoOptions options ) @Deprecated
throws MongoException { public Mongo( ServerAddress addr , MongoOptions options ) {
_addr = addr; this(MongoAuthority.direct(addr), options);
_addrs = null;
_options = options;
_applyMongoOptions();
_connector = new DBTCPConnector( this , _addr );
_connector.start();
_cleaner = new DBCleanerThread();
_cleaner.start();
} }
/** /**
@ -206,9 +229,8 @@ public class Mongo {
* @throws MongoException * @throws MongoException
*/ */
@Deprecated @Deprecated
public Mongo( ServerAddress left , ServerAddress right ) public Mongo( ServerAddress left , ServerAddress right ) {
throws MongoException { this(left, right, new MongoOptions());
this( left , right , new MongoOptions() );
} }
/** /**
@ -223,94 +245,103 @@ public class Mongo {
* @throws MongoException * @throws MongoException
*/ */
@Deprecated @Deprecated
public Mongo( ServerAddress left , ServerAddress right , MongoOptions options ) public Mongo( ServerAddress left , ServerAddress right , MongoOptions options ) {
throws MongoException { this(MongoAuthority.dynamicSet(Arrays.asList(left, right)), options);
_addr = null;
_addrs = Arrays.asList( left , right );
_options = options;
_applyMongoOptions();
_connector = new DBTCPConnector( this , _addrs );
_connector.start();
_cleaner = new DBCleanerThread();
_cleaner.start();
} }
/** /**
* <p>Creates a Mongo based on a replica set, or pair. * Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default). If you pass in a single server in the list, * It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server, * the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.</p> * use the Mongo(ServerAddress) constructor.
* <p>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress * @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @param replicaSetSeeds Put as many servers as you can in the list and * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* the system will figure out the rest. * either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* sharded cluster.
* @throws MongoException * @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(java.util.List)}
*
*/ */
public Mongo( List<ServerAddress> replicaSetSeeds ) @Deprecated
throws MongoException { public Mongo( List<ServerAddress> seeds ) {
this( replicaSetSeeds , new MongoOptions() ); this( seeds , new MongoOptions() );
} }
/** /**
* <p>Creates a Mongo based on a replica set, or pair. * Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default).</p> * It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.
* <p>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress * @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @param replicaSetSeeds put as many servers as you can in the list. * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* the system will figure the rest out * either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* @param options default query options * sharded cluster.
* @param options for configuring this Mongo instance
* @throws MongoException * @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(java.util.List, MongoClientOptions)}
*
*/ */
public Mongo( List<ServerAddress> replicaSetSeeds , MongoOptions options ) @Deprecated
throws MongoException { public Mongo( List<ServerAddress> seeds , MongoOptions options ) {
this(MongoAuthority.dynamicSet(seeds), options);
_addr = null;
_addrs = replicaSetSeeds;
_options = options;
_applyMongoOptions();
_connector = new DBTCPConnector( this , _addrs );
_connector.start();
_cleaner = new DBCleanerThread();
_cleaner.start();
} }
/** /**
* Creates a Mongo described by a URI. * Creates a Mongo described by a URI.
* If only one address is used it will only connect to that node, otherwise it will discover all nodes. * If only one address is used it will only connect to that node, otherwise it will discover all nodes.
* If the URI contains database credentials, the database will be authenticated lazily on first use
* with those credentials.
* @param uri * @param uri
* @see MongoURI * @see MongoURI
* <p>examples: * <p>examples:
* <li>mongodb://127.0.0.1</li> * <li>mongodb://localhost</li>
* <li>mongodb://fred:foobar@127.0.0.1/</li> * <li>mongodb://fred:foobar@localhost/</li>
* </p> * </p>
* @throws MongoException * @throws MongoException
* @throws UnknownHostException * @throws UnknownHostException
* @dochub connections * @dochub connections
*
* @deprecated Replaced by {@link MongoClient#MongoClient(MongoClientURI)}
*
*/ */
@Deprecated
public Mongo( MongoURI uri ) throws UnknownHostException {
this(getMongoAuthorityFromURI(uri), uri.getOptions());
}
public Mongo( MongoURI uri ) /**
throws MongoException , UnknownHostException { * Creates a Mongo based on an authority and options.
* <p>
_options = uri.getOptions(); * Note: This constructor is provisional and is subject to change before the final release
*
* @param authority the authority
* @param options the options
*/
Mongo(MongoAuthority authority, MongoOptions options) {
logger.info("Creating Mongo instance (driver version " + getVersion() + ") with authority " + authority + " and options " + options);
_authority = authority;
_options = options;
_applyMongoOptions(); _applyMongoOptions();
if ( uri.getHosts().size() == 1 ){ _connector = new DBTCPConnector( this );
_addr = new ServerAddress( uri.getHosts().get(0) );
_addrs = null;
_connector = new DBTCPConnector( this , _addr );
}
else {
List<ServerAddress> replicaSetSeeds = new ArrayList<ServerAddress>( uri.getHosts().size() );
for ( String host : uri.getHosts() )
replicaSetSeeds.add( new ServerAddress( host ) );
_addr = null;
_addrs = replicaSetSeeds;
_connector = new DBTCPConnector( this , replicaSetSeeds );
}
_connector.start(); _connector.start();
_cleaner = new DBCleanerThread(); if (_options.cursorFinalizerEnabled) {
_cleaner.start(); _cleaner = new CursorCleanerThread();
_cleaner.start();
} else {
_cleaner = null;
}
} }
/** /**
@ -345,15 +376,13 @@ public class Mongo {
* @return * @return
* @throws MongoException * @throws MongoException
*/ */
@SuppressWarnings("rawtypes") public List<String> getDatabaseNames(){
public List<String> getDatabaseNames()
throws MongoException {
BasicDBObject cmd = new BasicDBObject(); BasicDBObject cmd = new BasicDBObject();
cmd.put("listDatabases", 1); cmd.put("listDatabases", 1);
CommandResult res = getDB( "admin" ).command(cmd, getOptions()); CommandResult res = getDB(ADMIN_DATABASE_NAME).command(cmd, getOptions());
res.throwOnError(); res.throwOnError();
List l = (List)res.get("databases"); List l = (List)res.get("databases");
@ -372,8 +401,7 @@ public class Mongo {
* @param dbName name of database to drop * @param dbName name of database to drop
* @throws MongoException * @throws MongoException
*/ */
public void dropDatabase(String dbName) public void dropDatabase(String dbName){
throws MongoException {
getDB( dbName ).dropDatabase(); getDB( dbName ).dropDatabase();
} }
@ -442,6 +470,7 @@ public class Mongo {
* Gets the list of server addresses currently seen by the connector. * Gets the list of server addresses currently seen by the connector.
* This includes addresses auto-discovered from a replica set. * This includes addresses auto-discovered from a replica set.
* @return * @return
* @throws MongoException
*/ */
public List<ServerAddress> getServerAddressList() { public List<ServerAddress> getServerAddressList() {
return _connector.getServerAddressList(); return _connector.getServerAddressList();
@ -457,12 +486,14 @@ public class Mongo {
_connector.close(); _connector.close();
} catch (final Throwable t) { /* nada */ } } catch (final Throwable t) { /* nada */ }
_cleaner.interrupt(); if (_cleaner != null) {
_cleaner.interrupt();
try { try {
_cleaner.join(); _cleaner.join();
} catch (InterruptedException e) { } catch (InterruptedException e) {
//end early //end early
}
} }
} }
@ -505,10 +536,10 @@ public class Mongo {
} }
/** /**
* makes it possible to run read queries on slave nodes * makes it possible to run read queries on secondary nodes
* *
* @deprecated Replaced with ReadPreference.SECONDARY * @deprecated Replaced with {@code ReadPreference.secondaryPreferred()}
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference.SECONDARY * @see ReadPreference#secondaryPreferred()
*/ */
@Deprecated @Deprecated
public void slaveOk(){ public void slaveOk(){
@ -552,8 +583,13 @@ public class Mongo {
*/ */
@SuppressWarnings("deprecation") @SuppressWarnings("deprecation")
void _applyMongoOptions() { void _applyMongoOptions() {
if (_options.slaveOk) slaveOk(); if (_options.slaveOk) {
setWriteConcern( _options.getWriteConcern() ); slaveOk();
}
if (_options.getReadPreference() != null) {
setReadPreference(_options.getReadPreference());
}
setWriteConcern(_options.getWriteConcern());
} }
/** /**
@ -568,23 +604,42 @@ public class Mongo {
* Note that this value may change over time depending on which server is master. * Note that this value may change over time depending on which server is master.
* If the size is not known yet, a request may be sent to the master server * If the size is not known yet, a request may be sent to the master server
* @return the maximum size * @return the maximum size
* @throws MongoException
*/ */
public int getMaxBsonObjectSize() { public int getMaxBsonObjectSize() {
int maxsize = _connector.getMaxBsonObjectSize(); int maxsize = _connector.getMaxBsonObjectSize();
if (maxsize == 0) if (maxsize == 0) {
maxsize = _connector.fetchMaxBsonObjectSize(); _connector.initDirectConnection();
}
maxsize = _connector.getMaxBsonObjectSize();
return maxsize > 0 ? maxsize : Bytes.MAX_OBJECT_SIZE; return maxsize > 0 ? maxsize : Bytes.MAX_OBJECT_SIZE;
} }
final ServerAddress _addr; boolean isMongosConnection() {
final List<ServerAddress> _addrs; return _connector.isMongosConnection();
}
private static MongoAuthority getMongoAuthorityFromURI(final MongoURI uri) throws UnknownHostException {
if ( uri.getHosts().size() == 1 ){
return MongoAuthority.direct(new ServerAddress(uri.getHosts().get(0)), uri.getCredentials());
}
else {
List<ServerAddress> replicaSetSeeds = new ArrayList<ServerAddress>(uri.getHosts().size());
for ( String host : uri.getHosts() )
replicaSetSeeds.add( new ServerAddress( host ) );
return MongoAuthority.dynamicSet(replicaSetSeeds, uri.getCredentials());
}
}
final MongoOptions _options; final MongoOptions _options;
final DBTCPConnector _connector; final DBTCPConnector _connector;
final ConcurrentMap<String,DB> _dbs = new ConcurrentHashMap<String,DB>(); final ConcurrentMap<String,DB> _dbs = new ConcurrentHashMap<String,DB>();
private WriteConcern _concern = WriteConcern.NORMAL; private WriteConcern _concern = WriteConcern.NORMAL;
private ReadPreference _readPref = ReadPreference.PRIMARY; private ReadPreference _readPref = ReadPreference.primary();
final Bytes.OptionHolder _netOptions = new Bytes.OptionHolder( null ); final Bytes.OptionHolder _netOptions = new Bytes.OptionHolder( null );
final DBCleanerThread _cleaner; final CursorCleanerThread _cleaner;
final MongoAuthority _authority;
com.massivecraft.mcore.xlib.bson.util.SimplePool<PoolOutputBuffer> _bufferPool = com.massivecraft.mcore.xlib.bson.util.SimplePool<PoolOutputBuffer> _bufferPool =
new com.massivecraft.mcore.xlib.bson.util.SimplePool<PoolOutputBuffer>( 1000 ){ new com.massivecraft.mcore.xlib.bson.util.SimplePool<PoolOutputBuffer>( 1000 ){
@ -600,33 +655,36 @@ public class Mongo {
* This is done automatically by the server at intervals, but can be forced for better reliability. * This is done automatically by the server at intervals, but can be forced for better reliability.
* @param async if true, the fsync will be done asynchronously on the server. * @param async if true, the fsync will be done asynchronously on the server.
* @return * @return
* @throws MongoException
*/ */
public CommandResult fsync(boolean async) { public CommandResult fsync(boolean async) {
DBObject cmd = new BasicDBObject("fsync", 1); DBObject cmd = new BasicDBObject("fsync", 1);
if (async) { if (async) {
cmd.put("async", 1); cmd.put("async", 1);
} }
return getDB("admin").command(cmd); return getDB(ADMIN_DATABASE_NAME).command(cmd);
} }
/** /**
* Forces the master server to fsync the RAM data to disk, then lock all writes. * Forces the master server to fsync the RAM data to disk, then lock all writes.
* The database will be read-only after this command returns. * The database will be read-only after this command returns.
* @return * @return
* @throws MongoException
*/ */
public CommandResult fsyncAndLock() { public CommandResult fsyncAndLock() {
DBObject cmd = new BasicDBObject("fsync", 1); DBObject cmd = new BasicDBObject("fsync", 1);
cmd.put("lock", 1); cmd.put("lock", 1);
return getDB("admin").command(cmd); return getDB(ADMIN_DATABASE_NAME).command(cmd);
} }
/** /**
* Unlocks the database, allowing the write operations to go through. * Unlocks the database, allowing the write operations to go through.
* This command may be asynchronous on the server, which means there may be a small delay before the database becomes writable. * This command may be asynchronous on the server, which means there may be a small delay before the database becomes writable.
* @return * @return
* @throws MongoException
*/ */
public DBObject unlock() { public DBObject unlock() {
DB db = getDB("admin"); DB db = getDB(ADMIN_DATABASE_NAME);
DBCollection col = db.getCollection("$cmd.sys.unlock"); DBCollection col = db.getCollection("$cmd.sys.unlock");
return col.findOne(); return col.findOne();
} }
@ -634,9 +692,10 @@ public class Mongo {
/** /**
* Returns true if the database is locked (read-only), false otherwise. * Returns true if the database is locked (read-only), false otherwise.
* @return * @return
* @throws MongoException
*/ */
public boolean isLocked() { public boolean isLocked() {
DB db = getDB("admin"); DB db = getDB(ADMIN_DATABASE_NAME);
DBCollection col = db.getCollection("$cmd.sys.inprog"); DBCollection col = db.getCollection("$cmd.sys.inprog");
BasicDBObject res = (BasicDBObject) col.findOne(); BasicDBObject res = (BasicDBObject) col.findOne();
if (res.containsField("fsyncLock")) { if (res.containsField("fsyncLock")) {
@ -663,7 +722,7 @@ public class Mongo {
* @throws UnknownHostException * @throws UnknownHostException
*/ */
public Mongo connect( MongoURI uri ) public Mongo connect( MongoURI uri )
throws MongoException , UnknownHostException { throws UnknownHostException {
String key = _toKey( uri ); String key = _toKey( uri );
@ -701,9 +760,9 @@ public class Mongo {
} }
class DBCleanerThread extends Thread { class CursorCleanerThread extends Thread {
DBCleanerThread() { CursorCleanerThread() {
setDaemon(true); setDaemon(true);
setName("MongoCleaner" + hashCode()); setName("MongoCleaner" + hashCode());
} }
@ -728,15 +787,18 @@ public class Mongo {
@Override @Override
public String toString() { public String toString() {
StringBuilder str = new StringBuilder("Mongo: "); return "Mongo{" +
List<ServerAddress> list = getServerAddressList(); "authority=" + _authority +
if (list == null || list.size() == 0) ", options=" + _options +
str.append("null"); '}';
else { }
for ( ServerAddress addr : list )
str.append( addr.toString() ).append( ',' ); /**
str.deleteCharAt( str.length() - 1 ); * Gets the authority, which includes the connection type, the server address(es), and the credentials.
}
return str.toString(); * @return the authority
*/
MongoAuthority getAuthority() {
return _authority;
} }
} }
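
A minimal sketch of the usage the rewritten class comment describes, using the MongoClient replacement that the new deprecation notes point to; host, port and database name are placeholders:

import java.net.UnknownHostException;
import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.ReadPreference;
import com.massivecraft.mcore.xlib.mongodb.WriteConcern;

public class MongoClientExample {
    public static void main(String[] args) throws UnknownHostException {
        MongoClient client = new MongoClient("localhost", 27017);
        client.setReadPreference(ReadPreference.secondaryPreferred()); // allow reads from secondaries
        client.setWriteConcern(WriteConcern.ACKNOWLEDGED);             // wait for server acknowledgment
        DB db = client.getDB("mcore");                                 // database name is a placeholder
        System.out.println(client.getDatabaseNames());
        System.out.println(db.getName());
        client.close();
    }
}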

View File

@ -0,0 +1,214 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.util.annotations.Immutable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
* This class represents the authority to which this client is connecting. It includes
* both the server address(es) and optional authentication credentials. The class name is informed by the
* <a href="http://tools.ietf.org/html/rfc3986#section-3.2">URI RFC</a>, which refers to the username/host/port
* part of a URI as the "authority".
*
* @since 2.11.0
*/
@Immutable
class MongoAuthority {
private final Type type;
private final List<ServerAddress> serverAddresses;
private final MongoCredentialsStore credentialsStore;
/**
* Enumeration of the connection types.
*/
enum Type {
Direct,
Set
}
/**
*
* @param serverAddress
* @return
*/
public static MongoAuthority direct(ServerAddress serverAddress) {
return direct(serverAddress, (MongoCredential) null);
}
/**
*
* @param serverAddress
* @param credentials
* @return
*/
public static MongoAuthority direct(ServerAddress serverAddress, MongoCredential credentials) {
return direct(serverAddress, new MongoCredentialsStore(credentials));
}
/**
*
* @param serverAddress
* @param credentialsStore
* @return
*/
public static MongoAuthority direct(ServerAddress serverAddress, MongoCredentialsStore credentialsStore) {
return new MongoAuthority(serverAddress, credentialsStore);
}
/**
*
* @param serverAddresses
* @return
*/
public static MongoAuthority dynamicSet(List<ServerAddress> serverAddresses) {
return dynamicSet(serverAddresses, (MongoCredential) null);
}
/**
*
* @param serverAddresses
* @param credentials
* @return
*/
public static MongoAuthority dynamicSet(List<ServerAddress> serverAddresses, MongoCredential credentials) {
return dynamicSet(serverAddresses, new MongoCredentialsStore(credentials));
}
/**
*
* @param serverAddresses
* @param credentialsStore
* @return
*/
public static MongoAuthority dynamicSet(List<ServerAddress> serverAddresses, MongoCredentialsStore credentialsStore) {
return new MongoAuthority(serverAddresses, Type.Set, credentialsStore);
}
/**
* Constructs an instance with a single server address and a store of authentication credentials.
* This will be a direct connection, even if it's part of a replica set.
*
* @param serverAddress the server address of a mongo server
*/
private MongoAuthority(final ServerAddress serverAddress, MongoCredentialsStore credentialsStore) {
if (serverAddress == null) {
throw new IllegalArgumentException("serverAddress can not be null");
}
if (credentialsStore == null) {
throw new IllegalArgumentException("credentialsStore can not be null");
}
this.serverAddresses = Arrays.asList(serverAddress);
this.credentialsStore = credentialsStore;
this.type = Type.Direct;
}
/**
* Constructs an instance with a list of server addresses, which may either be a list of mongos servers
* or a list of members of a replica set, and a store of authentication credentials.
*
* @param serverAddresses the server addresses
* @param credentialsStore the credentials store
*/
private MongoAuthority(final List<ServerAddress> serverAddresses, Type type, MongoCredentialsStore credentialsStore) {
if (serverAddresses == null) {
throw new IllegalArgumentException("serverAddresses can not be null");
}
if (credentialsStore == null) {
throw new IllegalArgumentException("credentialsStore can not be null");
}
if (type == null) {
throw new IllegalArgumentException("type can not be null");
}
if (type == Type.Direct) {
throw new IllegalArgumentException("type can not be Direct with a list of server addresses");
}
this.type = type;
this.serverAddresses = new ArrayList<ServerAddress>(serverAddresses);
this.credentialsStore = credentialsStore;
}
/**
* Returns the list of server addresses.
*
* @return the server address list
*/
public List<ServerAddress> getServerAddresses() {
return serverAddresses == null ? null : Collections.unmodifiableList(serverAddresses);
}
/**
* Gets the credentials store. If this instance was constructed with a single credential, this store will
* contain it.
*
* @return the credentials store
*/
public MongoCredentialsStore getCredentialsStore() {
return credentialsStore;
}
/**
* Gets the authority type
*
* @return the authority type
*/
public Type getType() {
return type;
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final MongoAuthority that = (MongoAuthority) o;
if (!credentialsStore.equals(that.credentialsStore)) return false;
if (!serverAddresses.equals(that.serverAddresses)) return false;
if (type != that.type) return false;
return true;
}
@Override
public int hashCode() {
int result = credentialsStore.hashCode();
result = 31 * result + serverAddresses.hashCode();
result = 31 * result + type.hashCode();
return result;
}
@Override
public String toString() {
return "MongoAuthority{" +
"type=" + type +
", serverAddresses=" + serverAddresses +
", credentials=" + credentialsStore +
'}';
}
}
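
A minimal sketch of the two factory paths above, mirroring how the Mongo and MongoClient constructors in this commit call them. MongoAuthority is package-private, so this only compiles from inside com.massivecraft.mcore.xlib.mongodb; hosts and ports are placeholders:

// Only valid inside the same package, since MongoAuthority is package-private.
static void authorityExamples() throws java.net.UnknownHostException {
    ServerAddress single = new ServerAddress("localhost", 27017);
    MongoAuthority direct = MongoAuthority.direct(single);            // Type.Direct: a single node
    java.util.List<ServerAddress> seeds = java.util.Arrays.asList(
            new ServerAddress("localhost", 27017),
            new ServerAddress("localhost", 27018));
    MongoAuthority set = MongoAuthority.dynamicSet(seeds);            // Type.Set: replica set or mongos seeds
    System.out.println(direct); // toString() prints type, server addresses and credentials
    System.out.println(set);
}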

View File

@ -0,0 +1,286 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import java.net.UnknownHostException;
import java.util.List;
/**
* A MongoDB client with internal connection pooling. For most applications, you should have one MongoClient instance
* for the entire JVM.
* <p>
* The following are equivalent, and all connect to the local database running on the default port:
* <pre>
* MongoClient mongoClient1 = new MongoClient();
* MongoClient mongoClient1 = new MongoClient("localhost");
* MongoClient mongoClient2 = new MongoClient("localhost", 27017);
* MongoClient mongoClient4 = new MongoClient(new ServerAddress("localhost"));
* MongoClient mongoClient5 = new MongoClient(new ServerAddress("localhost"), new MongoClientOptions.Builder().build());
* </pre>
* <p>
* You can connect to a
* <a href="http://www.mongodb.org/display/DOCS/Replica+Sets">replica set</a> using the Java driver by passing
* a ServerAddress list to the MongoClient constructor. For example:
* <pre>
* MongoClient mongoClient = new MongoClient(Arrays.asList(
* new ServerAddress("localhost", 27017),
* new ServerAddress("localhost", 27018),
* new ServerAddress("localhost", 27019)));
* </pre>
* You can connect to a sharded cluster using the same constructor. MongoClient will auto-detect whether the servers are
* a list of replica set members or a list of mongos servers.
* <p>
* By default, all read and write operations will be made on the primary, but it's possible to read from secondaries
* by changing the read preference:
* <pre>
* mongoClient.setReadPreference(ReadPreference.secondaryPreferred());
* </pre>
* By default, all write operations will wait for acknowledgment by the server, as the default write concern is
* {@code WriteConcern.ACKNOWLEDGED}.
* <p>
* Note: This class supersedes the {@code Mongo} class. While it extends {@code Mongo}, it differs from it in that
* the default write concern is to wait for acknowledgment from the server of all write operations. In addition, its
* constructors accept instances of {@code MongoClientOptions} and {@code MongoClientURI}, which both also
* set the same default write concern.
* <p>
* In general, users of this class will pick up all of the default options specified in {@code MongoClientOptions}. In
* particular, note that the default value of the connectionsPerHost option has been increased to 100 from the old
* default value of 10 used by the superseded {@code Mongo} class.
*
* @see ReadPreference#primary()
* @see com.massivecraft.mcore.xlib.mongodb.WriteConcern#ACKNOWLEDGED
* @see MongoClientOptions
* @see MongoClientURI
* @since 2.10.0
*/
public class MongoClient extends Mongo {
private final MongoClientOptions options;
/**
* Creates an instance based on a (single) mongodb node (localhost, default port).
*
* @throws UnknownHostException
* @throws MongoException
*/
public MongoClient() throws UnknownHostException {
this(new ServerAddress());
}
/**
* Creates a Mongo instance based on a (single) mongodb node.
*
* @param host server to connect to in format host[:port]
* @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException
*/
public MongoClient(String host) throws UnknownHostException {
this(new ServerAddress(host));
}
/**
* Creates a Mongo instance based on a (single) mongodb node (default port).
*
* @param host server to connect to in format host[:port]
* @param options default query options
* @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException
*/
public MongoClient(String host, MongoClientOptions options) throws UnknownHostException {
this(new ServerAddress(host), options);
}
/**
* Creates a Mongo instance based on a (single) mongodb node.
*
* @param host the database's host address
* @param port the port on which the database is running
* @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException
*/
public MongoClient(String host, int port) throws UnknownHostException {
this(new ServerAddress(host, port));
}
/**
* Creates a Mongo instance based on a (single) mongodb node
*
* @param addr the database address
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
*/
public MongoClient(ServerAddress addr) {
this(addr, new MongoClientOptions.Builder().build());
}
/**
* Creates a Mongo instance based on a (single) mongodb node and a list of credentials
*
* @param addr the database address
* @param credentialsList the list of credentials used to authenticate all connections
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @since 2.11.0
*/
public MongoClient(ServerAddress addr, List<MongoCredential> credentialsList) {
this(addr, credentialsList, new MongoClientOptions.Builder().build());
}
/**
* Creates a Mongo instance based on a (single) mongo node using a given ServerAddress and default options.
*
* @param addr the database address
* @param options default options
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
*/
public MongoClient(ServerAddress addr, MongoClientOptions options) {
this(addr, null, options);
}
/**
* Creates a Mongo instance based on a (single) mongo node using a given ServerAddress and default options.
*
* @param addr the database address
* @param credentialsList the list of credentials used to authenticate all connections
* @param options default options
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @since 2.11.0
*/
@SuppressWarnings("deprecation")
public MongoClient(ServerAddress addr, List<MongoCredential> credentialsList, MongoClientOptions options) {
super(MongoAuthority.direct(addr, new MongoCredentialsStore(credentialsList)), new MongoOptions(options));
this.options = options;
}
/**
* Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.
* <p/>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* sharded cluster.
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
*/
public MongoClient(List<ServerAddress> seeds) {
this(seeds, null, new MongoClientOptions.Builder().build());
}
/**
* Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.
* <p/>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* sharded cluster.
* @param credentialsList the list of credentials used to authenticate all connections
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @since 2.11.0
*/
public MongoClient(List<ServerAddress> seeds, List<MongoCredential> credentialsList) {
this(seeds, credentialsList, new MongoClientOptions.Builder().build());
}
/**
* Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.
* <p/>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* sharded cluster.
* @param options default options
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
*/
public MongoClient(List<ServerAddress> seeds, MongoClientOptions options) {
this(seeds, null, options);
}
/**
* Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.
* <p/>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* sharded cluster.
* @param credentialsList the list of credentials used to authenticate all connections
* @param options default options
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @since 2.11.0
*/
@SuppressWarnings("deprecation")
public MongoClient(List<ServerAddress> seeds, List<MongoCredential> credentialsList, MongoClientOptions options) {
super(MongoAuthority.dynamicSet(seeds, new MongoCredentialsStore(credentialsList)), new MongoOptions(options));
this.options = options;
}
/**
* Creates a Mongo described by a URI.
* If only one address is used it will only connect to that node, otherwise it will discover all nodes.
* @param uri the URI
* @throws MongoException
* @throws UnknownHostException
* @see MongoURI
* @dochub connections
*/
@SuppressWarnings("deprecation")
public MongoClient(MongoClientURI uri) throws UnknownHostException {
super(new MongoURI(uri));
this.options = uri.getOptions();
}
/**
* Gets the list of credentials that this client authenticates all connections with
*
* @return the list of credentials
* @since 2.11.0
*/
public List<MongoCredential> getCredentialsList() {
return getAuthority().getCredentialsStore().asList();
}
public MongoClientOptions getMongoClientOptions() {
return options;
}
}
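
A minimal sketch of the URI constructor shown above, assuming the MongoClientURI(String) constructor from the 2.11 driver (not shown in this hunk). The connection string is a placeholder; credentials embedded in it surface through getCredentialsList(), and getMongoClientOptions() reflects the new defaults such as connectionsPerHost of 100:

import java.net.UnknownHostException;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.MongoClientURI;

public class MongoClientUriExample {
    public static void main(String[] args) throws UnknownHostException {
        // Placeholder connection string; credentials in it are authenticated lazily on first use.
        MongoClientURI uri = new MongoClientURI("mongodb://fred:foobar@localhost/test");
        MongoClient client = new MongoClient(uri);
        System.out.println(client.getCredentialsList());
        System.out.println(client.getMongoClientOptions().getConnectionsPerHost()); // 100 by default
        client.close();
    }
}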

View File

@ -0,0 +1,600 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.util.annotations.Immutable;
import javax.net.SocketFactory;
/**
* Various settings to control the behavior of a <code>MongoClient</code>.
* <p/>
* Note: This class is a replacement for {@code MongoOptions}, to be used with {@code MongoClient}. The main difference
* in behavior is that the default write concern is {@code WriteConcern.ACKNOWLEDGED}.
*
* @see MongoClient
* @since 2.10.0
*/
@Immutable
public class MongoClientOptions {
/**
* A builder for MongoClientOptions so that MongoClientOptions can be immutable, and to support easier
* construction through chaining.
*
* @since 2.10.0
*/
public static class Builder {
private String description;
private int connectionsPerHost = 100;
private int threadsAllowedToBlockForConnectionMultiplier = 5;
private int maxWaitTime = 1000 * 60 * 2;
private int connectTimeout = 1000 * 10;
private int socketTimeout = 0;
private boolean socketKeepAlive = false;
private boolean autoConnectRetry = false;
private long maxAutoConnectRetryTime = 0;
private ReadPreference readPreference = ReadPreference.primary();
private DBDecoderFactory dbDecoderFactory = DefaultDBDecoder.FACTORY;
private DBEncoderFactory dbEncoderFactory = DefaultDBEncoder.FACTORY;
private WriteConcern writeConcern = WriteConcern.ACKNOWLEDGED;
private SocketFactory socketFactory = SocketFactory.getDefault();
private boolean cursorFinalizerEnabled = true;
private boolean alwaysUseMBeans = false;
/**
* Sets the description.
*
* @param description the description of this MongoClient
* @return {@code this}
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getDescription()
*/
public Builder description(final String description) {
this.description = description;
return this;
}
/**
* Sets the maximum number of connections per host.
*
* @param connectionsPerHost maximum number of connections
* @return {@code this}
* @throws IllegalArgumentException if <code>connectionsPerHost < 1</code>
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getConnectionsPerHost()
*/
public Builder connectionsPerHost(final int connectionsPerHost) {
if (connectionsPerHost < 1) {
throw new IllegalArgumentException("Minimum value is 1");
}
this.connectionsPerHost = connectionsPerHost;
return this;
}
/**
* Sets the multiplier for number of threads allowed to block waiting for a connection.
*
* @param threadsAllowedToBlockForConnectionMultiplier
* the multiplier
* @return {@code this}
* @throws IllegalArgumentException if <code>threadsAllowedToBlockForConnectionMultiplier < 1</code>
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getThreadsAllowedToBlockForConnectionMultiplier()
*/
public Builder threadsAllowedToBlockForConnectionMultiplier(final int threadsAllowedToBlockForConnectionMultiplier) {
if (threadsAllowedToBlockForConnectionMultiplier < 1) {
throw new IllegalArgumentException("Minimum value is 1");
}
this.threadsAllowedToBlockForConnectionMultiplier = threadsAllowedToBlockForConnectionMultiplier;
return this;
}
/**
* Sets the maximum time that a thread will block waiting for a connection.
*
* @param maxWaitTime the maximum wait time
* @return {@code this}
* @throws IllegalArgumentException if <code>maxWaitTime < 0</code>
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getMaxWaitTime()
*/
public Builder maxWaitTime(final int maxWaitTime) {
if (maxWaitTime < 0) {
throw new IllegalArgumentException("Minimum value is 0");
}
this.maxWaitTime = maxWaitTime;
return this;
}
/**
* Sets the connection timeout.
*
* @param connectTimeout the connection timeout
* @return {@code this}
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getConnectTimeout()
*/
public Builder connectTimeout(final int connectTimeout) {
if (connectTimeout < 0) {
throw new IllegalArgumentException("Minimum value is 0");
}
this.connectTimeout = connectTimeout;
return this;
}
/**
* Sets the socket timeout.
*
* @param socketTimeout the socket timeout
* @return {@code this}
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getSocketTimeout()
*/
public Builder socketTimeout(final int socketTimeout) {
if (socketTimeout < 0) {
throw new IllegalArgumentException("Minimum value is 0");
}
this.socketTimeout = socketTimeout;
return this;
}
/**
* Sets whether socket keep alive is enabled.
*
* @param socketKeepAlive keep alive
* @return {@code this}
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#isSocketKeepAlive()
*/
public Builder socketKeepAlive(final boolean socketKeepAlive) {
this.socketKeepAlive = socketKeepAlive;
return this;
}
/**
* Sets whether auto connect retry is enabled.
*
* @param autoConnectRetry auto connect retry
* @return {@code this}
* @see MongoClientOptions#isAutoConnectRetry()
*/
public Builder autoConnectRetry(final boolean autoConnectRetry) {
this.autoConnectRetry = autoConnectRetry;
return this;
}
/**
* Sets the maximum auto connect retry time.
*
* @param maxAutoConnectRetryTime the maximum auto connect retry time
* @return {@code this}
* @see MongoClientOptions#getMaxAutoConnectRetryTime()
*/
public Builder maxAutoConnectRetryTime(final long maxAutoConnectRetryTime) {
if (maxAutoConnectRetryTime < 0) {
throw new IllegalArgumentException("Minimum value is 0");
}
this.maxAutoConnectRetryTime = maxAutoConnectRetryTime;
return this;
}
/**
* Sets the read preference.
*
* @param readPreference read preference
* @return {@code this}
* @see MongoClientOptions#getReadPreference()
*/
public Builder readPreference(final ReadPreference readPreference) {
if (readPreference == null) {
throw new IllegalArgumentException("null is not a legal value");
}
this.readPreference = readPreference;
return this;
}
/**
* Sets the decoder factory.
*
* @param dbDecoderFactory the decoder factory
* @return {@code this}
* @see MongoClientOptions#getDbDecoderFactory()
*/
public Builder dbDecoderFactory(final DBDecoderFactory dbDecoderFactory) {
if (dbDecoderFactory == null) {
throw new IllegalArgumentException("null is not a legal value");
}
this.dbDecoderFactory = dbDecoderFactory;
return this;
}
/**
* Sets the encoder factory.
*
* @param dbEncoderFactory the encoder factory
* @return {@code this}
* @see MongoClientOptions#getDbEncoderFactory()
*/
public Builder dbEncoderFactory(final DBEncoderFactory dbEncoderFactory) {
if (dbEncoderFactory == null) {
throw new IllegalArgumentException("null is not a legal value");
}
this.dbEncoderFactory = dbEncoderFactory;
return this;
}
/**
* Sets the write concern.
*
* @param writeConcern the write concern
* @return {@code this}
* @see MongoClientOptions#getWriteConcern()
*/
public Builder writeConcern(final WriteConcern writeConcern) {
if (writeConcern == null) {
throw new IllegalArgumentException("null is not a legal value");
}
this.writeConcern = writeConcern;
return this;
}
/**
* Sets the socket factory.
*
* @param socketFactory the socket factory
* @return {@code this}
* @see MongoClientOptions#getSocketFactory()
*/
public Builder socketFactory(final SocketFactory socketFactory) {
if (socketFactory == null) {
throw new IllegalArgumentException("null is not a legal value");
}
this.socketFactory = socketFactory;
return this;
}
/**
* Sets whether cursor finalizers are enabled.
*
* @param cursorFinalizerEnabled whether cursor finalizers are enabled.
* @return {@code this}
* @see MongoClientOptions#isCursorFinalizerEnabled()
*/
public Builder cursorFinalizerEnabled(final boolean cursorFinalizerEnabled) {
this.cursorFinalizerEnabled = cursorFinalizerEnabled;
return this;
}
/**
* Sets whether JMX beans registered by the driver should always be MBeans, regardless of whether the VM is
* Java 6 or greater. If false, the driver will use MXBeans if the VM is Java 6 or greater, and use MBeans if
* the VM is Java 5.
*
* @param alwaysUseMBeans true if driver should always use MBeans, regardless of VM version
* @return this
* @see MongoClientOptions#isAlwaysUseMBeans()
*/
public Builder alwaysUseMBeans(final boolean alwaysUseMBeans) {
this.alwaysUseMBeans = alwaysUseMBeans;
return this;
}
/**
* Sets defaults to be what they are in {@code MongoOptions}.
*
* @return {@code this}
* @see MongoOptions
*/
public Builder legacyDefaults() {
connectionsPerHost = 10;
writeConcern = WriteConcern.NORMAL;
return this;
}
/**
* Build an instance of MongoClientOptions.
*
* @return the options from this builder
*/
public MongoClientOptions build() {
return new MongoClientOptions(this);
}
}
/**
* Create a new Builder instance. This is a convenience method, equivalent to {@code new MongoClientOptions.Builder()}.
*
* @return a new instance of a Builder
*/
public static Builder builder() {
return new Builder();
}
/**
* Gets the description for this MongoClient, which is used in various places like logging and JMX.
* <p/>
* Default is null.
*
* @return the description
*/
public String getDescription() {
return description;
}
/**
* The maximum number of connections allowed per host for this MongoClient instance.
* Those connections will be kept in a pool when idle.
* Once the pool is exhausted, any operation requiring a connection will block waiting for an available connection.
* <p/>
* Default is 100.
*
* @return the maximum size of the connection pool per host
* @see MongoClientOptions#getThreadsAllowedToBlockForConnectionMultiplier()
*/
public int getConnectionsPerHost() {
return connectionsPerHost;
}
/**
     * This multiplier, multiplied with the connectionsPerHost setting, gives the maximum number of threads that
     * may be waiting for a connection to become available from the pool. All further threads will get an exception right
     * away. For example, if connectionsPerHost is 10 and threadsAllowedToBlockForConnectionMultiplier is 5, then up to 50
     * threads can wait for a connection.
* <p/>
* Default is 5.
*
* @return the multiplier
*/
public int getThreadsAllowedToBlockForConnectionMultiplier() {
return threadsAllowedToBlockForConnectionMultiplier;
}
/**
* The maximum wait time in milliseconds that a thread may wait for a connection to become available.
* <p/>
* Default is 120,000. A value of 0 means that it will not wait. A negative value means to wait indefinitely.
*
* @return the maximum wait time.
*/
public int getMaxWaitTime() {
return maxWaitTime;
}
/**
* The connection timeout in milliseconds. A value of 0 means no timeout.
* It is used solely when establishing a new connection {@link java.net.Socket#connect(java.net.SocketAddress, int) }
* <p/>
* Default is 10,000.
*
* @return the socket connect timeout
*/
public int getConnectTimeout() {
return connectTimeout;
}
/**
* The socket timeout in milliseconds.
* It is used for I/O socket read and write operations {@link java.net.Socket#setSoTimeout(int)}
* <p/>
* Default is 0 and means no timeout.
*
* @return the socket timeout
*/
public int getSocketTimeout() {
return socketTimeout;
}
/**
* This flag controls the socket keep alive feature that keeps a connection alive through firewalls {@link java.net.Socket#setKeepAlive(boolean)}
* <p/>
     * Default is false.
*
* @return whether keep-alive is enabled on each socket
*/
public boolean isSocketKeepAlive() {
return socketKeepAlive;
}
/**
     * If true, the driver will keep trying to connect to the same server in case the socket connection cannot be established.
     * There is a maximum amount of time to keep retrying, which is 15s by default.
     * This can be useful to avoid some exceptions being thrown when a server is down temporarily by blocking the operations.
     * It also can be useful to smooth the transition to a new master (so that a new master is elected within the retry time).
     * Note that when using this flag:
     * - for a replica set, the driver will try to connect to the old master for that time, instead of failing over to the new one right away
     * - this does not prevent exceptions from being thrown in read/write operations on the socket, which must be handled by the application
* <p/>
* Even if this flag is false, the driver already has mechanisms to automatically recreate broken connections and retry the read operations.
* Default is false.
*
* @return whether socket connect is retried
*/
public boolean isAutoConnectRetry() {
return autoConnectRetry;
}
/**
* The maximum amount of time in MS to spend retrying to open connection to the same server.
* Default is 0, which means to use the default 15s if autoConnectRetry is on.
*
* @return the maximum socket connect retry time.
*/
public long getMaxAutoConnectRetryTime() {
return maxAutoConnectRetryTime;
}
/**
* The read preference to use for queries, map-reduce, aggregation, and count.
* <p/>
* Default is {@code ReadPreference.primary()}.
*
* @return the read preference
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference#primary()
*/
public ReadPreference getReadPreference() {
return readPreference;
}
/**
* Override the decoder factory. Default is for the standard Mongo Java driver configuration.
*
* @return the decoder factory
*/
public DBDecoderFactory getDbDecoderFactory() {
return dbDecoderFactory;
}
/**
* Override the encoder factory. Default is for the standard Mongo Java driver configuration.
*
* @return the encoder factory
*/
public DBEncoderFactory getDbEncoderFactory() {
return dbEncoderFactory;
}
/**
* The write concern to use.
* <p/>
* Default is {@code WriteConcern.ACKNOWLEDGED}.
*
* @return the write concern
* @see WriteConcern#ACKNOWLEDGED
*/
public WriteConcern getWriteConcern() {
return writeConcern;
}
/**
* The socket factory for creating sockets to the mongo server.
* <p/>
* Default is SocketFactory.getDefault()
*
* @return the socket factory
*/
public SocketFactory getSocketFactory() {
return socketFactory;
}
/**
     * Gets whether there is a finalize method created that cleans up instances of DBCursor that the client
* does not close. If you are careful to always call the close method of DBCursor, then this can safely be set to false.
* <p/>
* Default is true.
*
* @return whether finalizers are enabled on cursors
* @see DBCursor
* @see com.massivecraft.mcore.xlib.mongodb.DBCursor#close()
*/
public boolean isCursorFinalizerEnabled() {
return cursorFinalizerEnabled;
}
/**
* Gets whether JMX beans registered by the driver should always be MBeans, regardless of whether the VM is
* Java 6 or greater. If false, the driver will use MXBeans if the VM is Java 6 or greater, and use MBeans if
* the VM is Java 5.
* <p>
* Default is false.
     * </p>
     *
     * @return true if the driver should always use MBeans, regardless of VM version
     */
public boolean isAlwaysUseMBeans() {
return alwaysUseMBeans;
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final MongoClientOptions that = (MongoClientOptions) o;
if (alwaysUseMBeans != that.alwaysUseMBeans) return false;
if (autoConnectRetry != that.autoConnectRetry) return false;
if (connectTimeout != that.connectTimeout) return false;
if (connectionsPerHost != that.connectionsPerHost) return false;
if (cursorFinalizerEnabled != that.cursorFinalizerEnabled) return false;
if (maxAutoConnectRetryTime != that.maxAutoConnectRetryTime) return false;
if (maxWaitTime != that.maxWaitTime) return false;
if (socketKeepAlive != that.socketKeepAlive) return false;
if (socketTimeout != that.socketTimeout) return false;
if (threadsAllowedToBlockForConnectionMultiplier != that.threadsAllowedToBlockForConnectionMultiplier)
return false;
if (!dbDecoderFactory.equals(that.dbDecoderFactory)) return false;
if (!dbEncoderFactory.equals(that.dbEncoderFactory)) return false;
if (description != null ? !description.equals(that.description) : that.description != null) return false;
if (!readPreference.equals(that.readPreference)) return false;
// Compare SocketFactory Class, since some equivalent SocketFactory instances are not equal to each other
if (!socketFactory.getClass().equals(that.socketFactory.getClass())) return false;
if (!writeConcern.equals(that.writeConcern)) return false;
return true;
}
@Override
public int hashCode() {
int result = description != null ? description.hashCode() : 0;
result = 31 * result + connectionsPerHost;
result = 31 * result + threadsAllowedToBlockForConnectionMultiplier;
result = 31 * result + maxWaitTime;
result = 31 * result + connectTimeout;
result = 31 * result + socketTimeout;
result = 31 * result + (socketKeepAlive ? 1 : 0);
result = 31 * result + (autoConnectRetry ? 1 : 0);
result = 31 * result + (int) (maxAutoConnectRetryTime ^ (maxAutoConnectRetryTime >>> 32));
result = 31 * result + readPreference.hashCode();
result = 31 * result + dbDecoderFactory.hashCode();
result = 31 * result + dbEncoderFactory.hashCode();
result = 31 * result + writeConcern.hashCode();
result = 31 * result + socketFactory.hashCode();
result = 31 * result + (cursorFinalizerEnabled ? 1 : 0);
result = 31 * result + (alwaysUseMBeans ? 1 : 0);
return result;
}
private MongoClientOptions(final Builder builder) {
description = builder.description;
connectionsPerHost = builder.connectionsPerHost;
threadsAllowedToBlockForConnectionMultiplier = builder.threadsAllowedToBlockForConnectionMultiplier;
maxWaitTime = builder.maxWaitTime;
connectTimeout = builder.connectTimeout;
socketTimeout = builder.socketTimeout;
autoConnectRetry = builder.autoConnectRetry;
socketKeepAlive = builder.socketKeepAlive;
maxAutoConnectRetryTime = builder.maxAutoConnectRetryTime;
readPreference = builder.readPreference;
dbDecoderFactory = builder.dbDecoderFactory;
dbEncoderFactory = builder.dbEncoderFactory;
writeConcern = builder.writeConcern;
socketFactory = builder.socketFactory;
cursorFinalizerEnabled = builder.cursorFinalizerEnabled;
alwaysUseMBeans = builder.alwaysUseMBeans;
}
private final String description;
private final int connectionsPerHost;
private final int threadsAllowedToBlockForConnectionMultiplier;
private final int maxWaitTime;
private final int connectTimeout;
private final int socketTimeout;
private final boolean socketKeepAlive;
private final boolean autoConnectRetry;
private final long maxAutoConnectRetryTime;
private final ReadPreference readPreference;
private final DBDecoderFactory dbDecoderFactory;
private final DBEncoderFactory dbEncoderFactory;
private final WriteConcern writeConcern;
private final SocketFactory socketFactory;
private final boolean cursorFinalizerEnabled;
private final boolean alwaysUseMBeans;
}
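For orientation in this vendored copy, here is a minimal sketch of how the builder above is normally used. It is not part of the driver source: the host, port, and option values are illustrative assumptions, and it presumes the repackaged classes under com.massivecraft.mcore.xlib.mongodb are used directly.

import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.MongoClientOptions;
import com.massivecraft.mcore.xlib.mongodb.ReadPreference;
import com.massivecraft.mcore.xlib.mongodb.ServerAddress;
import com.massivecraft.mcore.xlib.mongodb.WriteConcern;

public class MongoClientOptionsExample {
    public static void main(String[] args) throws Exception {
        // Illustrative values: 10 pooled connections per host and a multiplier of 5
        // allow up to 50 threads to wait for a connection before an exception is thrown.
        MongoClientOptions options = MongoClientOptions.builder()
                .connectionsPerHost(10)
                .threadsAllowedToBlockForConnectionMultiplier(5)
                .connectTimeout(10 * 1000)               // 10 seconds to establish the socket
                .socketTimeout(0)                        // 0 = no read/write timeout
                .readPreference(ReadPreference.primary())
                .writeConcern(WriteConcern.ACKNOWLEDGED)
                .build();

        // "localhost:27017" is an assumed address for this sketch.
        MongoClient client = new MongoClient(new ServerAddress("localhost", 27017), options);
        try {
            System.out.println(client.getMongoClientOptions());
        } finally {
            client.close();
        }
    }
}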

View File

@ -0,0 +1,612 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import javax.net.ssl.SSLSocketFactory;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Logger;
/**
* Represents a <a href="http://www.mongodb.org/display/DOCS/Connections">URI</a>
* which can be used to create a MongoClient instance. The URI describes the hosts to
* be used and options.
* <p>The format of the URI is:
* <pre>
* mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]
* </pre>
* <ul>
* <li>{@code mongodb://} is a required prefix to identify that this is a string in the standard connection format.</li>
* <li>{@code username:password@} are optional. If given, the driver will attempt to login to a database after
* connecting to a database server.</li>
* <li>{@code host1} is the only required part of the URI. It identifies a server address to connect to.</li>
* <li>{@code :portX} is optional and defaults to :27017 if not provided.</li>
* <li>{@code /database} is the name of the database to login to and thus is only relevant if the
* {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.</li>
* <li>{@code ?options} are connection options. Note that if {@code database} is absent there is still a {@code /}
* required between the last host and the {@code ?} introducing the options. Options are name=value pairs and the pairs
* are separated by "&amp;". For backwards compatibility, ";" is accepted as a separator in addition to "&amp;",
* but should be considered as deprecated.</li>
* </ul>
* <p>
* The Java driver supports the following options (case insensitive):
* <p>
* Replica set configuration:
* </p>
* <ul>
* <li>{@code replicaSet=name}: Implies that the hosts given are a seed list, and the driver will attempt to find
* all members of the set.</li>
* </ul>
* <p>Connection Configuration:</p>
* <ul>
* <li>{@code ssl=true|false}: Whether to connect using SSL.</li>
* <li>{@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.</li>
* <li>{@code socketTimeoutMS=ms}: How long a send or receive on a socket can take before timing out.</li>
* </ul>
* <p>Connection pool configuration:</p>
* <ul>
* <li>{@code maxPoolSize=n}: The maximum number of connections in the connection pool.</li>
* <li>{@code waitQueueMultiple=n} : this multiplier, multiplied with the maxPoolSize setting, gives the maximum number of
* threads that may be waiting for a connection to become available from the pool. All further threads will get an
* exception right away.</li>
* <li>{@code waitQueueTimeoutMS=ms}: The maximum wait time in milliseconds that a thread may wait for a connection to
* become available.</li>
* </ul>
* <p>Write concern configuration:</p>
* <ul>
* <li>{@code safe=true|false}
* <ul>
* <li>{@code true}: the driver sends a getLastError command after every update to ensure that the update succeeded
* (see also {@code w} and {@code wtimeoutMS}).</li>
* <li>{@code false}: the driver does not send a getLastError command after every update.</li>
* </ul>
* </li>
* <li>{@code w=wValue}
* <ul>
* <li>The driver adds { w : wValue } to the getLastError command. Implies {@code safe=true}.</li>
* <li>wValue is typically a number, but can be any string in order to allow for specifications like
* {@code "majority"}</li>
* </ul>
* </li>
* <li>{@code wtimeoutMS=ms}
* <ul>
* <li>The driver adds { wtimeout : ms } to the getlasterror command. Implies {@code safe=true}.</li>
* <li>Used in combination with {@code w}</li>
* </ul>
* </li>
* </ul>
* <p>Read preference configuration:</p>
* <ul>
* <li>{@code slaveOk=true|false}: Whether a driver connected to a replica set will send reads to slaves/secondaries.</li>
* <li>{@code readPreference=enum}: The read preference for this connection. If set, it overrides any slaveOk value.
* <ul>
* <li>Enumerated values:
* <ul>
* <li>{@code primary}</li>
* <li>{@code primaryPreferred}</li>
* <li>{@code secondary}</li>
* <li>{@code secondaryPreferred}</li>
* <li>{@code nearest}</li>
* </ul>
* </li>
* </ul>
* </li>
* <li>{@code readPreferenceTags=string}. A representation of a tag set as a comma-separated list of colon-separated
 * key-value pairs, e.g. {@code "dc:ny,rack:1"}. Spaces are stripped from beginning and end of all keys and values.
 * To specify a list of tag sets, use multiple readPreferenceTags,
* e.g. {@code readPreferenceTags=dc:ny,rack:1;readPreferenceTags=dc:ny;readPreferenceTags=}
* <ul>
* <li>Note the empty value for the last one, which means match any secondary as a last resort.</li>
* <li>Order matters when using multiple readPreferenceTags.</li>
* </ul>
* </li>
* </ul>
* <p>Authentication configuration:</p>
* <ul>
 * <li>{@code authMechanism=MONGODB-CR|GSSAPI}: The authentication mechanism to use if a credential was supplied.
* The default is MONGODB-CR, which is the native MongoDB Challenge Response mechanism.
* </li>
 * <li>{@code authSource=string}: The source of the authentication credentials. This is typically the database in
 * which the credentials have been created. The value defaults to the database specified in the path portion of the URI.
 * If the database is specified in neither place, the default value is "admin". For GSSAPI, it's not necessary to specify
 * a source.
 * </li>
 * </ul>
* <p>
* Note: This class is a replacement for {@code MongoURI}, to be used with {@code MongoClient}. The main difference
* in behavior is that the default write concern is {@code WriteConcern.ACKNOWLEDGED}.
* </p>
*
* @see MongoClientOptions for the default values for all options
* @since 2.10.0
*/
public class MongoClientURI {
private static final String PREFIX = "mongodb://";
private static final String UTF_8 = "UTF-8";
/**
     * Creates a MongoClientURI from the given string.
*
* @param uri the URI
* @dochub connections
*/
public MongoClientURI(final String uri) {
this(uri, new MongoClientOptions.Builder());
}
/**
     * Creates a MongoClientURI from the given URI string and MongoClientOptions.Builder. The builder can be configured
* with default options, which may be overridden by options specified in the URI string.
*
* @param uri the URI
* @param builder a Builder
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientURI#getOptions()
* @since 2.11.0
*/
public MongoClientURI(String uri, MongoClientOptions.Builder builder) {
try {
this.uri = uri;
if (!uri.startsWith(PREFIX))
throw new IllegalArgumentException("uri needs to start with " + PREFIX);
uri = uri.substring(PREFIX.length());
String serverPart;
String nsPart;
String optionsPart;
String userName = null;
char[] password = null;
{
int idx = uri.lastIndexOf("/");
if (idx < 0) {
if (uri.contains("?")) {
throw new IllegalArgumentException("URI contains options without trailing slash");
}
serverPart = uri;
nsPart = null;
optionsPart = "";
} else {
serverPart = uri.substring(0, idx);
nsPart = uri.substring(idx + 1);
idx = nsPart.indexOf("?");
if (idx >= 0) {
optionsPart = nsPart.substring(idx + 1);
nsPart = nsPart.substring(0, idx);
} else {
optionsPart = "";
}
}
}
{ // userName,password,hosts
List<String> all = new LinkedList<String>();
int idx = serverPart.indexOf("@");
if (idx > 0) {
String authPart = serverPart.substring(0, idx);
serverPart = serverPart.substring(idx + 1);
idx = authPart.indexOf(":");
if (idx == -1) {
userName = URLDecoder.decode(authPart, UTF_8);
} else {
userName = URLDecoder.decode(authPart.substring(0, idx), UTF_8);
password = URLDecoder.decode(authPart.substring(idx + 1), UTF_8).toCharArray();
}
}
Collections.addAll(all, serverPart.split(","));
hosts = Collections.unmodifiableList(all);
}
if (nsPart != null && !nsPart.isEmpty()) { // database,_collection
int idx = nsPart.indexOf(".");
if (idx < 0) {
database = nsPart;
collection = null;
} else {
database = nsPart.substring(0, idx);
collection = nsPart.substring(idx + 1);
}
} else {
database = null;
collection = null;
}
Map<String, List<String>> optionsMap = parseOptions(optionsPart);
options = createOptions(optionsMap, builder);
credentials = createCredentials(optionsMap, userName, password, database);
warnOnUnsupportedOptions(optionsMap);
} catch (UnsupportedEncodingException e) {
throw new MongoInternalException("This should not happen", e);
}
}
static Set<String> generalOptionsKeys = new HashSet<String>();
static Set<String> authKeys = new HashSet<String>();
static Set<String> readPreferenceKeys = new HashSet<String>();
static Set<String> writeConcernKeys = new HashSet<String>();
static Set<String> allKeys = new HashSet<String>();
static {
generalOptionsKeys.add("maxpoolsize");
generalOptionsKeys.add("waitqueuemultiple");
generalOptionsKeys.add("waitqueuetimeoutms");
generalOptionsKeys.add("connecttimeoutms");
        generalOptionsKeys.add("sockettimeoutms");
generalOptionsKeys.add("autoconnectretry");
generalOptionsKeys.add("ssl");
readPreferenceKeys.add("slaveok");
readPreferenceKeys.add("readpreference");
readPreferenceKeys.add("readpreferencetags");
writeConcernKeys.add("safe");
writeConcernKeys.add("w");
writeConcernKeys.add("wtimeout");
writeConcernKeys.add("fsync");
writeConcernKeys.add("j");
authKeys.add("authmechanism");
authKeys.add("authsource");
allKeys.addAll(generalOptionsKeys);
allKeys.addAll(authKeys);
allKeys.addAll(readPreferenceKeys);
allKeys.addAll(writeConcernKeys);
}
private void warnOnUnsupportedOptions(Map<String, List<String>> optionsMap) {
for (String key : optionsMap.keySet()) {
if (!allKeys.contains(key)) {
LOGGER.warning("Unknown or Unsupported Option '" + key + "'");
}
}
}
private MongoClientOptions createOptions(Map<String, List<String>> optionsMap, MongoClientOptions.Builder builder) {
for (String key : generalOptionsKeys) {
String value = getLastValue(optionsMap, key);
if (value == null) {
continue;
}
if (key.equals("maxpoolsize")) {
builder.connectionsPerHost(Integer.parseInt(value));
} else if (key.equals("waitqueuemultiple")) {
builder.threadsAllowedToBlockForConnectionMultiplier(Integer.parseInt(value));
} else if (key.equals("waitqueuetimeoutms")) {
builder.maxWaitTime(Integer.parseInt(value));
} else if (key.equals("connecttimeoutms")) {
builder.connectTimeout(Integer.parseInt(value));
} else if (key.equals("sockettimeoutms")) {
builder.socketTimeout(Integer.parseInt(value));
} else if (key.equals("autoconnectretry")) {
builder.autoConnectRetry(_parseBoolean(value));
} else if (key.equals("ssl")) {
if (_parseBoolean(value)) {
builder.socketFactory(SSLSocketFactory.getDefault());
}
}
}
WriteConcern writeConcern = createWriteConcern(optionsMap);
ReadPreference readPreference = createReadPreference(optionsMap);
if (writeConcern != null) {
builder.writeConcern(writeConcern);
}
if (readPreference != null) {
builder.readPreference(readPreference);
}
return builder.build();
}
private WriteConcern createWriteConcern(final Map<String, List<String>> optionsMap) {
Boolean safe = null;
String w = null;
int wTimeout = 0;
boolean fsync = false;
boolean journal = false;
for (String key : writeConcernKeys) {
String value = getLastValue(optionsMap, key);
if (value == null) {
continue;
}
if (key.equals("safe")) {
safe = _parseBoolean(value);
} else if (key.equals("w")) {
w = value;
} else if (key.equals("wtimeout")) {
wTimeout = Integer.parseInt(value);
} else if (key.equals("fsync")) {
fsync = _parseBoolean(value);
} else if (key.equals("j")) {
journal = _parseBoolean(value);
}
}
return buildWriteConcern(safe, w, wTimeout, fsync, journal);
}
private ReadPreference createReadPreference(final Map<String, List<String>> optionsMap) {
Boolean slaveOk = null;
String readPreferenceType = null;
DBObject firstTagSet = null;
List<DBObject> remainingTagSets = new ArrayList<DBObject>();
for (String key : readPreferenceKeys) {
String value = getLastValue(optionsMap, key);
if (value == null) {
continue;
}
if (key.equals("slaveok")) {
slaveOk = _parseBoolean(value);
} else if (key.equals("readpreference")) {
readPreferenceType = value;
} else if (key.equals("readpreferencetags")) {
for (String cur : optionsMap.get(key)) {
DBObject tagSet = getTagSet(cur.trim());
if (firstTagSet == null) {
firstTagSet = tagSet;
} else {
remainingTagSets.add(tagSet);
}
}
}
}
return buildReadPreference(readPreferenceType, firstTagSet, remainingTagSets, slaveOk);
}
private MongoCredential createCredentials(Map<String, List<String>> optionsMap, final String userName,
final char[] password, String database) {
if (userName == null) {
return null;
}
if (database == null) {
database = "admin";
}
String mechanism = MongoCredential.MONGODB_CR_MECHANISM;
String authSource = database;
for (String key : authKeys) {
String value = getLastValue(optionsMap, key);
if (value == null) {
continue;
}
if (key.equals("authmechanism")) {
mechanism = value;
} else if (key.equals("authsource")) {
authSource = value;
}
}
if (mechanism.equals(MongoCredential.GSSAPI_MECHANISM)) {
return MongoCredential.createGSSAPICredential(userName);
}
else if (mechanism.equals(MongoCredential.MONGODB_CR_MECHANISM)) {
return MongoCredential.createMongoCRCredential(userName, authSource, password);
}
else {
throw new IllegalArgumentException("Unsupported authMechanism: " + mechanism);
}
}
private String getLastValue(final Map<String, List<String>> optionsMap, final String key) {
List<String> valueList = optionsMap.get(key);
if (valueList == null) {
return null;
}
return valueList.get(valueList.size() - 1);
}
private Map<String, List<String>> parseOptions(String optionsPart) {
Map<String, List<String>> optionsMap = new HashMap<String, List<String>>();
for (String _part : optionsPart.split("&|;")) {
int idx = _part.indexOf("=");
if (idx >= 0) {
String key = _part.substring(0, idx).toLowerCase();
String value = _part.substring(idx + 1);
List<String> valueList = optionsMap.get(key);
if (valueList == null) {
valueList = new ArrayList<String>(1);
}
valueList.add(value);
optionsMap.put(key, valueList);
}
}
return optionsMap;
}
private ReadPreference buildReadPreference(final String readPreferenceType, final DBObject firstTagSet,
final List<DBObject> remainingTagSets, final Boolean slaveOk) {
if (readPreferenceType != null) {
if (firstTagSet == null) {
return ReadPreference.valueOf(readPreferenceType);
} else {
return ReadPreference.valueOf(readPreferenceType, firstTagSet,
remainingTagSets.toArray(new DBObject[remainingTagSets.size()]));
}
} else if (slaveOk != null) {
if (slaveOk.equals(Boolean.TRUE)) {
return ReadPreference.secondaryPreferred();
}
}
return null;
}
private WriteConcern buildWriteConcern(final Boolean safe, final String w,
final int wTimeout, final boolean fsync, final boolean journal) {
if (w != null || wTimeout != 0 || fsync || journal) {
if (w == null) {
return new WriteConcern(1, wTimeout, fsync, journal);
} else {
try {
return new WriteConcern(Integer.parseInt(w), wTimeout, fsync, journal);
} catch (NumberFormatException e) {
return new WriteConcern(w, wTimeout, fsync, journal);
}
}
} else if (safe != null) {
if (safe) {
return WriteConcern.ACKNOWLEDGED;
} else {
return WriteConcern.UNACKNOWLEDGED;
}
}
return null;
}
private DBObject getTagSet(String tagSetString) {
DBObject tagSet = new BasicDBObject();
if (tagSetString.length() > 0) {
for (String tag : tagSetString.split(",")) {
String[] tagKeyValuePair = tag.split(":");
if (tagKeyValuePair.length != 2) {
throw new IllegalArgumentException("Bad read preference tags: " + tagSetString);
}
tagSet.put(tagKeyValuePair[0].trim(), tagKeyValuePair[1].trim());
}
}
return tagSet;
}
    boolean _parseBoolean(String _in) {
        if (_in == null) {
            return false;
        }
        String in = _in.trim().toLowerCase();
        return in.equals("1") || in.equals("true") || in.equals("yes");
    }
// ---------------------------------
/**
* Gets the username
*
* @return the username
*/
public String getUsername() {
return credentials != null ? credentials.getUserName() : null;
}
/**
* Gets the password
*
* @return the password
*/
public char[] getPassword() {
return credentials != null ? credentials.getPassword() : null;
}
/**
* Gets the list of hosts
*
* @return the host list
*/
public List<String> getHosts() {
return hosts;
}
/**
* Gets the database name
*
* @return the database name
*/
public String getDatabase() {
return database;
}
/**
* Gets the collection name
*
* @return the collection name
*/
public String getCollection() {
return collection;
}
/**
* Get the unparsed URI.
*
* @return the URI
*/
public String getURI() {
return uri;
}
/**
* Gets the credentials.
*
* @return the credentials
*/
public MongoCredential getCredentials() {
return credentials;
}
/**
* Gets the options
*
* @return the MongoClientOptions based on this URI.
*/
public MongoClientOptions getOptions() {
return options;
}
// ---------------------------------
private final MongoClientOptions options;
private final MongoCredential credentials;
private final List<String> hosts;
private final String database;
private final String collection;
private final String uri;
static final Logger LOGGER = Logger.getLogger("com.mongodb.MongoURI");
@Override
public String toString() {
return uri;
}
}
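A short usage sketch of the URI parsing described above. The connection string, credentials, and host names are made-up values; the MongoClient constructor that accepts a MongoClientURI is the standard entry point since driver 2.10.

import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.MongoClientURI;

public class MongoClientURIExample {
    public static void main(String[] args) throws Exception {
        // Made-up connection string exercising a few of the options documented above.
        MongoClientURI uri = new MongoClientURI(
                "mongodb://user:secret@host1:27017,host2:27017/mydb"
                + "?maxPoolSize=20&w=majority"
                + "&readPreference=secondaryPreferred&readPreferenceTags=dc:ny,rack:1");

        System.out.println(uri.getHosts());      // [host1:27017, host2:27017]
        System.out.println(uri.getDatabase());   // mydb
        System.out.println(uri.getOptions().getWriteConcern());

        // The parsed URI can be handed straight to a MongoClient.
        MongoClient client = new MongoClient(uri);
        try {
            System.out.println(client.getDB(uri.getDatabase()).getName());
        } finally {
            client.close();
        }
    }
}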

View File

@ -0,0 +1,28 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* This class exists only so that, on Java 6 and above, the driver can create instances of an MXBean.
*/
class MongoConnectionPool extends DBPortPool implements MongoConnectionPoolMXBean {
MongoConnectionPool(ServerAddress addr, MongoOptions options) {
super(addr, options);
}
}

View File

@ -0,0 +1,62 @@
/**
 * Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.mongodb.util.ConnectionPoolStatisticsBean;
/**
* A standard MXBean interface for a Mongo connection pool, for use on Java 6 and above virtual machines.
* <p>
* This interface is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases.
*/
public interface MongoConnectionPoolMXBean {
/**
* Gets the name of the pool.
*
* @return the name of the pool
*/
String getName();
/**
* Gets the maximum allowed size of the pool, including idle and in-use members.
*
* @return the maximum size
*/
int getMaxSize();
/**
* Gets the host that this connection pool is connecting to.
*
* @return the host
*/
String getHost();
/**
* Gets the port that this connection pool is connecting to.
*
* @return the port
*/
int getPort();
/**
* Gets the statistics for this connection pool.
*
* @return the connection pool statistics
*/
ConnectionPoolStatisticsBean getStatistics();
}

View File

@ -0,0 +1,175 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.util.annotations.Immutable;
import java.util.Arrays;
/**
* Represents credentials to authenticate to a mongo server, as well as the source of the credentials and
* the authentication mechanism to use.
*
* @since 2.11.0
*/
@Immutable
public final class MongoCredential {
/**
* The GSSAPI mechanism. See the <a href="http://tools.ietf.org/html/rfc4752">RFC</a>.
*/
public static final String GSSAPI_MECHANISM = "GSSAPI";
/**
* The MongoDB Challenge Response mechanism.
*/
public static final String MONGODB_CR_MECHANISM = "MONGODB-CR";
private final String mechanism;
private final String userName;
private final String source;
private final char[] password;
/**
* Creates a MongoCredential instance for the MongoDB Challenge Response protocol.
*
* @param userName the user name
* @param database the database where the user is defined
* @param password the user's password
* @return the credential
*/
public static MongoCredential createMongoCRCredential(String userName, String database, char[] password) {
return new MongoCredential(MONGODB_CR_MECHANISM, userName, database, password);
}
/**
* Creates a MongoCredential instance for the GSSAPI SASL mechanism.
*
* @param userName the user name
* @return the credential
*/
public static MongoCredential createGSSAPICredential(String userName) {
return new MongoCredential(GSSAPI_MECHANISM, userName, "$external", null);
}
/**
*
* Constructs a new instance using the given mechanism, userName, source, and password
*
* @param mechanism the authentication mechanism
* @param userName the user name
* @param source the source of the user name, typically a database name
* @param password the password
*/
MongoCredential(final String mechanism, final String userName, final String source, final char[] password) {
if (mechanism == null) {
throw new IllegalArgumentException("mechanism can not be null");
}
if (userName == null) {
throw new IllegalArgumentException("username can not be null");
}
if (mechanism.equals(MONGODB_CR_MECHANISM) && password == null) {
throw new IllegalArgumentException("Password can not be null for " + MONGODB_CR_MECHANISM + " mechanism");
}
if (mechanism.equals(GSSAPI_MECHANISM) && password != null) {
throw new IllegalArgumentException("Password must be null for the " + GSSAPI_MECHANISM + " mechanism");
}
this.mechanism = mechanism;
this.userName = userName;
this.source = source;
this.password = password != null ? password.clone() : null;
}
/**
* Gets the mechanism
*
* @return the mechanism.
*/
public String getMechanism() {
return mechanism;
}
/**
* Gets the user name
*
* @return the user name. Can never be null.
*/
public String getUserName() {
return userName;
}
/**
* Gets the source of the user name, typically the name of the database where the user is defined.
*
     * @return the source. Can never be null.
*/
public String getSource() {
return source;
}
/**
* Gets the password.
*
* @return the password. Can be null for some mechanisms.
*/
public char[] getPassword() {
if (password == null) {
return null;
}
return password.clone();
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final MongoCredential that = (MongoCredential) o;
if (!mechanism.equals(that.mechanism)) return false;
if (!Arrays.equals(password, that.password)) return false;
if (!source.equals(that.source)) return false;
if (!userName.equals(that.userName)) return false;
return true;
}
@Override
public int hashCode() {
int result = mechanism.hashCode();
result = 31 * result + userName.hashCode();
result = 31 * result + source.hashCode();
result = 31 * result + (password != null ? Arrays.hashCode(password) : 0);
return result;
}
@Override
public String toString() {
return "MongoCredential{" +
"mechanism='" + mechanism + '\'' +
", userName='" + userName + '\'' +
", source='" + source + '\'' +
", password=<hidden>" +
'}';
}
}
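A hedged sketch of how the factory methods above are combined with a client; the user names, database, and password are placeholders, and the credentials-list constructor is the one introduced with MongoClient in 2.11.0.

import java.util.Arrays;

import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.MongoCredential;
import com.massivecraft.mcore.xlib.mongodb.ServerAddress;

public class MongoCredentialExample {
    public static void main(String[] args) throws Exception {
        // MONGODB-CR credential for user "app" defined in database "mydb" (placeholder values).
        MongoCredential credential =
                MongoCredential.createMongoCRCredential("app", "mydb", "secret".toCharArray());
        System.out.println(credential);           // the password is printed as <hidden>

        // GSSAPI credentials carry no password and use the implicit "$external" source.
        MongoCredential kerberos = MongoCredential.createGSSAPICredential("app@EXAMPLE.COM");
        System.out.println(kerberos.getSource());

        // A MongoClient accepts a list of credentials, one per source database.
        MongoClient client = new MongoClient(
                new ServerAddress("localhost", 27017), Arrays.asList(credential));
        client.close();
    }
}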

View File

@ -0,0 +1,147 @@
/*
* Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.util.annotations.ThreadSafe;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* An effectively immutable store of credentials to mongo servers. It enforces the invariant that there can be at most
 * one credential for each database. It allows at most one credential with a null database.
*
* There is still a package-protected method to add a new credentials to the store, but it's only there
* to support DB.authenticate, which allows you to add new credentials at any point during the life of a MongoClient.
*
* @since 2.11.0
*/
@ThreadSafe
class MongoCredentialsStore {
private final Map<String, MongoCredential> credentialsMap = new HashMap<String, MongoCredential>();
private volatile Set<String> allDatabasesWithCredentials = new HashSet<String>();
/**
* Creates an empty store
*/
public MongoCredentialsStore() {
}
/**
* Creates a store with a single credentials.
*
* @param credentials A single credentials, which may be null.
*/
public MongoCredentialsStore(MongoCredential credentials) {
if (credentials == null) {
return;
}
add(credentials);
}
/**
* Creates a store with the list of credentials.
*
* @param credentialsList The list of credentials
*/
public MongoCredentialsStore(Iterable<MongoCredential> credentialsList) {
if (credentialsList == null) {
return;
}
for (MongoCredential cur : credentialsList) {
add(cur);
}
}
/**
* Adds a new credentials.
*
* @param credentials the new credentials
* @throws IllegalArgumentException if there already exist different credentials for the same database
*/
synchronized void add(MongoCredential credentials) {
MongoCredential existingCredentials = credentialsMap.get(credentials.getSource());
if (existingCredentials != null) {
if (existingCredentials.equals(credentials)) {
return;
}
throw new IllegalArgumentException("Can't add more than one credentials for the same database");
}
credentialsMap.put(credentials.getSource(), credentials);
allDatabasesWithCredentials = new HashSet<String>(allDatabasesWithCredentials);
allDatabasesWithCredentials.add(credentials.getSource());
}
/**
* Gets the set of databases for which there are credentials stored.
*
* @return an unmodifiable set of database names. Can contain the null string.
*/
public Set<String> getDatabases() {
return Collections.unmodifiableSet(allDatabasesWithCredentials);
}
/**
* Gets the stored credentials for the given database.
*
* @param database the database. This can be null, to get the credentials with the null database.
     * @return the credentials for the given database. Can be null if none are stored.
*/
public synchronized MongoCredential get(String database) {
return credentialsMap.get(database);
}
/**
* Gets the MongoCredentials in this map as a List
* @return the list of credentials
*/
public synchronized List<MongoCredential> asList() {
return new ArrayList<MongoCredential>(credentialsMap.values());
}
@Override
public synchronized boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final MongoCredentialsStore that = (MongoCredentialsStore) o;
if (!credentialsMap.equals(that.credentialsMap)) return false;
return true;
}
@Override
public synchronized int hashCode() {
return credentialsMap.hashCode();
}
@Override
public String toString() {
return "{" +
"credentials=" + credentialsMap +
'}';
}
}
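The store is package-private, so the following is only an illustrative same-package sketch of the invariant it enforces; the class name and values are hypothetical and not part of the driver source.

package com.massivecraft.mcore.xlib.mongodb;

// Hypothetical same-package sketch of the one-credential-per-database rule.
class MongoCredentialsStoreSketch {
    public static void main(String[] args) {
        MongoCredentialsStore store = new MongoCredentialsStore(
                MongoCredential.createMongoCRCredential("app", "mydb", "secret".toCharArray()));

        // Re-adding an equal credential is a no-op.
        store.add(MongoCredential.createMongoCRCredential("app", "mydb", "secret".toCharArray()));

        // A different credential for the same database violates the invariant.
        try {
            store.add(MongoCredential.createMongoCRCredential("other", "mydb", "pw".toCharArray()));
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }

        System.out.println(store.getDatabases()); // [mydb]
    }
}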

View File

@ -96,12 +96,21 @@ public class MongoException extends RuntimeException {
private static final long serialVersionUID = -4415279469780082174L; private static final long serialVersionUID = -4415279469780082174L;
Network( String msg , java.io.IOException ioe ){ /**
*
* @param msg the message
* @param ioe the cause
*/
public Network( String msg , java.io.IOException ioe ){
super( -2 , msg , ioe ); super( -2 , msg , ioe );
_ioe = ioe; _ioe = ioe;
} }
Network( java.io.IOException ioe ){ /**
*
* @param ioe the cause
*/
public Network( java.io.IOException ioe ){
super( ioe.toString() , ioe ); super( ioe.toString() , ioe );
_ioe = ioe; _ioe = ioe;
} }
@ -110,14 +119,14 @@ public class MongoException extends RuntimeException {
} }
/** /**
* Subclass of MongoException representing a duplicate key exception * Subclass of WriteConcernException representing a duplicate key error
*/ */
public static class DuplicateKey extends MongoException { public static class DuplicateKey extends WriteConcernException {
private static final long serialVersionUID = -4415279469780082174L; private static final long serialVersionUID = -4415279469780082174L;
DuplicateKey( int code , String msg ){ public DuplicateKey(final CommandResult commandResult) {
super( code , msg ); super(commandResult);
} }
} }
@ -133,10 +142,10 @@ public class MongoException extends RuntimeException {
/** /**
* *
* @param cursorId * @param cursorId cursor
* @param serverAddress * @param serverAddress server address
*/ */
CursorNotFound(long cursorId, ServerAddress serverAddress){ public CursorNotFound(long cursorId, ServerAddress serverAddress){
super( -5 , "cursor " + cursorId + " not found on server " + serverAddress ); super( -5 , "cursor " + cursorId + " not found on server " + serverAddress );
this.cursorId = cursorId; this.cursorId = cursorId;
this.serverAddress = serverAddress; this.serverAddress = serverAddress;

View File

@ -0,0 +1,36 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* A non-checked exception indicating that the driver has been interrupted by a call to Thread.interrupt.
*
* @see Thread#interrupt()
* @see InterruptedException
*/
public class MongoInterruptedException extends MongoException {
private static final long serialVersionUID = -4110417867718417860L;
public MongoInterruptedException(final InterruptedException e) {
super("A driver operation has been interrupted", e);
}
public MongoInterruptedException(final String message, final InterruptedException e) {
super(message, e);
}
}
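A brief sketch of where this exception typically surfaces: a blocking driver call on a thread that gets interrupted. It assumes a mongod reachable on localhost and placeholder database/collection names; whether the interrupt is actually observed inside the driver is timing-dependent.

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBCollection;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.MongoInterruptedException;

public class MongoInterruptedExample {
    public static void main(String[] args) throws Exception {
        final MongoClient client = new MongoClient("localhost");
        final DBCollection coll = client.getDB("test").getCollection("docs");

        Thread worker = new Thread(new Runnable() {
            public void run() {
                try {
                    // If the interrupt lands while the driver is blocked (for example while
                    // waiting for a pooled connection), it may surface as a MongoInterruptedException.
                    coll.insert(new BasicDBObject("x", 1));
                } catch (MongoInterruptedException e) {
                    System.out.println("driver call interrupted: " + e.getMessage());
                }
            }
        });
        worker.start();
        worker.interrupt();
        worker.join();
        client.close();
    }
}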

View File

@ -21,25 +21,53 @@ package com.massivecraft.mcore.xlib.mongodb;
import javax.net.SocketFactory; import javax.net.SocketFactory;
/** /**
* Various settings for the driver. * Various settings for a Mongo instance. Not thread safe, and superseded by MongoClientOptions. This class may
* Not thread safe. * be deprecated in a future release.
*
* @see MongoClientOptions
* @see MongoClient
*/ */
public class MongoOptions { public class MongoOptions {
@Deprecated
public MongoOptions(){ public MongoOptions(){
reset(); reset();
} }
/**
* @deprecated Replaced by {@link MongoClientOptions}
*/
@Deprecated
public MongoOptions(final MongoClientOptions options) {
connectionsPerHost = options.getConnectionsPerHost();
threadsAllowedToBlockForConnectionMultiplier = options.getThreadsAllowedToBlockForConnectionMultiplier();
maxWaitTime = options.getMaxWaitTime();
connectTimeout = options.getConnectTimeout();
socketTimeout = options.getSocketTimeout();
socketKeepAlive = options.isSocketKeepAlive();
autoConnectRetry = options.isAutoConnectRetry();
maxAutoConnectRetryTime = options.getMaxAutoConnectRetryTime();
readPreference = options.getReadPreference();
dbDecoderFactory = options.getDbDecoderFactory();
dbEncoderFactory = options.getDbEncoderFactory();
socketFactory = options.getSocketFactory();
description = options.getDescription();
cursorFinalizerEnabled = options.isCursorFinalizerEnabled();
writeConcern = options.getWriteConcern();
slaveOk = false; // default to false, as readPreference field will be responsible
}
public void reset(){ public void reset(){
connectionsPerHost = Bytes.CONNECTIONS_PER_HOST; connectionsPerHost = Bytes.CONNECTIONS_PER_HOST;
threadsAllowedToBlockForConnectionMultiplier = 5; threadsAllowedToBlockForConnectionMultiplier = 5;
maxWaitTime = 1000 * 60 * 2; maxWaitTime = 1000 * 60 * 2;
connectTimeout = 0; connectTimeout = 1000 * 10;
socketTimeout = 0; socketTimeout = 0;
socketKeepAlive = false; socketKeepAlive = false;
autoConnectRetry = false; autoConnectRetry = false;
maxAutoConnectRetryTime = 0; maxAutoConnectRetryTime = 0;
slaveOk = false; slaveOk = false;
readPreference = null;
safe = false; safe = false;
w = 0; w = 0;
wtimeout = 0; wtimeout = 0;
@ -49,6 +77,8 @@ public class MongoOptions {
dbEncoderFactory = DefaultDBEncoder.FACTORY; dbEncoderFactory = DefaultDBEncoder.FACTORY;
socketFactory = SocketFactory.getDefault(); socketFactory = SocketFactory.getDefault();
description = null; description = null;
cursorFinalizerEnabled = true;
alwaysUseMBeans = false;
} }
public MongoOptions copy() { public MongoOptions copy() {
@ -62,6 +92,7 @@ public class MongoOptions {
m.autoConnectRetry = autoConnectRetry; m.autoConnectRetry = autoConnectRetry;
m.maxAutoConnectRetryTime = maxAutoConnectRetryTime; m.maxAutoConnectRetryTime = maxAutoConnectRetryTime;
m.slaveOk = slaveOk; m.slaveOk = slaveOk;
m.readPreference = readPreference;
m.safe = safe; m.safe = safe;
m.w = w; m.w = w;
m.wtimeout = wtimeout; m.wtimeout = wtimeout;
@ -71,21 +102,88 @@ public class MongoOptions {
m.dbEncoderFactory = dbEncoderFactory; m.dbEncoderFactory = dbEncoderFactory;
m.socketFactory = socketFactory; m.socketFactory = socketFactory;
m.description = description; m.description = description;
m.cursorFinalizerEnabled = cursorFinalizerEnabled;
m.alwaysUseMBeans = alwaysUseMBeans;
return m; return m;
} }
/** /**
* Helper method to return the appropriate WriteConcern instance based * Helper method to return the appropriate WriteConcern instance based on the current related options settings.
* on the current related options settings.
**/ **/
public WriteConcern getWriteConcern(){ public WriteConcern getWriteConcern() {
// Ensure we only set writeconcern once; if non-default w, etc skip safe (implied) if (writeConcern != null) {
if ( w != 0 || wtimeout != 0 || fsync ) return writeConcern;
return new WriteConcern( w , wtimeout , fsync ); } else if ( w != 0 || wtimeout != 0 || fsync | j) {
else if (safe) return new WriteConcern( w , wtimeout , fsync, j );
} else if (safe) {
return WriteConcern.SAFE; return WriteConcern.SAFE;
else } else {
return WriteConcern.NORMAL; return WriteConcern.NORMAL;
}
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final MongoOptions options = (MongoOptions) o;
if (autoConnectRetry != options.autoConnectRetry) return false;
if (connectTimeout != options.connectTimeout) return false;
if (connectionsPerHost != options.connectionsPerHost) return false;
if (cursorFinalizerEnabled != options.cursorFinalizerEnabled) return false;
if (fsync != options.fsync) return false;
if (j != options.j) return false;
if (maxAutoConnectRetryTime != options.maxAutoConnectRetryTime) return false;
if (maxWaitTime != options.maxWaitTime) return false;
if (safe != options.safe) return false;
if (slaveOk != options.slaveOk) return false;
if (socketKeepAlive != options.socketKeepAlive) return false;
if (socketTimeout != options.socketTimeout) return false;
if (threadsAllowedToBlockForConnectionMultiplier != options.threadsAllowedToBlockForConnectionMultiplier)
return false;
if (w != options.w) return false;
if (wtimeout != options.wtimeout) return false;
if (dbDecoderFactory != null ? !dbDecoderFactory.equals(options.dbDecoderFactory) : options.dbDecoderFactory != null)
return false;
if (dbEncoderFactory != null ? !dbEncoderFactory.equals(options.dbEncoderFactory) : options.dbEncoderFactory != null)
return false;
if (description != null ? !description.equals(options.description) : options.description != null) return false;
if (readPreference != null ? !readPreference.equals(options.readPreference) : options.readPreference != null)
return false;
if (socketFactory != null ? !socketFactory.equals(options.socketFactory) : options.socketFactory != null)
return false;
if (writeConcern != null ? !writeConcern.equals(options.writeConcern) : options.writeConcern != null)
return false;
return true;
}
@Override
public int hashCode() {
int result = description != null ? description.hashCode() : 0;
result = 31 * result + connectionsPerHost;
result = 31 * result + threadsAllowedToBlockForConnectionMultiplier;
result = 31 * result + maxWaitTime;
result = 31 * result + connectTimeout;
result = 31 * result + socketTimeout;
result = 31 * result + (socketKeepAlive ? 1 : 0);
result = 31 * result + (autoConnectRetry ? 1 : 0);
result = 31 * result + (int) (maxAutoConnectRetryTime ^ (maxAutoConnectRetryTime >>> 32));
result = 31 * result + (slaveOk ? 1 : 0);
result = 31 * result + (readPreference != null ? readPreference.hashCode() : 0);
result = 31 * result + (dbDecoderFactory != null ? dbDecoderFactory.hashCode() : 0);
result = 31 * result + (dbEncoderFactory != null ? dbEncoderFactory.hashCode() : 0);
result = 31 * result + (safe ? 1 : 0);
result = 31 * result + w;
result = 31 * result + wtimeout;
result = 31 * result + (fsync ? 1 : 0);
result = 31 * result + (j ? 1 : 0);
result = 31 * result + (socketFactory != null ? socketFactory.hashCode() : 0);
result = 31 * result + (cursorFinalizerEnabled ? 1 : 0);
result = 31 * result + (writeConcern != null ? writeConcern.hashCode() : 0);
return result;
} }
/** /**
@ -118,9 +216,9 @@ public class MongoOptions {
public int maxWaitTime; public int maxWaitTime;
/** /**
* The connection timeout in milliseconds. * The connection timeout in milliseconds. A value of 0 means no timeout.
* It is used solely when establishing a new connection {@link java.net.Socket#connect(java.net.SocketAddress, int) } * It is used solely when establishing a new connection {@link java.net.Socket#connect(java.net.SocketAddress, int) }
* Default is 0 and means no timeout. * Default is 10,000.
*/ */
public int connectTimeout; public int connectTimeout;
@ -164,12 +262,17 @@ public class MongoOptions {
* Note that reading from secondaries can increase performance and reliability, but it may result in temporary inconsistent results. * Note that reading from secondaries can increase performance and reliability, but it may result in temporary inconsistent results.
* Default is false. * Default is false.
* *
* @deprecated Replaced in MongoDB 2.0/Java Driver 2.7 with ReadPreference.SECONDARY * @deprecated Replaced with {@code ReadPreference.secondaryPreferred()}
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference.SECONDARY * @see ReadPreference#secondaryPreferred()
*/ */
@Deprecated @Deprecated
public boolean slaveOk; public boolean slaveOk;
/**
* Specifies the read preference.
*/
public ReadPreference readPreference;
/** /**
* Override the DBCallback factory. Default is for the standard Mongo Java driver configuration. * Override the DBCallback factory. Default is for the standard Mongo Java driver configuration.
*/ */
@ -219,29 +322,39 @@ public class MongoOptions {
*/ */
public SocketFactory socketFactory; public SocketFactory socketFactory;
public String toString(){ /**
StringBuilder buf = new StringBuilder(); * Sets whether there is a finalize method created that cleans up instances of DBCursor that the client
buf.append( "description=" ).append( description ).append( ", " ); * does not close. If you are careful to always call the close method of DBCursor, then this can safely be set to false.
buf.append( "connectionsPerHost=" ).append( connectionsPerHost ).append( ", " ); * @see com.mongodb.DBCursor#close().
buf.append( "threadsAllowedToBlockForConnectionMultiplier=" ).append( threadsAllowedToBlockForConnectionMultiplier ).append( ", " ); * Default is true.
buf.append( "maxWaitTime=" ).append( maxWaitTime ).append( ", " ); */
buf.append( "connectTimeout=" ).append( connectTimeout ).append( ", " ); public boolean cursorFinalizerEnabled;
buf.append( "socketTimeout=" ).append( socketTimeout ).append( ", " );
buf.append( "socketKeepAlive=" ).append( socketKeepAlive ).append( ", " );
buf.append( "autoConnectRetry=" ).append( autoConnectRetry ).append( ", " );
buf.append( "maxAutoConnectRetryTime=" ).append( maxAutoConnectRetryTime ).append( ", " );
buf.append( "slaveOk=" ).append( slaveOk ).append( ", " );
buf.append( "safe=" ).append( safe ).append( ", " );
buf.append( "w=" ).append( w ).append( ", " );
buf.append( "wtimeout=" ).append( wtimeout ).append( ", " );
buf.append( "fsync=" ).append( fsync ).append( ", " );
buf.append( "j=" ).append( j );
return buf.toString();
}
/** /**
* @return The description for <code>Mongo</code> instances created with these options * Sets the write concern. If this is not set, the write concern defaults to the combination of settings of
* the other write concern-related fields. If set, this will override all of the other write concern-related
* fields.
*
* @see #w
* @see #safe
* @see #wtimeout
* @see #fsync
* @see #j
*/
public WriteConcern writeConcern;
/**
* Sets whether JMX beans registered by the driver should always be MBeans, regardless of whether the VM is
* Java 6 or greater. If false, the driver will use MXBeans if the VM is Java 6 or greater, and use MBeans if
* the VM is Java 5.
* <p>
* Default is false.
* </p>
*/
public boolean alwaysUseMBeans;
/**
* @return The description for <code>MongoClient</code> instances created with these options
*/ */
public synchronized String getDescription() { public synchronized String getDescription() {
return description; return description;
@ -282,7 +395,7 @@ public class MongoOptions {
/** /**
* *
* @param this multiplied with connectionsPerHost, sets the maximum number of threads that * @param threads multiplied with connectionsPerHost, sets the maximum number of threads that
* may be waiting for a connection * may be waiting for a connection
*/ */
public synchronized void setThreadsAllowedToBlockForConnectionMultiplier(int threads) { public synchronized void setThreadsAllowedToBlockForConnectionMultiplier(int threads) {
@ -497,6 +610,14 @@ public class MongoOptions {
j = safe; j = safe;
} }
/**
*
* @param writeConcern sets the write concern
*/
public void setWriteConcern(final WriteConcern writeConcern) {
this.writeConcern = writeConcern;
}
/** /**
* *
* @return the socket factory for creating sockets to mongod * @return the socket factory for creating sockets to mongod
@ -512,4 +633,82 @@ public class MongoOptions {
public synchronized void setSocketFactory(SocketFactory factory) { public synchronized void setSocketFactory(SocketFactory factory) {
socketFactory = factory; socketFactory = factory;
} }
/**
*
* @return the read preference
*/
public ReadPreference getReadPreference() {
return readPreference;
}
/**
*
* @param readPreference the read preference
*/
public void setReadPreference(ReadPreference readPreference) {
this.readPreference = readPreference;
}
/**
*
* @return whether DBCursor finalizer is enabled
*/
public boolean isCursorFinalizerEnabled() {
return cursorFinalizerEnabled;
}
/**
*
* @param cursorFinalizerEnabled whether cursor finalizer is enabled
*/
public void setCursorFinalizerEnabled(final boolean cursorFinalizerEnabled) {
this.cursorFinalizerEnabled = cursorFinalizerEnabled;
}
/**
*
* @return true if the driver should always use MBeans, regardless of VM
*/
public boolean isAlwaysUseMBeans() {
return alwaysUseMBeans;
}
/**
*
* @param alwaysUseMBeans sets whether the driver should always use MBeans, regardless of VM
*/
public void setAlwaysUseMBeans(final boolean alwaysUseMBeans) {
this.alwaysUseMBeans = alwaysUseMBeans;
}
@Override
public String toString() {
return "MongoOptions{" +
"description='" + description + '\'' +
", connectionsPerHost=" + connectionsPerHost +
", threadsAllowedToBlockForConnectionMultiplier=" + threadsAllowedToBlockForConnectionMultiplier +
", maxWaitTime=" + maxWaitTime +
", connectTimeout=" + connectTimeout +
", socketTimeout=" + socketTimeout +
", socketKeepAlive=" + socketKeepAlive +
", autoConnectRetry=" + autoConnectRetry +
", maxAutoConnectRetryTime=" + maxAutoConnectRetryTime +
", slaveOk=" + slaveOk +
", readPreference=" + readPreference +
", dbDecoderFactory=" + dbDecoderFactory +
", dbEncoderFactory=" + dbEncoderFactory +
", safe=" + safe +
", w=" + w +
", wtimeout=" + wtimeout +
", fsync=" + fsync +
", j=" + j +
", socketFactory=" + socketFactory +
", cursorFinalizerEnabled=" + cursorFinalizerEnabled +
", writeConcern=" + writeConcern +
", alwaysUseMBeans=" + alwaysUseMBeans +
'}';
}
} }
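A minimal usage sketch, not part of this diff, showing how the new MongoOptions fields from the hunks above fit together: a non-null writeConcern overrides the legacy safe/w/wtimeout/fsync/j fields, and the read preference, cursor finalizer and MBean switches are set through the new accessors. It assumes the repackaged classes under com.massivecraft.mcore.xlib.mongodb behave like the stock 2.11.1 driver; the host and database names are placeholders.

import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.Mongo;
import com.massivecraft.mcore.xlib.mongodb.MongoOptions;
import com.massivecraft.mcore.xlib.mongodb.ReadPreference;
import com.massivecraft.mcore.xlib.mongodb.ServerAddress;
import com.massivecraft.mcore.xlib.mongodb.WriteConcern;

public class MongoOptionsSketch {
    public static void main(String[] args) throws Exception {
        MongoOptions options = new MongoOptions();
        // A non-null write concern overrides safe/w/wtimeout/fsync/j.
        options.setWriteConcern(WriteConcern.MAJORITY);
        // Route reads to a secondary when one is available.
        options.setReadPreference(ReadPreference.secondaryPreferred());
        // Keep the DBCursor finalizer so leaked cursors are eventually cleaned up.
        options.setCursorFinalizerEnabled(true);
        // Force plain MBeans even on Java 6+, e.g. for restrictive JMX agents.
        options.setAlwaysUseMBeans(true);

        Mongo mongo = new Mongo(new ServerAddress("localhost", 27017), options);
        try {
            DB db = mongo.getDB("test");
            System.out.println(db.getStats());
        } finally {
            mongo.close();
        }
    }
}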

View File

@ -17,154 +17,135 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import java.net.UnknownHostException; import java.net.UnknownHostException;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.logging.Logger;
/** /**
* Represents a <a href="http://www.mongodb.org/display/DOCS/Connections">URI</a> * Represents a <a href="http://www.mongodb.org/display/DOCS/Connections">URI</a>
* which can be used to create a Mongo instance. The URI describes the hosts to * which can be used to create a Mongo instance. The URI describes the hosts to
* be used and options. * be used and options.
* * <p>
 * The Java driver supports the following options (case insensitive):<br /> * This class has been superseded by {@code MongoClientURI}, and may be deprecated in a future release.
* * <p>The format of the URI is:
* <pre>
* mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]
* </pre>
* <ul> * <ul>
* <li>maxpoolsize</li> * <li>{@code mongodb://} is a required prefix to identify that this is a string in the standard connection format.</li>
* <li>waitqueuemultiple</li> * <li>{@code username:password@} are optional. If given, the driver will attempt to login to a database after
* <li>waitqueuetimeoutms</li> * connecting to a database server.</li>
* <li>connecttimeoutms</li> * <li>{@code host1} is the only required part of the URI. It identifies a server address to connect to.</li>
* <li>sockettimeoutms</li> * <li>{@code :portX} is optional and defaults to :27017 if not provided.</li>
* <li>autoconnectretry</li> * <li>{@code /database} is the name of the database to login to and thus is only relevant if the
* <li>slaveok</li> * {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.</li>
* <li>safe</li> * <li>{@code ?options} are connection options. Note that if {@code database} is absent there is still a {@code /}
* <li>w</li> * required between the last host and the {@code ?} introducing the options. Options are name=value pairs and the pairs
* <li>wtimeout</li> * are separated by "&amp;". For backwards compatibility, ";" is accepted as a separator in addition to "&amp;",
* <li>fsync</li> * but should be considered as deprecated.</li>
* </ul> * </ul>
* <p>
* The Java driver supports the following options (case insensitive):
* <p>
* Replica set configuration:
* </p>
* <ul>
* <li>{@code replicaSet=name}: Implies that the hosts given are a seed list, and the driver will attempt to find
* all members of the set.</li>
* </ul>
* <p>Connection Configuration:</p>
* <ul>
* <li>{@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.</li>
* <li>{@code socketTimeoutMS=ms}: How long a send or receive on a socket can take before timing out.</li>
* </ul>
* <p>Connection pool configuration:</p>
* <ul>
* <li>{@code maxPoolSize=n}: The maximum number of connections in the connection pool.</li>
* <li>{@code waitQueueMultiple=n} : this multiplier, multiplied with the maxPoolSize setting, gives the maximum number of
* threads that may be waiting for a connection to become available from the pool. All further threads will get an
* exception right away.</li>
* <li>{@code waitQueueTimeoutMS=ms}: The maximum wait time in milliseconds that a thread may wait for a connection to
* become available.</li>
* </ul>
* <p>Write concern configuration:</p>
* <ul>
* <li>{@code safe=true|false}
* <ul>
* <li>{@code true}: the driver sends a getLastError command after every update to ensure that the update succeeded
* (see also {@code w} and {@code wtimeoutMS}).</li>
* <li>{@code false}: the driver does not send a getLastError command after every update.</li>
* </ul>
* </li>
* <li>{@code w=wValue}
* <ul>
* <li>The driver adds { w : wValue } to the getLastError command. Implies {@code safe=true}.</li>
* <li>wValue is typically a number, but can be any string in order to allow for specifications like
* {@code "majority"}</li>
* </ul>
* </li>
* <li>{@code wtimeoutMS=ms}
* <ul>
* <li>The driver adds { wtimeout : ms } to the getlasterror command. Implies {@code safe=true}.</li>
* <li>Used in combination with {@code w}</li>
* </ul>
* </li>
* </ul>
* <p>Read preference configuration:</p>
* <ul>
* <li>{@code slaveOk=true|false}: Whether a driver connected to a replica set will send reads to slaves/secondaries.</li>
* <li>{@code readPreference=enum}: The read preference for this connection. If set, it overrides any slaveOk value.
* <ul>
* <li>Enumerated values:
* <ul>
* <li>{@code primary}</li>
* <li>{@code primaryPreferred}</li>
* <li>{@code secondary}</li>
* <li>{@code secondaryPreferred}</li>
* <li>{@code nearest}</li>
* </ul>
* </li>
* </ul>
* </li>
* <li>{@code readPreferenceTags=string}. A representation of a tag set as a comma-separated list of colon-separated
 * key-value pairs, e.g. {@code "dc:ny,rack:1"}. Spaces are stripped from beginning and end of all keys and values.
 * To specify a list of tag sets, use multiple readPreferenceTags,
* e.g. {@code readPreferenceTags=dc:ny,rack:1;readPreferenceTags=dc:ny;readPreferenceTags=}
* <ul>
* <li>Note the empty value for the last one, which means match any secondary as a last resort.</li>
* <li>Order matters when using multiple readPreferenceTags.</li>
* </ul>
* </li>
* </ul>
* @see MongoClientURI
* @see MongoOptions for the default values for all options
*/ */
public class MongoURI { public class MongoURI {
/**
* The prefix for mongodb URIs.
*/
public static final String MONGODB_PREFIX = "mongodb://"; public static final String MONGODB_PREFIX = "mongodb://";
private final MongoClientURI mongoClientURI;
private final MongoOptions mongoOptions;
/** /**
* Creates a MongoURI described by a String. * Creates a MongoURI from a string.
* examples
* mongodb://127.0.0.1
* mongodb://fred:foobar@127.0.0.1/
* @param uri the URI * @param uri the URI
* @dochub connections * @dochub connections
*
* @deprecated Replaced by {@link MongoClientURI#MongoClientURI(String)}
*
*/ */
public MongoURI( String uri ){ @Deprecated
_uri = uri; public MongoURI( String uri ) {
if ( ! uri.startsWith( MONGODB_PREFIX ) ) this.mongoClientURI = new MongoClientURI(uri, new MongoClientOptions.Builder().legacyDefaults());
throw new IllegalArgumentException( "uri needs to start with " + MONGODB_PREFIX ); mongoOptions = new MongoOptions(mongoClientURI.getOptions());
uri = uri.substring(MONGODB_PREFIX.length());
String serverPart;
String nsPart;
String optionsPart;
{
int idx = uri.lastIndexOf( "/" );
if ( idx < 0 ){
serverPart = uri;
nsPart = null;
optionsPart = null;
}
else {
serverPart = uri.substring( 0 , idx );
nsPart = uri.substring( idx + 1 );
idx = nsPart.indexOf( "?" );
if ( idx >= 0 ){
optionsPart = nsPart.substring( idx + 1 );
nsPart = nsPart.substring( 0 , idx );
}
else {
optionsPart = null;
}
}
}
{ // _username,_password,_hosts
List<String> all = new LinkedList<String>();
int idx = serverPart.indexOf( "@" );
if ( idx > 0 ){
String authPart = serverPart.substring( 0 , idx );
serverPart = serverPart.substring( idx + 1 );
idx = authPart.indexOf( ":" );
_username = authPart.substring( 0, idx );
_password = authPart.substring( idx + 1 ).toCharArray();
}
else {
_username = null;
_password = null;
}
for ( String s : serverPart.split( "," ) )
all.add( s );
_hosts = Collections.unmodifiableList( all );
}
if ( nsPart != null ){ // _database,_collection
int idx = nsPart.indexOf( "." );
if ( idx < 0 ){
_database = nsPart;
_collection = null;
}
else {
_database = nsPart.substring( 0 , idx );
_collection = nsPart.substring( idx + 1 );
}
}
else {
_database = null;
_collection = null;
}
if ( optionsPart != null && optionsPart.length() > 0 ) parseOptions( optionsPart );
} }
@SuppressWarnings("deprecation") @Deprecated
private void parseOptions( String optionsPart ){ public MongoURI(final MongoClientURI mongoClientURI) {
for ( String _part : optionsPart.split( "&|;" ) ){ this.mongoClientURI = mongoClientURI;
int idx = _part.indexOf( "=" ); mongoOptions = new MongoOptions(mongoClientURI.getOptions());
if ( idx >= 0 ){
String key = _part.substring( 0, idx ).toLowerCase();
String value = _part.substring( idx + 1 );
if ( key.equals( "maxpoolsize" ) ) _options.connectionsPerHost = Integer.parseInt( value );
else if ( key.equals( "minpoolsize" ) )
LOGGER.warning( "Currently No support in Java driver for Min Pool Size." );
else if ( key.equals( "waitqueuemultiple" ) )
_options.threadsAllowedToBlockForConnectionMultiplier = Integer.parseInt( value );
else if ( key.equals( "waitqueuetimeoutms" ) ) _options.maxWaitTime = Integer.parseInt( value );
else if ( key.equals( "connecttimeoutms" ) ) _options.connectTimeout = Integer.parseInt( value );
else if ( key.equals( "sockettimeoutms" ) ) _options.socketTimeout = Integer.parseInt( value );
else if ( key.equals( "autoconnectretry" ) ) _options.autoConnectRetry = _parseBoolean( value );
else if ( key.equals( "slaveok" ) ) _options.slaveOk = _parseBoolean( value );
else if ( key.equals( "safe" ) ) _options.safe = _parseBoolean( value );
else if ( key.equals( "w" ) ) _options.w = Integer.parseInt( value );
else if ( key.equals( "wtimeout" ) ) _options.wtimeout = Integer.parseInt( value );
else if ( key.equals( "fsync" ) ) _options.fsync = _parseBoolean( value );
else LOGGER.warning( "Unknown or Unsupported Option '" + value + "'" );
}
}
}
boolean _parseBoolean( String _in ){
String in = _in.trim();
if ( in != null && in.length() > 0 && ( in.equals( "1" ) || in.toLowerCase().equals( "true" ) || in.toLowerCase()
.equals( "yes" ) ) )
return true;
else return false;
} }
// --------------------------------- // ---------------------------------
@ -174,7 +155,7 @@ public class MongoURI {
* @return * @return
*/ */
public String getUsername(){ public String getUsername(){
return _username; return mongoClientURI.getUsername();
} }
/** /**
@ -182,7 +163,7 @@ public class MongoURI {
* @return * @return
*/ */
public char[] getPassword(){ public char[] getPassword(){
return _password; return mongoClientURI.getPassword();
} }
/** /**
@ -190,7 +171,7 @@ public class MongoURI {
* @return * @return
*/ */
public List<String> getHosts(){ public List<String> getHosts(){
return _hosts; return mongoClientURI.getHosts();
} }
/** /**
@ -198,7 +179,7 @@ public class MongoURI {
* @return * @return
*/ */
public String getDatabase(){ public String getDatabase(){
return _database; return mongoClientURI.getDatabase();
} }
/** /**
@ -206,85 +187,84 @@ public class MongoURI {
* @return * @return
*/ */
public String getCollection(){ public String getCollection(){
return _collection; return mongoClientURI.getCollection();
} }
/** /**
* Gets the options * Gets the credentials
* @return *
* @since 2.11.0
*/
public MongoCredential getCredentials() {
return mongoClientURI.getCredentials();
}
/**
* Gets the options. This method will return the same instance of {@code MongoOptions} for every call, so it's
* possible to mutate the returned instance to change the defaults.
* @return the mongo options
*/ */
public MongoOptions getOptions(){ public MongoOptions getOptions(){
return _options; return mongoOptions;
} }
/** /**
* creates a Mongo instance based on the URI * creates a Mongo instance based on the URI
* @return * @return a new Mongo instance. There is no caching, so each call will create a new instance, each of which
* must be closed manually.
* @throws MongoException * @throws MongoException
* @throws UnknownHostException * @throws UnknownHostException
*/ */
@SuppressWarnings("deprecation")
public Mongo connect() public Mongo connect()
throws MongoException , UnknownHostException { throws UnknownHostException {
// TODO caching? // TODO caching?
return new Mongo( this ); // Note: we can't change this to new MongoClient(this) as that would silently change the default write concern.
return new Mongo(this);
} }
/** /**
* returns the DB object from a newly created Mongo instance based on this URI * returns the DB object from a newly created Mongo instance based on this URI
* @return * @return the database specified in the URI. This will implicitly create a new Mongo instance,
* which must be closed manually.
* @throws MongoException * @throws MongoException
* @throws UnknownHostException * @throws UnknownHostException
*/ */
public DB connectDB() public DB connectDB() throws UnknownHostException {
throws MongoException , UnknownHostException { return connect().getDB(getDatabase());
// TODO auth
return connect().getDB( _database );
} }
/** /**
* returns the URI's DB object from a given Mongo instance * returns the URI's DB object from a given Mongo instance
* @param m * @param mongo the Mongo instance to get the database from.
* @return * @return the database specified in this URI
*/ */
public DB connectDB( Mongo m ){ public DB connectDB( Mongo mongo ){
// TODO auth return mongo.getDB( getDatabase() );
return m.getDB( _database );
} }
/** /**
* returns the URI's Collection from a given DB object * returns the URI's Collection from a given DB object
* @param db * @param db the database to get the collection from
* @return * @return
*/ */
public DBCollection connectCollection( DB db ){ public DBCollection connectCollection( DB db ){
return db.getCollection( _collection ); return db.getCollection( getCollection() );
} }
/** /**
* returns the URI's Collection from a given Mongo instance * returns the URI's Collection from a given Mongo instance
* @param m * @param mongo the mongo instance to get the collection from
* @return * @return the collection specified in this URI
*/ */
public DBCollection connectCollection( Mongo m ){ public DBCollection connectCollection( Mongo mongo ){
return connectDB( m ).getCollection( _collection ); return connectDB( mongo ).getCollection( getCollection() );
} }
// --------------------------------- // ---------------------------------
final String _username;
final char[] _password;
final List<String> _hosts;
final String _database;
final String _collection;
final MongoOptions _options = new MongoOptions();
final String _uri;
static final Logger LOGGER = Logger.getLogger( "com.mongodb.MongoURI" );
@Override @Override
public String toString() { public String toString() {
return _uri; return mongoClientURI.toString();
} }
} }
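A short sketch, not part of this diff, of the connection-string options documented in the javadoc above, parsed through the rewritten MongoURI facade (which now delegates to MongoClientURI). The URI, hosts and credentials are made-up placeholders; in new code the javadoc points at MongoClientURI instead.

import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.MongoURI;

public class MongoUriSketch {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) throws Exception {
        // Seed list of two hosts, auth against "mydb", plus pool, read and write options.
        MongoURI uri = new MongoURI("mongodb://fred:secret@db1.example.com:27017,db2.example.com:27017/mydb"
                + "?replicaSet=rs0&maxPoolSize=20&readPreference=secondaryPreferred&w=majority&wtimeoutMS=2000");

        System.out.println(uri.getHosts());                       // [db1.example.com:27017, db2.example.com:27017]
        System.out.println(uri.getDatabase());                    // mydb
        System.out.println(uri.getOptions().getReadPreference()); // secondaryPreferred

        DB db = uri.connectDB(); // creates a new Mongo under the hood; close it when done
        try {
            System.out.println(db.getName());
        } finally {
            db.getMongo().close();
        }
    }
}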

View File

@ -0,0 +1,134 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* A connection to a set of mongos servers.
*/
class MongosStatus extends ConnectionStatus {
private static final Logger logger = Logger.getLogger("com.mongodb.MongosStatus");
MongosStatus(Mongo mongo, List<ServerAddress> mongosAddresses) {
super(mongosAddresses, mongo);
_updater = new MongosUpdater();
}
@Override
boolean hasServerUp() {
return preferred != null;
}
@Override
Node ensureMaster() {
checkClosed();
return getPreferred();
}
@Override
List<ServerAddress> getServerAddressList() {
return new ArrayList<ServerAddress>(_mongosAddresses);
}
class MongosUpdater extends BackgroundUpdater {
MongosUpdater() {
super("MongosStatus:MongosUpdater");
}
@Override
public void run() {
List<MongosNode> mongosNodes = getMongosNodes();
try {
while (!Thread.interrupted()) {
try {
MongosNode bestThisPass = null;
for (MongosNode cur : mongosNodes) {
cur.update();
if (cur._ok) {
if (bestThisPass == null || (cur._pingTimeMS < bestThisPass._pingTimeMS)) {
bestThisPass = cur;
}
}
}
setPreferred(bestThisPass);
} catch (Exception e) {
logger.log(Level.WARNING, "couldn't do update pass", e);
}
int sleepTime = preferred == null ? updaterIntervalNoMasterMS : updaterIntervalMS;
Thread.sleep(sleepTime);
}
} catch (InterruptedException e) {
logger.log(Level.INFO, "Exiting background thread");
// Allow thread to exit
}
}
private List<MongosNode> getMongosNodes() {
List<MongosNode> mongosNodes = new ArrayList<MongosNode>(_mongosAddresses.size());
for (ServerAddress serverAddress : _mongosAddresses) {
mongosNodes.add(new MongosNode(serverAddress, _mongo, _mongoOptions));
}
return mongosNodes;
}
}
static class MongosNode extends UpdatableNode {
MongosNode(final ServerAddress addr, Mongo mongo, MongoOptions mongoOptions) {
super(addr, mongo, mongoOptions);
}
@Override
protected Logger getLogger() {
return logger;
}
}
// Sends a notification every time preferred is set.
private synchronized void setPreferred(final MongosNode bestThisPass) {
if (bestThisPass == null) {
preferred = null;
} else {
preferred = new Node(bestThisPass._pingTimeMS, bestThisPass._addr, bestThisPass._maxBsonObjectSize, bestThisPass._ok);
}
notifyAll();
}
// Gets the current preferred node. If there is no preferred node, wait to get a notification before returning null.
private synchronized Node getPreferred() {
if (preferred == null) {
try {
synchronized (this) {
wait(_mongo.getMongoOptions().getConnectTimeout());
}
} catch (InterruptedException e) {
throw new MongoInterruptedException("Interrupted while waiting for next update to mongos status", e);
}
}
return preferred;
}
// The current preferred mongos Node to use as the master. This is not necessarily the node that is currently in use.
// Rather, it's the node that is preferred if there is a problem with the currently in use node.
private volatile Node preferred;
}
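MongosStatus is package-private plumbing; applications reach it indirectly by handing the driver more than one mongos address, as in this sketch (not part of this diff). The background updater above then pings each mongos and prefers the healthy one with the lowest round-trip time. The sketch assumes the repackaged MongoClient class is present alongside MongoClientURI and MongoClientOptions; the addresses are placeholders.

import java.util.Arrays;
import java.util.List;

import com.massivecraft.mcore.xlib.mongodb.Mongo;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.ServerAddress;

public class MongosSeedListSketch {
    public static void main(String[] args) throws Exception {
        // Two mongos routers for the same sharded cluster.
        List<ServerAddress> mongosSeeds = Arrays.asList(
                new ServerAddress("mongos1.example.com", 27017),
                new ServerAddress("mongos2.example.com", 27017));

        // With several mongos addresses, the driver keeps a MongosStatus updater running
        // that re-pings them and fails over to the fastest healthy router.
        Mongo mongo = new MongoClient(mongosSeeds);
        try {
            System.out.println(mongo.getServerAddressList());
        } finally {
            mongo.close();
        }
    }
}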

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.mongodb.util.Util;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
class NativeAuthenticationHelper {
static DBObject getAuthCommand(String userName, char[] password, String nonce) {
return getAuthCommand(userName, createHash(userName, password), nonce);
}
static DBObject getAuthCommand(String userName, byte[] authHash, String nonce) {
String key = nonce + userName + new String(authHash);
BasicDBObject cmd = new BasicDBObject();
cmd.put("authenticate", 1);
cmd.put("user", userName);
cmd.put("nonce", nonce);
cmd.put("key", Util.hexMD5(key.getBytes()));
return cmd;
}
static BasicDBObject getNonceCommand() {
return new BasicDBObject("getnonce", 1);
}
static byte[] createHash(String userName, char[] password) {
ByteArrayOutputStream bout = new ByteArrayOutputStream(userName.length() + 20 + password.length);
try {
bout.write(userName.getBytes());
bout.write(":mongo:".getBytes());
for (final char ch : password) {
if (ch >= 128)
throw new IllegalArgumentException("can't handle non-ascii passwords yet");
bout.write((byte) ch);
}
} catch (IOException ioe) {
throw new RuntimeException("impossible", ioe);
}
return Util.hexMD5(bout.toByteArray()).getBytes();
}
private NativeAuthenticationHelper() {
}
}
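To make the hashing above concrete, here is a standalone sketch (not part of this diff, plain JDK only) of the same two MD5 steps NativeAuthenticationHelper performs: first the stored password digest hexMD5(user + ":mongo:" + password), then the per-connection key hexMD5(nonce + user + passwordDigest) that goes into the authenticate command. The user, password and nonce values are made up.

import java.nio.charset.Charset;
import java.security.MessageDigest;

public class NativeAuthSketch {
    static String hexMD5(byte[] data) throws Exception {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        StringBuilder hex = new StringBuilder();
        for (byte b : md5.digest(data)) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }

    public static void main(String[] args) throws Exception {
        Charset ascii = Charset.forName("US-ASCII");
        String user = "fred";
        String password = "secret";
        String nonce = "2375531c32080ae8"; // normally returned by the getnonce command

        // Step 1: the password digest (what createHash produces).
        String passwordDigest = hexMD5((user + ":mongo:" + password).getBytes(ascii));

        // Step 2: the key sent in the authenticate command for this nonce (what getAuthCommand computes).
        String key = hexMD5((nonce + user + passwordDigest).getBytes(ascii));

        System.out.println("pwd = " + passwordDigest);
        System.out.println("key = " + key);
    }
}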

View File

@ -1,5 +1,3 @@
// OutMessage.java
/** /**
* Copyright (C) 2008 10gen Inc. * Copyright (C) 2008 10gen Inc.
* *
@ -18,115 +16,243 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.atomic.AtomicInteger;
import com.massivecraft.mcore.xlib.bson.BSONObject; import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.BasicBSONEncoder; import com.massivecraft.mcore.xlib.bson.BasicBSONEncoder;
import com.massivecraft.mcore.xlib.bson.io.PoolOutputBuffer; import com.massivecraft.mcore.xlib.bson.io.PoolOutputBuffer;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Collection;
import java.util.concurrent.atomic.AtomicInteger;
class OutMessage extends BasicBSONEncoder { class OutMessage extends BasicBSONEncoder {
static AtomicInteger ID = new AtomicInteger(1); enum OpCode {
OP_UPDATE(2001),
OP_INSERT(2002),
OP_QUERY(2004),
OP_GETMORE(2005),
OP_DELETE(2006),
OP_KILL_CURSORS(2007);
static OutMessage query( Mongo m , int options , String ns , int numToSkip , int batchSize , DBObject query , DBObject fields ){ OpCode(int value) {
return query( m, options, ns, numToSkip, batchSize, query, fields, ReadPreference.PRIMARY ); this.value = value;
}
private final int value;
public int getValue() {
return value;
}
} }
static OutMessage query( Mongo m , int options , String ns , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref ){ static AtomicInteger REQUEST_ID = new AtomicInteger(1);
return query( m, options, ns, numToSkip, batchSize, query, fields, readPref, DefaultDBEncoder.FACTORY.create());
public static OutMessage insert(final DBCollection collection, final DBEncoder encoder, WriteConcern concern) {
OutMessage om = new OutMessage(collection, OpCode.OP_INSERT, encoder);
om.writeInsertPrologue(concern);
return om;
} }
static OutMessage query( Mongo m , int options , String ns , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref, DBEncoder enc ){ public static OutMessage update(final DBCollection collection, final DBEncoder encoder,
OutMessage out = new OutMessage( m , 2004, enc ); final boolean upsert, final boolean multi, final DBObject query, final DBObject o) {
OutMessage om = new OutMessage(collection, OpCode.OP_UPDATE, encoder, query);
om.writeUpdate(upsert, multi, query, o);
out._appendQuery( options , ns , numToSkip , batchSize , query , fields, readPref); return om;
return out;
} }
OutMessage( Mongo m ){ public static OutMessage remove(final DBCollection collection, final DBEncoder encoder, final DBObject query) {
this( m , DefaultDBEncoder.FACTORY.create() ); OutMessage om = new OutMessage(collection, OpCode.OP_DELETE, encoder, query);
om.writeRemove();
return om;
} }
OutMessage( Mongo m , int op ){ static OutMessage query( DBCollection collection , int options , int numToSkip , int batchSize , DBObject query , DBObject fields ){
this( m ); return query( collection, options, numToSkip, batchSize, query, fields, ReadPreference.primary() );
reset( op );
} }
OutMessage( Mongo m , DBEncoder encoder ) { static OutMessage query( DBCollection collection , int options , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref ){
_encoder = encoder; return query( collection, options, numToSkip, batchSize, query, fields, readPref, DefaultDBEncoder.FACTORY.create());
}
static OutMessage query( DBCollection collection , int options , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref, DBEncoder enc ){
OutMessage om = new OutMessage(collection, enc, query, options, readPref);
om.writeQuery(fields, numToSkip, batchSize);
return om;
}
static OutMessage getMore(DBCollection collection, long cursorId, int batchSize) {
OutMessage om = new OutMessage(collection, OpCode.OP_GETMORE);
om.writeGetMore(cursorId, batchSize);
return om;
}
static OutMessage killCursors(Mongo mongo, int numCursors) {
OutMessage om = new OutMessage(mongo , OpCode.OP_KILL_CURSORS);
om.writeKillCursorsPrologue(numCursors);
return om;
}
private OutMessage( Mongo m , OpCode opCode ){
this(null, m, opCode, null);
}
private OutMessage(final DBCollection collection, final OpCode opCode) {
this(collection, opCode, null);
}
private OutMessage(final DBCollection collection, final OpCode opCode, final DBEncoder enc) {
this(collection, collection.getDB().getMongo(), opCode, enc);
}
private OutMessage(final DBCollection collection, final Mongo m, final OpCode opCode, final DBEncoder enc) {
this(collection, m, opCode, enc, null, -1, null);
}
private OutMessage(final DBCollection collection, final OpCode opCode, final DBEncoder enc, final DBObject query) {
this(collection, collection.getDB().getMongo(), opCode, enc, query, 0, null);
}
private OutMessage(final DBCollection collection, final DBEncoder enc, final DBObject query, final int options, final ReadPreference readPref) {
this(collection, collection.getDB().getMongo(), OpCode.OP_QUERY, enc, query, options, readPref);
}
private OutMessage(final DBCollection collection, final Mongo m, OpCode opCode, final DBEncoder enc, final DBObject query, final int options, final ReadPreference readPref) {
_collection = collection;
_mongo = m; _mongo = m;
_buffer = _mongo == null ? new PoolOutputBuffer() : _mongo._bufferPool.get(); _encoder = enc;
_buffer = _mongo._bufferPool.get();
_buffer.reset(); _buffer.reset();
set(_buffer);
set( _buffer ); _id = REQUEST_ID.getAndIncrement();
_opCode = opCode;
writeMessagePrologue(opCode);
if (query == null) {
_query = null;
_queryOptions = 0;
} else {
_query = query;
int allOptions = options;
if (readPref != null && readPref.isSlaveOk()) {
allOptions |= Bytes.QUERYOPTION_SLAVEOK;
}
_queryOptions = allOptions;
}
} }
OutMessage( Mongo m , int op , DBEncoder enc ) { private void writeInsertPrologue(final WriteConcern concern) {
this( m , enc ); int flags = 0;
reset( op ); if (concern.getContinueOnErrorForInsert()) {
} flags |= 1;
private void _appendQuery( int options , String ns , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref){ }
_queryOptions = options; writeInt(flags);
_readPref = readPref; writeCString(_collection.getFullName());
//If the readPrefs are non-null and non-primary, set slaveOk query option
if (_readPref != null && !(_readPref instanceof ReadPreference.PrimaryReadPreference))
_queryOptions |= Bytes.QUERYOPTION_SLAVEOK;
writeInt( _queryOptions );
writeCString( ns );
writeInt( numToSkip );
writeInt( batchSize );
putObject( query );
if ( fields != null )
putObject( fields );
} }
private void reset( int op ){ private void writeUpdate(final boolean upsert, final boolean multi, final DBObject query, final DBObject o) {
done(); writeInt(0); // reserved
_buffer.reset(); writeCString(_collection.getFullName());
set( _buffer );
_id = ID.getAndIncrement(); int flags = 0;
if ( upsert ) flags |= 1;
if ( multi ) flags |= 2;
writeInt(flags);
putObject(query);
putObject(o);
}
private void writeRemove() {
writeInt(0); // reserved
writeCString(_collection.getFullName());
Collection<String> keys = _query.keySet();
if ( keys.size() == 1 && keys.iterator().next().equals( "_id" ) && _query.get( keys.iterator().next() ) instanceof ObjectId)
writeInt( 1 );
else
writeInt( 0 );
putObject(_query);
}
private void writeGetMore(final long cursorId, final int batchSize) {
writeInt(0);
writeCString(_collection.getFullName());
writeInt(batchSize);
writeLong(cursorId);
}
private void writeKillCursorsPrologue(final int numCursors) {
writeInt(0); // reserved
writeInt(numCursors);
}
private void writeQuery(final DBObject fields, final int numToSkip, final int batchSize) {
writeInt(_queryOptions);
writeCString(_collection.getFullName());
writeInt(numToSkip);
writeInt(batchSize);
putObject(_query);
if (fields != null)
putObject(fields);
}
private void writeMessagePrologue(final OpCode opCode) {
writeInt( 0 ); // length: will set this later writeInt( 0 ); // length: will set this later
writeInt( _id ); writeInt( _id );
writeInt( 0 ); // response to writeInt( 0 ); // response to
writeInt( op ); writeInt( opCode.getValue() );
} }
void prepare(){ void prepare(){
if (_buffer == null) {
throw new IllegalStateException("Already closed");
}
_buffer.writeInt( 0 , _buffer.size() ); _buffer.writeInt( 0 , _buffer.size() );
} }
void pipe( OutputStream out ) throws IOException {
if (_buffer == null) {
throw new IllegalStateException("Already closed");
}
void pipe( OutputStream out )
throws IOException {
_buffer.pipe( out ); _buffer.pipe( out );
} }
int size(){ int size() {
if (_buffer == null) {
throw new IllegalStateException("Already closed");
}
return _buffer.size(); return _buffer.size();
} }
byte[] toByteArray(){ void doneWithMessage() {
return _buffer.toByteArray(); if (_buffer == null) {
} throw new IllegalStateException("Only call this once per instance");
void doneWithMessage(){
if ( _buffer != null && _mongo != null ) {
_buffer.reset();
_mongo._bufferPool.done( _buffer );
} }
_buffer.reset();
_mongo._bufferPool.done(_buffer);
_buffer = null; _buffer = null;
_mongo = null; done();
} }
boolean hasOption( int option ){ boolean hasOption( int option ){
@ -137,30 +263,44 @@ class OutMessage extends BasicBSONEncoder {
return _id; return _id;
} }
OpCode getOpCode() {
return _opCode;
}
DBObject getQuery() {
return _query;
}
String getNamespace() {
return _collection != null ? _collection.getFullName() : null;
}
int getNumDocuments() {
return _numDocuments;
}
@Override @Override
public int putObject(BSONObject o) { public int putObject(BSONObject o) {
// check max size if (_buffer == null) {
int sz = _encoder.writeObject(_buf, o); throw new IllegalStateException("Already closed");
if (_mongo != null) {
int maxsize = _mongo.getConnector().getMaxBsonObjectSize();
maxsize = Math.max(maxsize, Bytes.MAX_OBJECT_SIZE);
if (sz > maxsize) {
throw new MongoInternalException("DBObject of size " + sz + " is over Max BSON size " + _mongo.getMaxBsonObjectSize());
}
} }
return sz;
// check max size
int objectSize = _encoder.writeObject(_buf, o);
if (objectSize > Math.max(_mongo.getConnector().getMaxBsonObjectSize(), Bytes.MAX_OBJECT_SIZE)) {
throw new MongoInternalException("DBObject of size " + objectSize + " is over Max BSON size " + _mongo.getMaxBsonObjectSize());
}
_numDocuments++;
return objectSize;
} }
private final Mongo _mongo;
public ReadPreference getReadPreference(){ private final DBCollection _collection;
return _readPref;
}
private Mongo _mongo;
private PoolOutputBuffer _buffer; private PoolOutputBuffer _buffer;
private int _id; private final int _id;
private int _queryOptions = 0; private final OpCode _opCode;
private ReadPreference _readPref = ReadPreference.PRIMARY; private final int _queryOptions;
private DBEncoder _encoder; private final DBObject _query;
private final DBEncoder _encoder;
private volatile int _numDocuments; // only one thread will modify this field, so volatile is sufficient synchronization
} }
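The prologue that writeMessagePrologue emits is the standard four-int wire protocol header (message length, request id, response-to, opcode), encoded little-endian like the rest of BSON; OutMessage writes a zero length first and patches it in prepare(). A minimal sketch of just that header, not part of this diff and independent of the driver classes:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class WireHeaderSketch {
    // Builds the 16-byte header for a message whose total size is already known.
    static byte[] header(int totalMessageLength, int requestId, int responseTo, int opCode) {
        ByteBuffer buf = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);
        buf.putInt(totalMessageLength); // whole message length, header included (OutMessage patches this in prepare())
        buf.putInt(requestId);          // OutMessage takes this from REQUEST_ID.getAndIncrement()
        buf.putInt(responseTo);         // 0 for client-originated messages
        buf.putInt(opCode);             // e.g. OP_QUERY = 2004, OP_INSERT = 2002
        return buf.array();
    }

    public static void main(String[] args) {
        byte[] h = header(16 + 100, 1, 0, 2004);
        System.out.println("header bytes: " + h.length); // 16
    }
}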

View File

@ -23,6 +23,7 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.regex.Pattern; import java.util.regex.Pattern;
@ -31,6 +32,7 @@ import java.util.regex.Pattern;
* @author Julson Lim * @author Julson Lim
* *
*/ */
@SuppressWarnings({"unchecked", "rawtypes"})
public class QueryBuilder { public class QueryBuilder {
/** /**
@ -61,7 +63,7 @@ public class QueryBuilder {
* Adds a new key to the query if not present yet. * Adds a new key to the query if not present yet.
* Sets this key as the current key. * Sets this key as the current key.
* @param key MongoDB document key * @param key MongoDB document key
* @return Returns the current QueryBuilder * @return this
*/ */
public QueryBuilder put(String key) { public QueryBuilder put(String key) {
_currentKey = key; _currentKey = key;
@ -72,10 +74,10 @@ public class QueryBuilder {
} }
/** /**
* Equivalent to <code>QueryBuilder.put(key)</code>. Intended for compound query chains to be more readable * Equivalent to <code>QueryBuilder.put(key)</code>. Intended for compound query chains to be more readable, e.g.
* Example: QueryBuilder.start("a").greaterThan(1).and("b").lessThan(3) * {@code QueryBuilder.start("a").greaterThan(1).and("b").lessThan(3) }
* @param key MongoDB document key * @param key MongoDB document key
* @return Returns the current QueryBuilder with an appended key operand * @return this
*/ */
public QueryBuilder and(String key) { public QueryBuilder and(String key) {
return put(key); return put(key);
@ -211,6 +213,18 @@ public class QueryBuilder {
return this; return this;
} }
/**
* Equivalent to the $elemMatch operand
* @param match the object to match
* @return Returns the current QueryBuilder with an appended elemMatch operator
*/
public QueryBuilder elemMatch(final DBObject match) {
addOperand(QueryOperators.ELEM_MATCH, match);
return this;
}
/** /**
* Equivalent of the $within operand, used for geospatial operation * Equivalent of the $within operand, used for geospatial operation
* @param x x coordinate * @param x x coordinate
@ -219,8 +233,8 @@ public class QueryBuilder {
* @return * @return
*/ */
public QueryBuilder withinCenter( double x , double y , double radius ){ public QueryBuilder withinCenter( double x , double y , double radius ){
addOperand( "$within" , addOperand( QueryOperators.WITHIN ,
new BasicDBObject( "$center" , new Object[]{ new Double[]{ x , y } , radius } ) ); new BasicDBObject(QueryOperators.CENTER, new Object[]{ new Double[]{ x , y } , radius } ) );
return this; return this;
} }
@ -231,7 +245,7 @@ public class QueryBuilder {
* @return * @return
*/ */
public QueryBuilder near( double x , double y ){ public QueryBuilder near( double x , double y ){
addOperand( "$near" , addOperand(QueryOperators.NEAR,
new Double[]{ x , y } ); new Double[]{ x , y } );
return this; return this;
} }
@ -244,7 +258,7 @@ public class QueryBuilder {
* @return * @return
*/ */
public QueryBuilder near( double x , double y , double maxDistance ){ public QueryBuilder near( double x , double y , double maxDistance ){
addOperand( "$near" , addOperand( QueryOperators.NEAR ,
new Double[]{ x , y , maxDistance } ); new Double[]{ x , y , maxDistance } );
return this; return this;
} }
@ -256,7 +270,7 @@ public class QueryBuilder {
* @return * @return
*/ */
public QueryBuilder nearSphere( double longitude , double latitude ){ public QueryBuilder nearSphere( double longitude , double latitude ){
addOperand( "$nearSphere" , addOperand(QueryOperators.NEAR_SPHERE,
new Double[]{ longitude , latitude } ); new Double[]{ longitude , latitude } );
return this; return this;
} }
@ -269,7 +283,7 @@ public class QueryBuilder {
* @return * @return
*/ */
public QueryBuilder nearSphere( double longitude , double latitude , double maxDistance ){ public QueryBuilder nearSphere( double longitude , double latitude , double maxDistance ){
addOperand( "$nearSphere" , addOperand( QueryOperators.NEAR_SPHERE ,
new Double[]{ longitude , latitude , maxDistance } ); new Double[]{ longitude , latitude , maxDistance } );
return this; return this;
} }
@ -283,8 +297,8 @@ public class QueryBuilder {
* @return * @return
*/ */
public QueryBuilder withinCenterSphere( double longitude , double latitude , double maxDistance ){ public QueryBuilder withinCenterSphere( double longitude , double latitude , double maxDistance ){
addOperand( "$within" , addOperand( QueryOperators.WITHIN ,
new BasicDBObject( "$centerSphere" , new Object[]{ new Double[]{longitude , latitude} , maxDistance } ) ); new BasicDBObject(QueryOperators.CENTER_SPHERE, new Object[]{ new Double[]{longitude , latitude} , maxDistance } ) );
return this; return this;
} }
@ -298,8 +312,8 @@ public class QueryBuilder {
* @return * @return
*/ */
public QueryBuilder withinBox(double x, double y, double x2, double y2) { public QueryBuilder withinBox(double x, double y, double x2, double y2) {
addOperand( "$within" , addOperand( QueryOperators.WITHIN ,
new BasicDBObject( "$box" , new Object[] { new Double[] { x, y }, new Double[] { x2, y2 } } ) ); new BasicDBObject(QueryOperators.BOX, new Object[] { new Double[] { x, y }, new Double[] { x2, y2 } } ) );
return this; return this;
} }
@ -307,47 +321,54 @@ public class QueryBuilder {
* Equivalent to a $within operand, based on a bounding polygon represented by an array of points * Equivalent to a $within operand, based on a bounding polygon represented by an array of points
* *
* @param points an array of Double[] defining the vertices of the search area * @param points an array of Double[] defining the vertices of the search area
* @return * @return this
*/ */
public QueryBuilder withinPolygon(List<Double[]> points) { public QueryBuilder withinPolygon(List<Double[]> points) {
if(points == null || points.isEmpty() || points.size() < 3) if(points == null || points.isEmpty() || points.size() < 3)
throw new IllegalArgumentException("Polygon insufficient number of vertices defined"); throw new IllegalArgumentException("Polygon insufficient number of vertices defined");
addOperand( "$within" , addOperand( QueryOperators.WITHIN ,
new BasicDBObject( "$polygon" , points ) ); new BasicDBObject(QueryOperators.POLYGON, points ) );
return this; return this;
} }
/** /**
* Equivalent to a $or operand * Equivalent to $not meta operator. Must be followed by an operand, not a value, e.g.
* @param ors * {@code QueryBuilder.start("val").not().mod(Arrays.asList(10, 1)) }
* @return *
* @return Returns the current QueryBuilder with an appended "not" meta operator
*/
public QueryBuilder not() {
_hasNot = true;
return this;
}
/**
* Equivalent to an $or operand
* @param ors the list of conditions to or together
* @return Returns the current QueryBuilder with appended "or" operator
*/ */
@SuppressWarnings({ "unchecked", "rawtypes" })
public QueryBuilder or( DBObject ... ors ){ public QueryBuilder or( DBObject ... ors ){
List l = (List)_query.get( "$or" ); List l = (List)_query.get( QueryOperators.OR );
if ( l == null ){ if ( l == null ){
l = new ArrayList(); l = new ArrayList();
_query.put( "$or" , l ); _query.put( QueryOperators.OR , l );
} }
for ( DBObject o : ors ) Collections.addAll(l, ors);
l.add( o );
return this; return this;
} }
/** /**
* Equivalent to an $and operand * Equivalent to an $and operand
* @param ands * @param ands the list of conditions to and together
* @return * @return Returns the current QueryBuilder with appended "and" operator
*/ */
@SuppressWarnings({ "unchecked", "rawtypes" })
public QueryBuilder and( DBObject ... ands ){ public QueryBuilder and( DBObject ... ands ){
List l = (List)_query.get( "$and" ); List l = (List)_query.get( QueryOperators.AND );
if ( l == null ){ if ( l == null ){
l = new ArrayList(); l = new ArrayList();
_query.put( "$and" , l ); _query.put( QueryOperators.AND , l );
} }
for ( DBObject o : ands ) Collections.addAll(l, ands);
l.add( o );
return this; return this;
} }
@ -367,6 +388,10 @@ public class QueryBuilder {
private void addOperand(String op, Object value) { private void addOperand(String op, Object value) {
if(op == null) { if(op == null) {
if (_hasNot) {
value = new BasicDBObject(QueryOperators.NOT, value);
_hasNot = false;
}
_query.put(_currentKey, value); _query.put(_currentKey, value);
return; return;
} }
@ -375,13 +400,21 @@ public class QueryBuilder {
BasicDBObject operand; BasicDBObject operand;
if(!(storedValue instanceof DBObject)) { if(!(storedValue instanceof DBObject)) {
operand = new BasicDBObject(); operand = new BasicDBObject();
_query.put(_currentKey, operand); if (_hasNot) {
DBObject notOperand = new BasicDBObject(QueryOperators.NOT, operand);
_query.put(_currentKey, notOperand);
_hasNot = false;
} else {
_query.put(_currentKey, operand);
}
} else { } else {
operand = (BasicDBObject)_query.get(_currentKey); operand = (BasicDBObject)_query.get(_currentKey);
if (operand.get(QueryOperators.NOT) != null) {
operand = (BasicDBObject) operand.get(QueryOperators.NOT);
}
} }
operand.put(op, value); operand.put(op, value);
} }
@SuppressWarnings("serial") @SuppressWarnings("serial")
static class QueryBuilderException extends RuntimeException { static class QueryBuilderException extends RuntimeException {
QueryBuilderException(String message) { QueryBuilderException(String message) {
@ -392,5 +425,6 @@ public class QueryBuilder {
private DBObject _query; private DBObject _query;
private String _currentKey; private String _currentKey;
private boolean _hasNot;
} }
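A short sketch, not part of this diff, exercising the QueryBuilder pieces added in these hunks: the $elemMatch operand, the not() meta operator from its javadoc, and $or built via the new constants. Field names and values are made up.

import java.util.Arrays;

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.QueryBuilder;

public class QueryBuilderSketch {
    public static void main(String[] args) {
        // { "items" : { "$elemMatch" : { "sku" : "abc123" , "qty" : { "$gt" : 5 } } } }
        DBObject withMatchingItem = QueryBuilder.start("items")
                .elemMatch(new BasicDBObject("sku", "abc123").append("qty", new BasicDBObject("$gt", 5)))
                .get();

        // { "counter" : { "$not" : { "$mod" : [ 10 , 1 ] } } }
        DBObject notMod = QueryBuilder.start("counter").not().mod(Arrays.asList(10, 1)).get();

        // { "$or" : [ { "size" : "big" } , { "qty" : { "$gt" : 100 } } ] }
        DBObject bigOrMany = QueryBuilder.start()
                .or(new BasicDBObject("size", "big"),
                    QueryBuilder.start("qty").greaterThan(100).get())
                .get();

        System.out.println(withMatchingItem);
        System.out.println(notMod);
        System.out.println(bigOrMany);
    }
}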

View File

@ -0,0 +1,192 @@
package com.massivecraft.mcore.xlib.mongodb;
/**
* Utility for constructing Query operation command with query, orderby, hint, explain, snapshot.
*/
class QueryOpBuilder {
static final String READ_PREFERENCE_META_OPERATOR = "$readPreference";
private DBObject query;
private DBObject orderBy;
private DBObject hintObj;
private String hintStr;
private boolean explain;
private boolean snapshot;
private DBObject readPref;
private DBObject specialFields;
public QueryOpBuilder(){
}
/**
* Adds the query clause to the operation
* @param query
* @return
*/
public QueryOpBuilder addQuery(DBObject query){
this.query = query;
return this;
}
/**
* Adds the orderby clause to the operation
* @param orderBy
* @return
*/
public QueryOpBuilder addOrderBy(DBObject orderBy){
this.orderBy = orderBy;
return this;
}
/**
* Adds the hint clause to the operation
* @param hint
* @return
*/
public QueryOpBuilder addHint(String hint){
this.hintStr = hint;
return this;
}
/**
* Adds hint clause to the operation
* @param hint
* @return
*/
public QueryOpBuilder addHint(DBObject hint){
this.hintObj = hint;
return this;
}
/**
* Adds special fields to the operation
* @param specialFields
* @return
*/
public QueryOpBuilder addSpecialFields(DBObject specialFields){
this.specialFields = specialFields;
return this;
}
/**
* Adds the explain flag to the operation
* @param explain
* @return
*/
public QueryOpBuilder addExplain(boolean explain){
this.explain = explain;
return this;
}
/**
* Adds the snapshot flag to the operation
* @param snapshot
* @return
*/
public QueryOpBuilder addSnapshot(boolean snapshot){
this.snapshot = snapshot;
return this;
}
/**
* Adds ReadPreference to the operation
* @param readPref
* @return
*/
public QueryOpBuilder addReadPreference(DBObject readPref){
this.readPref = readPref;
return this;
}
/**
* Constructs the query operation DBObject
* @return DBObject representing the query command to be sent to server
*/
public DBObject get() {
DBObject lclQuery = query;
//must always have a query
if (lclQuery == null) {
lclQuery = new BasicDBObject();
}
if (hasSpecialQueryFields()) {
DBObject queryop = (specialFields == null ? new BasicDBObject() : specialFields);
addToQueryObject(queryop, "$query", lclQuery, true);
addToQueryObject(queryop, "$orderby", orderBy, false);
if (hintStr != null)
addToQueryObject(queryop, "$hint", hintStr);
if (hintObj != null)
addToQueryObject(queryop, "$hint", hintObj);
if (explain)
queryop.put("$explain", true);
if (snapshot)
queryop.put("$snapshot", true);
if (readPref != null)
queryop.put(READ_PREFERENCE_META_OPERATOR, readPref);
return queryop;
}
return lclQuery;
}
private boolean hasSpecialQueryFields(){
if ( readPref != null )
return true;
if ( specialFields != null )
return true;
if ( orderBy != null && orderBy.keySet().size() > 0 )
return true;
if ( hintStr != null || hintObj != null || snapshot || explain)
return true;
return false;
}
/**
* Adds DBObject to the operation
* @param dbobj DBObject to add field to
* @param field name of the field
* @param obj object to add to the operation. Ignore if <code>null</code>.
* @param sendEmpty if <code>true</code> adds obj even if it's empty. Ignore if <code>false</code> and obj is empty.
*/
private void addToQueryObject(DBObject dbobj, String field, DBObject obj, boolean sendEmpty) {
if (obj == null)
return;
if (!sendEmpty && obj.keySet().size() == 0)
return;
addToQueryObject(dbobj, field, obj);
}
/**
* Adds an Object to the operation
* @param dbobj DBObject to add field to
* @param field name of the field
* @param obj Object to be added. Ignore if <code>null</code>
*/
private void addToQueryObject(DBObject dbobj, String field, Object obj) {
if (obj == null)
return;
dbobj.put(field, obj);
}
}
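QueryOpBuilder is internal, but the wrapper document its get() produces is easy to picture. This sketch, not part of this diff, builds the same shape by hand for a query with a sort, an explain flag and a read preference, matching the $-prefixed fields handled above; the field names and values are made up.

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;

public class QueryOpShapeSketch {
    public static void main(String[] args) {
        DBObject plainQuery = new BasicDBObject("status", "open");

        // Roughly what QueryOpBuilder.get() returns once any special field is present:
        DBObject wrapped = new BasicDBObject("$query", plainQuery)
                .append("$orderby", new BasicDBObject("created", -1))
                .append("$explain", true)
                .append("$readPreference", new BasicDBObject("mode", "secondaryPreferred"));

        System.out.println(wrapped);
        // With no order-by, hint, snapshot, explain or read preference,
        // the builder hands back plainQuery unchanged.
    }
}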

View File

@ -19,21 +19,55 @@ package com.massivecraft.mcore.xlib.mongodb;
/** /**
* MongoDB keywords for various query operations * MongoDB keywords for various query operations
* @author Julson Lim
* *
* @author Julson Lim
*/ */
public class QueryOperators { public class QueryOperators {
public static final String GT = "$gt"; public static final String OR = "$or";
public static final String GTE = "$gte"; public static final String AND = "$and";
public static final String LT = "$lt";
public static final String LTE = "$lte"; public static final String GT = "$gt";
public static final String NE = "$ne"; public static final String GTE = "$gte";
public static final String IN = "$in"; public static final String LT = "$lt";
public static final String NIN = "$nin"; public static final String LTE = "$lte";
public static final String MOD = "$mod";
public static final String ALL = "$all"; public static final String NE = "$ne";
public static final String SIZE = "$size"; public static final String IN = "$in";
public static final String EXISTS = "$exists"; public static final String NIN = "$nin";
public static final String WHERE = "$where"; public static final String MOD = "$mod";
public static final String NEAR = "$near"; public static final String ALL = "$all";
public static final String SIZE = "$size";
public static final String EXISTS = "$exists";
public static final String ELEM_MATCH = "$elemMatch";
// (to be implemented in QueryBuilder)
public static final String WHERE = "$where";
public static final String NOR = "$nor";
public static final String TYPE = "$type";
public static final String NOT = "$not";
// geo operators
public static final String WITHIN = "$within";
public static final String NEAR = "$near";
public static final String NEAR_SPHERE = "$nearSphere";
public static final String BOX = "$box";
public static final String CENTER = "$center";
public static final String POLYGON = "$polygon";
public static final String CENTER_SPHERE = "$centerSphere";
// (to be implemented in QueryBuilder)
public static final String MAX_DISTANCE = "$maxDistance";
public static final String UNIQUE_DOCS = "$uniqueDocs";
// meta query operators (to be implemented in QueryBuilder)
public static final String RETURN_KEY = "$returnKey";
public static final String MAX_SCAN = "$maxScan";
public static final String ORDER_BY = "$orderby";
public static final String EXPLAIN = "$explain";
public static final String SNAPSHOT = "$snapshot";
public static final String MIN = "$min";
public static final String MAX = "$max";
public static final String SHOW_DISK_LOC = "$showDiskLoc";
public static final String HINT = "$hint";
public static final String COMMENT = "$comment";
} }
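The new constants are drop-in replacements for the string literals previously scattered through query-building code (as the QueryBuilder hunks above show). A brief sketch, not part of this diff, of a hand-built query using them; the field names and values are made up.

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.QueryOperators;

public class QueryOperatorsSketch {
    public static void main(String[] args) {
        // { "status" : { "$in" : [ "open" , "pending" ] } , "score" : { "$gte" : 10 , "$lt" : 100 } }
        DBObject query = new BasicDBObject("status",
                new BasicDBObject(QueryOperators.IN, new String[]{ "open", "pending" }))
                .append("score", new BasicDBObject(QueryOperators.GTE, 10).append(QueryOperators.LT, 100));

        System.out.println(query);
    }
}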

View File

@ -55,6 +55,7 @@ import com.massivecraft.mcore.xlib.bson.types.ObjectId;
/** /**
* This object wraps the binary object format ("BSON") used for the transport of serialized objects to / from the Mongo database. * This object wraps the binary object format ("BSON") used for the transport of serialized objects to / from the Mongo database.
*/ */
@SuppressWarnings({"unchecked", "rawtypes"})
public class RawDBObject implements DBObject { public class RawDBObject implements DBObject {
RawDBObject( ByteBuffer buf ){ RawDBObject( ByteBuffer buf ){
@ -75,7 +76,6 @@ public class RawDBObject implements DBObject {
return e.getObject(); return e.getObject();
} }
@SuppressWarnings({ "unchecked", "rawtypes" })
public Map toMap() { public Map toMap() {
Map m = new HashMap(); Map m = new HashMap();
Iterator i = this.keySet().iterator(); Iterator i = this.keySet().iterator();
@ -94,8 +94,7 @@ public class RawDBObject implements DBObject {
throw new RuntimeException( "read only" ); throw new RuntimeException( "read only" );
} }
@SuppressWarnings("rawtypes") public void putAll( Map m ){
public void putAll( Map m ){
throw new RuntimeException( "read only" ); throw new RuntimeException( "read only" );
} }

View File

@ -13,58 +13,331 @@
package com.massivecraft.mcore.xlib.mongodb; package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.mongodb.ReplicaSetStatus.ReplicaSetNode;
import java.util.ArrayList;
import java.util.List;
import java.util.Map; import java.util.Map;
public class ReadPreference {
public static class PrimaryReadPreference extends ReadPreference { /**
private PrimaryReadPreference() {} * An abstract class that represents preferred replica set members to which a query or command can be sent.
*
* @mongodb.driver.manual applications/replication/#replica-set-read-preference Read Preference
*/
public abstract class ReadPreference {
ReadPreference() {
}
/**
* @return <code>true</code> if this preference allows reads or commands from secondary nodes
*/
public abstract boolean isSlaveOk();
/**
* @return <code>DBObject</code> representation of this preference
*/
public abstract DBObject toDBObject();
/**
* The name of this read preference.
*
* @return the name
*/
public abstract String getName();
abstract ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set);
/**
* Preference to read from primary only.
* Cannot be combined with tags.
*
* @author breinero
*/
private static class PrimaryReadPreference extends ReadPreference {
private PrimaryReadPreference() {
}
@Override @Override
public String toString(){ public boolean isSlaveOk() {
return "ReadPreference.PRIMARY" ; return false;
} }
}
@Override
public static class SecondaryReadPreference extends ReadPreference { public String toString() {
private SecondaryReadPreference() {} return getName();
@Override }
public String toString(){
return "ReadPreference.SECONDARY"; @Override
public boolean equals(final Object o) {
return o != null && getClass() == o.getClass();
}
@Override
public int hashCode() {
return getName().hashCode();
}
@Override
ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
return set.getMaster();
}
@Override
public DBObject toDBObject() {
return new BasicDBObject("mode", getName());
}
@Override
public String getName() {
return "primary";
} }
} }
/**
* Read from a secondary if available and matches tags.
*
* @deprecated As of release 2.9, replaced by
* <code>ReadPreference.secondaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets)</code>
*/
@Deprecated
public static class TaggedReadPreference extends ReadPreference { public static class TaggedReadPreference extends ReadPreference {
public TaggedReadPreference( DBObject tags ) {
public TaggedReadPreference(Map<String, String> tags) {
if (tags == null || tags.size() == 0) {
throw new IllegalArgumentException("tags can not be null or empty");
}
_tags = new BasicDBObject(tags);
List<DBObject> maps = splitMapIntoMultipleMaps(_tags);
_pref = new TaggableReadPreference.SecondaryReadPreference(maps.get(0), getRemainingMaps(maps));
}
public TaggedReadPreference(DBObject tags) {
if (tags == null || tags.keySet().size() == 0) {
throw new IllegalArgumentException("tags can not be null or empty");
}
_tags = tags; _tags = tags;
List<DBObject> maps = splitMapIntoMultipleMaps(_tags);
_pref = new TaggableReadPreference.SecondaryReadPreference(maps.get(0), getRemainingMaps(maps));
} }
public TaggedReadPreference( Map<String, String> tags ) { public DBObject getTags() {
_tags = new BasicDBObject( tags ); DBObject tags = new BasicDBObject();
} for (String key : _tags.keySet())
tags.put(key, _tags.get(key));
public DBObject getTags(){ return tags;
return _tags;
} }
@Override @Override
public String toString(){ public boolean isSlaveOk() {
return getTags().toString(); return _pref.isSlaveOk();
}
@Override
ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
return _pref.getNode(set);
}
@Override
public DBObject toDBObject() {
return _pref.toDBObject();
}
@Override
public String getName() {
return _pref.getName();
}
private static List<DBObject> splitMapIntoMultipleMaps(DBObject tags) {
List<DBObject> tagList = new ArrayList<DBObject>(tags.keySet().size());
for (String key : tags.keySet()) {
tagList.add(new BasicDBObject(key, tags.get(key).toString()));
}
return tagList;
}
private DBObject[] getRemainingMaps(final List<DBObject> maps) {
if (maps.size() <= 1) {
return new DBObject[0];
}
return maps.subList(1, maps.size() - 1).toArray(new DBObject[maps.size() - 1]);
} }
private final DBObject _tags; private final DBObject _tags;
private final ReadPreference _pref;
} }
public static ReadPreference PRIMARY = new PrimaryReadPreference(); /**
* @return ReadPreference which reads from primary only
*/
public static ReadPreference primary() {
return _PRIMARY;
}
public static ReadPreference SECONDARY = new SecondaryReadPreference(); /**
* @return ReadPreference which reads primary if available.
*/
public static ReadPreference primaryPreferred() {
return _PRIMARY_PREFERRED;
}
/* /**
* @return ReadPreference which reads primary if available, otherwise a secondary respective of tags.
*/
public static TaggableReadPreference primaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets) {
return new TaggableReadPreference.PrimaryPreferredReadPreference(firstTagSet, remainingTagSets);
}
/**
* @return ReadPreference which reads secondary.
*/
public static ReadPreference secondary() {
return _SECONDARY;
}
/**
* @return ReadPreference which reads secondary respective of tags.
*/
public static TaggableReadPreference secondary(DBObject firstTagSet, DBObject... remainingTagSets) {
return new TaggableReadPreference.SecondaryReadPreference(firstTagSet, remainingTagSets);
}
/**
* @return ReadPreference which reads secondary if available, otherwise from primary.
*/
public static ReadPreference secondaryPreferred() {
return _SECONDARY_PREFERRED;
}
/**
* @return ReadPreference which reads secondary if available respective of tags, otherwise from primary irrespective of tags.
*/
public static TaggableReadPreference secondaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets) {
return new TaggableReadPreference.SecondaryPreferredReadPreference(firstTagSet, remainingTagSets);
}
/**
* @return ReadPreference which reads nearest node.
*/
public static ReadPreference nearest() {
return _NEAREST;
}
public static ReadPreference valueOf(String name) {
if (name == null) {
throw new IllegalArgumentException();
}
name = name.toLowerCase();
if (name.equals(_PRIMARY.getName().toLowerCase())) {
return _PRIMARY;
}
if (name.equals(_SECONDARY.getName().toLowerCase())) {
return _SECONDARY;
}
if (name.equals(_SECONDARY_PREFERRED.getName().toLowerCase())) {
return _SECONDARY_PREFERRED;
}
if (name.equals(_PRIMARY_PREFERRED.getName().toLowerCase())) {
return _PRIMARY_PREFERRED;
}
if (name.equals(_NEAREST.getName().toLowerCase())) {
return _NEAREST;
}
throw new IllegalArgumentException("No match for read preference of " + name);
}
public static TaggableReadPreference valueOf(String name, DBObject firstTagSet, final DBObject... remainingTagSets) {
if (name == null) {
throw new IllegalArgumentException();
}
name = name.toLowerCase();
if (name.equals(_SECONDARY.getName().toLowerCase())) {
return new TaggableReadPreference.SecondaryReadPreference(firstTagSet, remainingTagSets);
}
if (name.equals(_SECONDARY_PREFERRED.getName().toLowerCase())) {
return new TaggableReadPreference.SecondaryPreferredReadPreference(firstTagSet, remainingTagSets);
}
if (name.equals(_PRIMARY_PREFERRED.getName().toLowerCase())) {
return new TaggableReadPreference.PrimaryPreferredReadPreference(firstTagSet, remainingTagSets);
}
if (name.equals(_NEAREST.getName().toLowerCase())) {
return new TaggableReadPreference.NearestReadPreference(firstTagSet, remainingTagSets);
}
throw new IllegalArgumentException("No match for read preference of " + name);
}
/**
* @return ReadPreference which reads nearest node respective of tags.
*/
public static TaggableReadPreference nearest(DBObject firstTagSet, DBObject... remainingTagSets) {
return new TaggableReadPreference.NearestReadPreference(firstTagSet, remainingTagSets);
}
/**
* A primary read preference. Equivalent to calling {@code ReadPreference.primary()}.
*
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference#primary()
* @deprecated As of release 2.9.0, replaced by {@code ReadPreference.primary()}
*/
@Deprecated
public static final ReadPreference PRIMARY;
/**
* A secondary-preferred read preference. Equivalent to calling
* {@code ReadPreference.secondaryPreferred}. This reference should really have been called
* {@code ReadPreference.SECONDARY_PREFERRED}, but the naming of it preceded the idea of distinguishing
* between secondary and secondary-preferred, so for backwards compatibility the name is left as is,
* with the behavior it had when it was created.
*
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference#secondary()
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference#secondaryPreferred()
* @deprecated As of release 2.9.0, replaced by {@code ReadPreference.secondaryPreferred()}
*/
@Deprecated
public static final ReadPreference SECONDARY;
/**
* @deprecated As of release 2.9.0, replaced by
* {@code ReadPreference.secondaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets)}
*/
@Deprecated
public static ReadPreference withTags(Map<String, String> tags) { public static ReadPreference withTags(Map<String, String> tags) {
return new TaggedReadPreference( tags ); return new TaggedReadPreference( tags );
} }
/**
* @deprecated As of release 2.9.0, replaced by
* {@code ReadPreference.secondaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets)}
*/
@Deprecated
public static ReadPreference withTags( final DBObject tags ) { public static ReadPreference withTags( final DBObject tags ) {
return new TaggedReadPreference( tags ); return new TaggedReadPreference( tags );
} }
*/
private static final ReadPreference _PRIMARY;
private static final ReadPreference _SECONDARY;
private static final ReadPreference _SECONDARY_PREFERRED;
private static final ReadPreference _PRIMARY_PREFERRED;
private static final ReadPreference _NEAREST;
static {
_PRIMARY = new PrimaryReadPreference();
_SECONDARY = new TaggableReadPreference.SecondaryReadPreference();
_SECONDARY_PREFERRED = new TaggableReadPreference.SecondaryPreferredReadPreference();
_PRIMARY_PREFERRED = new TaggableReadPreference.PrimaryPreferredReadPreference();
_NEAREST = new TaggableReadPreference.NearestReadPreference();
PRIMARY = _PRIMARY;
SECONDARY = _SECONDARY_PREFERRED; // this is not a bug. See SECONDARY Javadoc.
}
} }
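As a usage sketch of the factory methods above (the host, database, collection, and tag values are placeholders, and the imports follow the repackaged com.massivecraft.mcore.xlib prefix used throughout this driver copy):

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBCollection;
import com.massivecraft.mcore.xlib.mongodb.DBCursor;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.ReadPreference;

public class ReadPreferenceExample
{
    public static void main(String[] args) throws Exception
    {
        MongoClient mongo = new MongoClient("localhost");
        DBCollection coll = mongo.getDB("test").getCollection("players");

        // Route reads for this collection to a secondary whenever one is available.
        coll.setReadPreference(ReadPreference.secondaryPreferred());

        // For a single query, prefer secondaries tagged dc:east and fall back to the primary.
        DBCursor online = coll.find(new BasicDBObject("online", true));
        online.setReadPreference(ReadPreference.secondaryPreferred(new BasicDBObject("dc", "east")));
        System.out.println("online players: " + online.count());

        // valueOf() maps mode names back to the shared instances, case-insensitively.
        System.out.println(ReadPreference.valueOf("nearest"));

        mongo.close();
    }
}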

View File

@ -32,6 +32,7 @@ import com.massivecraft.mcore.xlib.bson.BSONObject;
/** /**
* This class enables to map simple Class fields to a BSON object fields * This class enables to map simple Class fields to a BSON object fields
*/ */
@SuppressWarnings({"unchecked", "rawtypes"})
public abstract class ReflectionDBObject implements DBObject { public abstract class ReflectionDBObject implements DBObject {
public Object get( String key ){ public Object get( String key ){
@ -58,7 +59,6 @@ public abstract class ReflectionDBObject implements DBObject {
return getWrapper().set( this , key , v ); return getWrapper().set( this , key , v );
} }
@SuppressWarnings({ "unchecked", "rawtypes" })
public void putAll( Map m ){ public void putAll( Map m ){
for ( Map.Entry entry : (Set<Map.Entry>)m.entrySet() ){ for ( Map.Entry entry : (Set<Map.Entry>)m.entrySet() ){
put( entry.getKey().toString() , entry.getValue() ); put( entry.getKey().toString() , entry.getValue() );
@ -91,7 +91,6 @@ public abstract class ReflectionDBObject implements DBObject {
return false; return false;
} }
@SuppressWarnings({ "unchecked", "rawtypes" })
public Map toMap() { public Map toMap() {
Map m = new HashMap(); Map m = new HashMap();
Iterator i = this.keySet().iterator(); Iterator i = this.keySet().iterator();
@ -133,8 +132,7 @@ public abstract class ReflectionDBObject implements DBObject {
* Represents a wrapper around the DBObject to interface with the Class fields * Represents a wrapper around the DBObject to interface with the Class fields
*/ */
public static class JavaWrapper { public static class JavaWrapper {
@SuppressWarnings("rawtypes") JavaWrapper( Class c ){
JavaWrapper( Class c ){
_class = c; _class = c;
_name = c.getName(); _name = c.getName();
@ -205,8 +203,7 @@ public abstract class ReflectionDBObject implements DBObject {
} }
} }
@SuppressWarnings("rawtypes") Class getInternalClass( String path ){
Class getInternalClass( String path ){
String cur = path; String cur = path;
String next = null; String next = null;
final int idx = path.indexOf( "." ); final int idx = path.indexOf( "." );
@ -228,16 +225,14 @@ public abstract class ReflectionDBObject implements DBObject {
return w.getInternalClass( next ); return w.getInternalClass( next );
} }
@SuppressWarnings("rawtypes") final Class _class;
final Class _class;
final String _name; final String _name;
final Map<String,FieldInfo> _fields; final Map<String,FieldInfo> _fields;
final Set<String> _keys; final Set<String> _keys;
} }
static class FieldInfo { static class FieldInfo {
@SuppressWarnings("rawtypes") FieldInfo( String name , Class c ){
FieldInfo( String name , Class c ){
_name = name; _name = name;
_class = c; _class = c;
} }
@ -249,8 +244,7 @@ public abstract class ReflectionDBObject implements DBObject {
} }
final String _name; final String _name;
@SuppressWarnings("rawtypes") final Class _class;
final Class _class;
Method _getter; Method _getter;
Method _setter; Method _setter;
} }
@ -260,8 +254,7 @@ public abstract class ReflectionDBObject implements DBObject {
* @param c * @param c
* @return * @return
*/ */
@SuppressWarnings("rawtypes") public static JavaWrapper getWrapperIfReflectionObject( Class c ){
public static JavaWrapper getWrapperIfReflectionObject( Class c ){
if ( ReflectionDBObject.class.isAssignableFrom( c ) ) if ( ReflectionDBObject.class.isAssignableFrom( c ) )
return getWrapper( c ); return getWrapper( c );
return null; return null;
@ -272,8 +265,7 @@ public abstract class ReflectionDBObject implements DBObject {
* @param c * @param c
* @return * @return
*/ */
@SuppressWarnings("rawtypes") public static JavaWrapper getWrapper( Class c ){
public static JavaWrapper getWrapper( Class c ){
JavaWrapper w = _wrappers.get( c ); JavaWrapper w = _wrappers.get( c );
if ( w == null ){ if ( w == null ){
w = new JavaWrapper( c ); w = new JavaWrapper( c );
@ -282,8 +274,7 @@ public abstract class ReflectionDBObject implements DBObject {
return w; return w;
} }
@SuppressWarnings("rawtypes") private static final Map<Class,JavaWrapper> _wrappers = Collections.synchronizedMap( new HashMap<Class,JavaWrapper>() );
private static final Map<Class,JavaWrapper> _wrappers = Collections.synchronizedMap( new HashMap<Class,JavaWrapper>() );
private static final Set<String> IGNORE_FIELDS = new HashSet<String>(); private static final Set<String> IGNORE_FIELDS = new HashSet<String>();
static { static {
IGNORE_FIELDS.add( "Int" ); IGNORE_FIELDS.add( "Int" );
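For context, a minimal sketch of how this wrapper is normally used (class, property, and collection names are invented): a ReflectionDBObject subclass exposes JavaBean getters/setters, and the capitalized property names derived from those accessors become the BSON field names.

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBCollection;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.ReflectionDBObject;

public class Score extends ReflectionDBObject
{
    private String player;
    private int points;

    // Mapped to the BSON fields "Player" and "Points" by the JavaWrapper above.
    public String getPlayer() { return player; }
    public void setPlayer(String player) { this.player = player; }
    public int getPoints() { return points; }
    public void setPoints(int points) { this.points = points; }

    public static void main(String[] args) throws Exception
    {
        DBCollection coll = new MongoClient("localhost").getDB("test").getCollection("scores");
        coll.setObjectClass(Score.class); // decode query results into Score instances

        Score score = new Score();
        score.setPlayer("Olof");
        score.setPoints(42);
        coll.insert(score);

        Score loaded = (Score) coll.findOne(new BasicDBObject("Player", "Olof"));
        System.out.println(loaded.getPoints());
    }
}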

File diff suppressed because it is too large

View File

@ -43,8 +43,9 @@ class Response {
_len = Bits.readInt(b, pos); _len = Bits.readInt(b, pos);
pos += 4; pos += 4;
if (_len > MAX_LENGTH) if (_len > MAX_LENGTH) {
throw new IllegalArgumentException( "response too long: " + _len ); throw new IllegalArgumentException( "response too long: " + _len );
}
_id = Bits.readInt(b, pos); _id = Bits.readInt(b, pos);
pos += 4; pos += 4;

View File

@ -0,0 +1,215 @@
package com.massivecraft.mcore.xlib.mongodb;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Abstract base class for all read preferences that can be combined with tag sets
*
* @author breinero
*/
public abstract class TaggableReadPreference extends ReadPreference {
private final static List<DBObject> EMPTY = new ArrayList<DBObject>();
TaggableReadPreference() {
_tags = EMPTY;
}
TaggableReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) {
if (firstTagSet == null) {
throw new IllegalArgumentException("Must have at least one tag set");
}
_tags = new ArrayList<DBObject>();
_tags.add(firstTagSet);
Collections.addAll(_tags, remainingTagSets);
}
@Override
public boolean isSlaveOk() {
return true;
}
@Override
public DBObject toDBObject() {
DBObject readPrefObject = new BasicDBObject("mode", getName());
if (!_tags.isEmpty())
readPrefObject.put("tags", _tags);
return readPrefObject;
}
public List<DBObject> getTagSets() {
List<DBObject> tags = new ArrayList<DBObject>();
for (DBObject tagSet : _tags) {
tags.add(tagSet);
}
return tags;
}
@Override
public String toString() {
return getName() + printTags();
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final TaggableReadPreference that = (TaggableReadPreference) o;
if (!_tags.equals(that._tags)) return false;
return true;
}
@Override
public int hashCode() {
int result = _tags.hashCode();
result = 31 * result + getName().hashCode();
return result;
}
String printTags() {
return (_tags.isEmpty() ? "" : " : " + new BasicDBObject("tags", _tags));
}
private static List<ReplicaSetStatus.Tag> getTagListFromDBObject(final DBObject curTagSet) {
List<ReplicaSetStatus.Tag> tagList = new ArrayList<ReplicaSetStatus.Tag>();
for (String key : curTagSet.keySet()) {
tagList.add(new ReplicaSetStatus.Tag(key, curTagSet.get(key).toString()));
}
return tagList;
}
final List<DBObject> _tags;
/**
* Read from secondary
*
* @author breinero
*/
static class SecondaryReadPreference extends TaggableReadPreference {
SecondaryReadPreference() {
}
SecondaryReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) {
super(firstTagSet, remainingTagSets);
}
@Override
public String getName() {
return "secondary";
}
@Override
ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
if (_tags.isEmpty())
return set.getASecondary();
for (DBObject curTagSet : _tags) {
List<ReplicaSetStatus.Tag> tagList = getTagListFromDBObject(curTagSet);
ReplicaSetStatus.ReplicaSetNode node = set.getASecondary(tagList);
if (node != null) {
return node;
}
}
return null;
}
}
/**
* Read from secondary if available, otherwise from primary, irrespective of tags.
*
* @author breinero
*/
static class SecondaryPreferredReadPreference extends SecondaryReadPreference {
SecondaryPreferredReadPreference() {
}
SecondaryPreferredReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) {
super(firstTagSet, remainingTagSets);
}
@Override
public String getName() {
return "secondaryPreferred";
}
@Override
ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
ReplicaSetStatus.ReplicaSetNode node = super.getNode(set);
return (node != null) ? node : set.getMaster();
}
}
/**
* Read from the nearest node, respecting tag sets when they are given.
*
* @author breinero
*/
static class NearestReadPreference extends TaggableReadPreference {
NearestReadPreference() {
}
NearestReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) {
super(firstTagSet, remainingTagSets);
}
@Override
public String getName() {
return "nearest";
}
@Override
ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
if (_tags.isEmpty())
return set.getAMember();
for (DBObject curTagSet : _tags) {
List<ReplicaSetStatus.Tag> tagList = getTagListFromDBObject(curTagSet);
ReplicaSetStatus.ReplicaSetNode node = set.getAMember(tagList);
if (node != null) {
return node;
}
}
return null;
}
}
/**
* Read from primary if available, otherwise a secondary.
*
* @author breinero
*/
static class PrimaryPreferredReadPreference extends SecondaryReadPreference {
PrimaryPreferredReadPreference() {}
PrimaryPreferredReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) {
super(firstTagSet, remainingTagSets);
}
@Override
public String getName() {
return "primaryPreferred";
}
@Override
ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
ReplicaSetStatus.ReplicaSetNode node = set.getMaster();
return (node != null) ? node : super.getNode(set);
}
}
}
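A short sketch of how the tag-set matching above is driven from user code; the tag names are illustrative, and the tag sets are tried in the order they are given:

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.ReadPreference;
import com.massivecraft.mcore.xlib.mongodb.TaggableReadPreference;

public class TaggedReadExample
{
    public static void main(String[] args)
    {
        // First try a datacenter-local SSD member, then any member in the datacenter;
        // secondaryPreferred() falls back to the primary if neither tag set matches.
        DBObject fast = new BasicDBObject("dc", "east").append("disk", "ssd");
        DBObject local = new BasicDBObject("dc", "east");
        TaggableReadPreference pref = ReadPreference.secondaryPreferred(fast, local);

        // Document form used when the preference is sent to the server:
        // { "mode" : "secondaryPreferred", "tags" : [ ... ] }
        System.out.println(pref.toDBObject());
        System.out.println(pref.getTagSets());
    }
}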

View File

@ -25,56 +25,132 @@ import java.util.HashMap;
import java.util.Map; import java.util.Map;
/** /**
* <p>WriteConcern control the write behavior for with various options, as well as exception raising on error conditions.</p> * <p>WriteConcern controls the acknowledgment of write operations with various options.
*
* <p> * <p>
* <b>w</b> * <b>w</b>
* <ul> * <ul>
* <li>-1 = don't even report network errors </li> * <li>-1 = Don't even report network errors </li>
* <li> 0 = default, don't call getLastError by default </li> * <li> 0 = Don't wait for acknowledgement from the server </li>
* <li> 1 = basic, call getLastError, but don't wait for slaves</li> * <li> 1 = Wait for acknowledgement, but don't wait for secondaries to replicate</li>
* <li> 2+= wait for slaves </li> * <li> 2+= Wait for one or more secondaries to also acknowledge </li>
* </ul> * </ul>
* <b>wtimeout</b> how long to wait for slaves before failing * <b>wtimeout</b> how long to wait for slaves before failing
* <ul> * <ul>
* <li>0 = indefinite </li> * <li>0: indefinite </li>
* <li>> 0 = ms to wait </li> * <li>greater than 0: ms to wait </li>
* </ul> * </ul>
* </p> * </p>
* <p><b>fsync</b> force fsync to disk </p> * <p>
* * Other options:
* <ul>
* <li><b>j</b>: wait for group commit to journal</li>
* <li><b>fsync</b>: force fsync to disk</li>
* </ul>
* @dochub databases * @dochub databases
*/ */
public class WriteConcern implements Serializable { public class WriteConcern implements Serializable {
private static final long serialVersionUID = 1884671104750417011L; private static final long serialVersionUID = 1884671104750417011L;
/** No exceptions are raised, even for network issues */ /**
* No exceptions are raised, even for network issues.
*/
public final static WriteConcern ERRORS_IGNORED = new WriteConcern(-1);
/**
* Write operations that use this write concern will wait for acknowledgement from the primary server before returning.
* Exceptions are raised for network issues, and server errors.
* @since 2.10.0
*/
public final static WriteConcern ACKNOWLEDGED = new WriteConcern(1);
/**
* Write operations that use this write concern will return as soon as the message is written to the socket.
* Exceptions are raised for network issues, but not server errors.
* @since 2.10.0
*/
public final static WriteConcern UNACKNOWLEDGED = new WriteConcern(0);
/**
* Exceptions are raised for network issues, and server errors; the write operation waits for the server to flush
* the data to disk.
*/
public final static WriteConcern FSYNCED = new WriteConcern(true);
/**
* Exceptions are raised for network issues, and server errors; the write operation waits for the server to
* group commit to the journal file on disk.
*/
public final static WriteConcern JOURNALED = new WriteConcern( 1, 0, false, true );
/**
* Exceptions are raised for network issues, and server errors; waits for at least 2 servers for the write operation.
*/
public final static WriteConcern REPLICA_ACKNOWLEDGED= new WriteConcern(2);
/**
* No exceptions are raised, even for network issues.
* <p>
* This field has been superseded by {@code WriteConcern.ERRORS_IGNORED}, and may be deprecated in a future release.
* @see WriteConcern#ERRORS_IGNORED
*/
public final static WriteConcern NONE = new WriteConcern(-1); public final static WriteConcern NONE = new WriteConcern(-1);
/** Exceptions are raised for network issues, but not server errors */ /**
* Write operations that use this write concern will return as soon as the message is written to the socket.
* Exceptions are raised for network issues, but not server errors.
* <p>
* This field has been superseded by {@code WriteConcern.UNACKNOWLEDGED}, and may be deprecated in a future release.
* @see WriteConcern#UNACKNOWLEDGED
*/
public final static WriteConcern NORMAL = new WriteConcern(0); public final static WriteConcern NORMAL = new WriteConcern(0);
/** Exceptions are raised for network issues, and server errors; waits on a server for the write operation */ /**
* Write operations that use this write concern will wait for acknowledgement from the primary server before returning.
* Exceptions are raised for network issues, and server errors.
* <p>
* This field has been superseded by {@code WriteConcern.ACKNOWLEDGED}, and may be deprecated in a future release.
* @see WriteConcern#ACKNOWLEDGED
*/
public final static WriteConcern SAFE = new WriteConcern(1); public final static WriteConcern SAFE = new WriteConcern(1);
/** Exceptions are raised for network issues, and server errors; waits on a majority of servers for the write operation */ /**
* Exceptions are raised for network issues, and server errors; waits on a majority of servers for the write operation.
*/
public final static WriteConcern MAJORITY = new Majority(); public final static WriteConcern MAJORITY = new Majority();
/** Exceptions are raised for network issues, and server errors; the write operation waits for the server to flush the data to disk*/ /**
* Exceptions are raised for network issues, and server errors; the write operation waits for the server to flush
* the data to disk.
* <p>
* This field has been superseded by {@code WriteConcern.FSYNCED}, and may be deprecated in a future release.
* @see WriteConcern#FSYNCED
*/
public final static WriteConcern FSYNC_SAFE = new WriteConcern(true); public final static WriteConcern FSYNC_SAFE = new WriteConcern(true);
/** Exceptions are raised for network issues, and server errors; the write operation waits for the server to group commit to the journal file on disk*/ /**
* Exceptions are raised for network issues, and server errors; the write operation waits for the server to
* group commit to the journal file on disk.
* <p>
* This field has been superseded by {@code WriteConcern.JOURNALED}, and may be deprecated in a future release.
* @see WriteConcern#JOURNALED
*/
public final static WriteConcern JOURNAL_SAFE = new WriteConcern( 1, 0, false, true ); public final static WriteConcern JOURNAL_SAFE = new WriteConcern( 1, 0, false, true );
/** Exceptions are raised for network issues, and server errors; waits for at least 2 servers for the write operation*/ /**
* Exceptions are raised for network issues, and server errors; waits for at least 2 servers for the write operation.
* <p>
* This field has been superseded by {@code WriteConcern.REPLICA_ACKNOWLEDGED}, and may be deprecated in a future release.
* @see WriteConcern#REPLICA_ACKNOWLEDGED
*/
public final static WriteConcern REPLICAS_SAFE = new WriteConcern(2); public final static WriteConcern REPLICAS_SAFE = new WriteConcern(2);
// map of the constants from above for use by fromString // map of the constants from above for use by fromString
private static Map<String, WriteConcern> _namedConcerns = null; private static Map<String, WriteConcern> _namedConcerns = null;
/** /**
* Default constructor keeping all options as default * Default constructor keeping all options as default. Be careful using this constructor, as it's equivalent to
* {@code WriteConcern.UNACKNOWLEDGED}, so writes may be lost without any errors being reported.
* @see WriteConcern#UNACKNOWLEDGED
*/ */
public WriteConcern(){ public WriteConcern(){
this(0); this(0);
@ -227,12 +303,19 @@ public class WriteConcern implements Serializable {
_continueOnErrorForInsert = continueOnInsertError; _continueOnErrorForInsert = continueOnInsertError;
} }
public BasicDBObject getCommand(){ /**
* Gets the getlasterror command for this write concern.
*
* @return getlasterror command, even if <code>w <= 0</code>
*/
public BasicDBObject getCommand() {
BasicDBObject _command = new BasicDBObject( "getlasterror" , 1 ); BasicDBObject _command = new BasicDBObject( "getlasterror" , 1 );
if ( _w instanceof Integer && ( (Integer) _w > 0) || if (_w instanceof Integer && ((Integer) _w > 1) || (_w instanceof String)){
( _w instanceof String && _w != null ) ){
_command.put( "w" , _w ); _command.put( "w" , _w );
}
if (_wtimeout > 0) {
_command.put( "wtimeout" , _wtimeout ); _command.put( "wtimeout" , _wtimeout );
} }
@ -273,7 +356,7 @@ public class WriteConcern implements Serializable {
/** /**
* Gets the w parameter (the write strategy) in String format * Gets the w parameter (the write strategy) in String format
* @return * @return w as a string
*/ */
public String getWString(){ public String getWString(){
return _w.toString(); return _w.toString();
@ -335,7 +418,8 @@ public class WriteConcern implements Serializable {
for (Field f : WriteConcern.class.getFields()) for (Field f : WriteConcern.class.getFields())
if (Modifier.isStatic( f.getModifiers() ) && f.getType().equals( WriteConcern.class )) { if (Modifier.isStatic( f.getModifiers() ) && f.getType().equals( WriteConcern.class )) {
try { try {
newMap.put( f.getName().toLowerCase(), (WriteConcern) f.get( null ) ); String key = f.getName().toLowerCase();
newMap.put(key, (WriteConcern) f.get( null ) );
} catch (Exception e) { } catch (Exception e) {
throw new RuntimeException( e ); throw new RuntimeException( e );
} }
@ -350,7 +434,7 @@ public class WriteConcern implements Serializable {
} }
@Override @Override
public String toString(){ public String toString() {
return "WriteConcern " + getCommand() + " / (Continue Inserting on Errors? " + getContinueOnErrorForInsert() + ")"; return "WriteConcern " + getCommand() + " / (Continue Inserting on Errors? " + getContinueOnErrorForInsert() + ")";
} }

View File

@ -0,0 +1,47 @@
/*
* Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* An exception representing an error reported due to a write failure.
*/
public class WriteConcernException extends MongoException {
private static final long serialVersionUID = 841056799207039974L;
private final CommandResult commandResult;
/**
* Construct a new instance with the CommandResult from getlasterror command
*
* @param commandResult the command result
*/
public WriteConcernException(final CommandResult commandResult) {
super(commandResult.getCode(), commandResult.toString());
this.commandResult = commandResult;
}
/**
* Gets the getlasterror command result document.
*
* @return the command result
*/
public CommandResult getCommandResult() {
return commandResult;
}
}
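A hedged sketch of consuming this exception; whether a particular write path throws WriteConcernException or a more general MongoException depends on driver code not shown in this diff, and the collection, field, and concern values below are invented:

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.CommandResult;
import com.massivecraft.mcore.xlib.mongodb.DBCollection;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.WriteConcern;
import com.massivecraft.mcore.xlib.mongodb.WriteConcernException;

public class WriteConcernErrorHandling
{
    public static void main(String[] args) throws Exception
    {
        DBCollection coll = new MongoClient("localhost").getDB("test").getCollection("events");
        try
        {
            // w:3 with a 1 second timeout can fail on a two-member replica set.
            coll.setWriteConcern(new WriteConcern(3, 1000));
            coll.insert(new BasicDBObject("type", "join"));
        }
        catch (WriteConcernException e)
        {
            // The full getlasterror document is kept for diagnostics.
            CommandResult result = e.getCommandResult();
            System.err.println("write not replicated in time: " + result);
        }
    }
}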

View File

@ -41,7 +41,7 @@ public class WriteResult {
WriteResult( DB db , DBPort p , WriteConcern concern ){ WriteResult( DB db , DBPort p , WriteConcern concern ){
_db = db; _db = db;
_port = p; _port = p;
_lastCall = p._calls; _lastCall = p._calls.get();
_lastConcern = concern; _lastConcern = concern;
_lazy = true; _lazy = true;
} }
@ -67,9 +67,10 @@ public class WriteResult {
/** /**
* calls {@link WriteResult#getLastError(com.mongodb.WriteConcern)} with concern=null * calls {@link WriteResult#getLastError(com.mongodb.WriteConcern)} with concern=null
* @return * @return
* @throws MongoException
*/ */
public synchronized CommandResult getLastError(){ public synchronized CommandResult getLastError(){
return getLastError(null); return getLastError(null);
} }
/** /**
@ -78,6 +79,7 @@ public class WriteResult {
* - otherwise attempts to obtain a CommandResult by calling getLastError with the concern * - otherwise attempts to obtain a CommandResult by calling getLastError with the concern
* @param concern the concern * @param concern the concern
* @return * @return
* @throws MongoException
*/ */
public synchronized CommandResult getLastError(WriteConcern concern){ public synchronized CommandResult getLastError(WriteConcern concern){
if ( _lastErrorResult != null ) { if ( _lastErrorResult != null ) {
@ -110,6 +112,7 @@ public class WriteResult {
/** /**
* Gets the error String ("err" field) * Gets the error String ("err" field)
* @return * @return
* @throws MongoException
*/ */
public String getError(){ public String getError(){
Object foo = getField( "err" ); Object foo = getField( "err" );
@ -122,6 +125,7 @@ public class WriteResult {
* Gets the "n" field, which contains the number of documents * Gets the "n" field, which contains the number of documents
* affected in the write operation. * affected in the write operation.
* @return * @return
* @throws MongoException
*/ */
public int getN(){ public int getN(){
return getLastError().getInt( "n" ); return getLastError().getInt( "n" );
@ -131,6 +135,7 @@ public class WriteResult {
* Gets a field * Gets a field
* @param name field name * @param name field name
* @return * @return
* @throws MongoException
*/ */
public Object getField( String name ){ public Object getField( String name ){
return getLastError().get( name ); return getLastError().get( name );
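For reference, a small sketch of how the lazily-fetched getlasterror fields above are read back after a write (names are placeholders):

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBCollection;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.WriteConcern;
import com.massivecraft.mcore.xlib.mongodb.WriteResult;

public class WriteResultExample
{
    public static void main(String[] args) throws Exception
    {
        DBCollection coll = new MongoClient("localhost").getDB("test").getCollection("players");
        coll.setWriteConcern(WriteConcern.ACKNOWLEDGED);

        WriteResult result = coll.update(
                new BasicDBObject("name", "Olof"),
                new BasicDBObject("$set", new BasicDBObject("online", true)));

        System.out.println("documents touched: " + result.getN()); // "n" from getlasterror
        System.out.println("error: " + result.getError());         // null when the write succeeded
    }
}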

View File

@ -18,18 +18,20 @@
package com.massivecraft.mcore.xlib.mongodb.gridfs; package com.massivecraft.mcore.xlib.mongodb.gridfs;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.Mongo;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.util.Util;
import java.io.File; import java.io.File;
import java.security.DigestInputStream; import java.security.DigestInputStream;
import java.security.MessageDigest; import java.security.MessageDigest;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.Mongo;
import com.massivecraft.mcore.xlib.mongodb.util.Util;
/** /**
* a simple CLI for Gridfs * a simple CLI for Gridfs
*/ */
@SuppressWarnings({"unused"})
public class CLI { public class CLI {
/** /**
@ -48,10 +50,11 @@ public class CLI {
private static String db = "test"; private static String db = "test";
private static Mongo _mongo = null; private static Mongo _mongo = null;
private static Mongo getMongo() private static Mongo getMongo()
throws Exception { throws Exception {
if ( _mongo == null ) if ( _mongo == null )
_mongo = new Mongo( host ); _mongo = new MongoClient( host );
return _mongo; return _mongo;
} }
@ -63,8 +66,7 @@ public class CLI {
return _gridfs; return _gridfs;
} }
@SuppressWarnings("unused") public static void main(String[] args) throws Exception {
public static void main(String[] args) throws Exception {
if ( args.length < 1 ){ if ( args.length < 1 ){
printUsage(); printUsage();

View File

@ -68,6 +68,7 @@ public class GridFS {
* in the given database. Set the preferred WriteConcern on the given DB with DB.setWriteConcern * in the given database. Set the preferred WriteConcern on the given DB with DB.setWriteConcern
* @see com.massivecraft.mcore.xlib.mongodb.WriteConcern * @see com.massivecraft.mcore.xlib.mongodb.WriteConcern
* @param db database to work with * @param db database to work with
* @throws MongoException
*/ */
public GridFS(DB db) { public GridFS(DB db) {
this(db, DEFAULT_BUCKET); this(db, DEFAULT_BUCKET);
@ -80,6 +81,7 @@ public class GridFS {
* @see com.massivecraft.mcore.xlib.mongodb.WriteConcern * @see com.massivecraft.mcore.xlib.mongodb.WriteConcern
* @param db database to work with * @param db database to work with
* @param bucket bucket to use in the given database * @param bucket bucket to use in the given database
* @throws MongoException
*/ */
public GridFS(DB db, String bucket) { public GridFS(DB db, String bucket) {
_db = db; _db = db;
@ -109,7 +111,7 @@ public class GridFS {
* @return cursor of file objects * @return cursor of file objects
*/ */
public DBCursor getFileList(){ public DBCursor getFileList(){
return _filesCollection.find().sort(new BasicDBObject("filename",1)); return getFileList(new BasicDBObject());
} }
/** /**
@ -119,7 +121,18 @@ public class GridFS {
* @return cursor of file objects * @return cursor of file objects
*/ */
public DBCursor getFileList( DBObject query ){ public DBCursor getFileList( DBObject query ){
return _filesCollection.find( query ).sort(new BasicDBObject("filename",1)); return getFileList(query, new BasicDBObject("filename",1));
}
/**
* gets a filtered list of files stored in this gridfs, sorted by the given sort criteria
*
* @param query filter to apply
* @param sort sorting to apply
* @return cursor of file objects
*/
public DBCursor getFileList( DBObject query, DBObject sort){
return _filesCollection.find( query ).sort(sort);
} }
@ -131,6 +144,7 @@ public class GridFS {
* finds one file matching the given id. Equivalent to findOne(id) * finds one file matching the given id. Equivalent to findOne(id)
* @param id * @param id
* @return * @return
* @throws MongoException
*/ */
public GridFSDBFile find( ObjectId id ){ public GridFSDBFile find( ObjectId id ){
return findOne( id ); return findOne( id );
@ -139,6 +153,7 @@ public class GridFS {
* finds one file matching the given id. * finds one file matching the given id.
* @param id * @param id
* @return * @return
* @throws MongoException
*/ */
public GridFSDBFile findOne( ObjectId id ){ public GridFSDBFile findOne( ObjectId id ){
return findOne( new BasicDBObject( "_id" , id ) ); return findOne( new BasicDBObject( "_id" , id ) );
@ -147,6 +162,7 @@ public class GridFS {
* finds one file matching the given filename * finds one file matching the given filename
* @param filename * @param filename
* @return * @return
* @throws MongoException
*/ */
public GridFSDBFile findOne( String filename ){ public GridFSDBFile findOne( String filename ){
return findOne( new BasicDBObject( "filename" , filename ) ); return findOne( new BasicDBObject( "filename" , filename ) );
@ -155,6 +171,7 @@ public class GridFS {
* finds one file matching the given query * finds one file matching the given query
* @param query * @param query
* @return * @return
* @throws MongoException
*/ */
public GridFSDBFile findOne( DBObject query ){ public GridFSDBFile findOne( DBObject query ){
return _fix( _filesCollection.findOne( query ) ); return _fix( _filesCollection.findOne( query ) );
@ -164,26 +181,61 @@ public class GridFS {
* finds a list of files matching the given filename * finds a list of files matching the given filename
* @param filename * @param filename
* @return * @return
* @throws MongoException
*/ */
public List<GridFSDBFile> find( String filename ){ public List<GridFSDBFile> find( String filename ){
return find( new BasicDBObject( "filename" , filename ) ); return find( filename, null );
} }
/**
* finds a list of files matching the given filename
* @param filename
* @param sort
* @return
* @throws MongoException
*/
public List<GridFSDBFile> find( String filename , DBObject sort){
return find( new BasicDBObject( "filename" , filename ), sort );
}
/** /**
* finds a list of files matching the given query * finds a list of files matching the given query
* @param query * @param query
* @return * @return
* @throws MongoException
*/ */
public List<GridFSDBFile> find( DBObject query ){ public List<GridFSDBFile> find( DBObject query ){
return find(query, null);
}
/**
* finds a list of files matching the given query
* @param query
* @param sort
* @return
* @throws MongoException
*/
public List<GridFSDBFile> find( DBObject query , DBObject sort){
List<GridFSDBFile> files = new ArrayList<GridFSDBFile>(); List<GridFSDBFile> files = new ArrayList<GridFSDBFile>();
DBCursor c = _filesCollection.find( query ); DBCursor c = null;
while ( c.hasNext() ){ try {
files.add( _fix( c.next() ) ); c = _filesCollection.find( query );
if (sort != null) {
c.sort(sort);
}
while ( c.hasNext() ){
files.add( _fix( c.next() ) );
}
} finally {
if (c != null){
c.close();
}
} }
return files; return files;
} }
private GridFSDBFile _fix( Object o ){ protected GridFSDBFile _fix( Object o ){
if ( o == null ) if ( o == null )
return null; return null;
@ -203,6 +255,7 @@ public class GridFS {
/** /**
* removes the file matching the given id * removes the file matching the given id
* @param id * @param id
* @throws MongoException
*/ */
public void remove( ObjectId id ){ public void remove( ObjectId id ){
_filesCollection.remove( new BasicDBObject( "_id" , id ) ); _filesCollection.remove( new BasicDBObject( "_id" , id ) );
@ -212,6 +265,7 @@ public class GridFS {
/** /**
* removes all files matching the given filename * removes all files matching the given filename
* @param filename * @param filename
* @throws MongoException
*/ */
public void remove( String filename ){ public void remove( String filename ){
remove( new BasicDBObject( "filename" , filename ) ); remove( new BasicDBObject( "filename" , filename ) );
@ -220,6 +274,7 @@ public class GridFS {
/** /**
* removes all files matching the given query * removes all files matching the given query
* @param query * @param query
* @throws MongoException
*/ */
public void remove( DBObject query ){ public void remove( DBObject query ){
for ( GridFSDBFile f : find( query ) ){ for ( GridFSDBFile f : find( query ) ){
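Putting the store/list/remove operations above together in one sketch (the bucket name and file path are illustrative):

import java.io.File;

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.DBCursor;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.gridfs.GridFS;
import com.massivecraft.mcore.xlib.mongodb.gridfs.GridFSInputFile;

public class GridFSExample
{
    public static void main(String[] args) throws Exception
    {
        DB db = new MongoClient("localhost").getDB("test");
        GridFS fs = new GridFS(db, "attachments"); // bucket name is illustrative

        // Store a file from disk.
        GridFSInputFile stored = fs.createFile(new File("world-backup.zip"));
        stored.setContentType("application/zip");
        stored.save();

        // List files, newest first, via the sort-aware overload added above.
        DBCursor newestFirst = fs.getFileList(new BasicDBObject(), new BasicDBObject("uploadDate", -1));
        try
        {
            while (newestFirst.hasNext())
            {
                System.out.println(newestFirst.next().get("filename"));
            }
        }
        finally
        {
            newestFirst.close();
        }

        // Remove every stored revision of the file by name.
        fs.remove("world-backup.zip");
    }
}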

View File

@ -53,6 +53,7 @@ public class GridFSDBFile extends GridFSFile {
* @param filename the file name on disk * @param filename the file name on disk
* @return * @return
* @throws IOException * @throws IOException
* @throws MongoException
*/ */
public long writeTo( String filename ) throws IOException { public long writeTo( String filename ) throws IOException {
return writeTo( new File( filename ) ); return writeTo( new File( filename ) );
@ -62,6 +63,7 @@ public class GridFSDBFile extends GridFSFile {
* @param f the File object * @param f the File object
* @return * @return
* @throws IOException * @throws IOException
* @throws MongoException
*/ */
public long writeTo( File f ) throws IOException { public long writeTo( File f ) throws IOException {
@ -80,6 +82,7 @@ public class GridFSDBFile extends GridFSFile {
* @param out the OutputStream * @param out the OutputStream
* @return * @return
* @throws IOException * @throws IOException
* @throws MongoException
*/ */
public long writeTo( OutputStream out ) public long writeTo( OutputStream out )
throws IOException { throws IOException {
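And the read side, as a small sketch (bucket, file name, and target path are placeholders): look a file up, then stream it out with one of the writeTo overloads above:

import java.io.File;

import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.gridfs.GridFS;
import com.massivecraft.mcore.xlib.mongodb.gridfs.GridFSDBFile;

public class GridFSReadExample
{
    public static void main(String[] args) throws Exception
    {
        DB db = new MongoClient("localhost").getDB("test");
        GridFS fs = new GridFS(db, "attachments"); // bucket name is illustrative

        GridFSDBFile file = fs.findOne("world-backup.zip");
        if (file != null)
        {
            long written = file.writeTo(new File("/tmp/world-backup.zip"));
            System.out.println("wrote " + written + " bytes");
        }
    }
}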

View File

@ -37,6 +37,7 @@ import com.massivecraft.mcore.xlib.mongodb.util.JSON;
* The abstract class representing a GridFS file * The abstract class representing a GridFS file
* @author antoine * @author antoine
*/ */
@SuppressWarnings({"unchecked", "rawtypes"})
public abstract class GridFSFile implements DBObject { public abstract class GridFSFile implements DBObject {
@ -46,6 +47,7 @@ public abstract class GridFSFile implements DBObject {
/** /**
* Saves the file entry to the files collection * Saves the file entry to the files collection
* @throws MongoException
*/ */
public void save(){ public void save(){
if ( _fs == null ) if ( _fs == null )
@ -58,7 +60,7 @@ public abstract class GridFSFile implements DBObject {
* This should be called after transferring a file. * This should be called after transferring a file.
* @throws MongoException * @throws MongoException
*/ */
public void validate() throws MongoException { public void validate(){
if ( _fs == null ) if ( _fs == null )
throw new MongoException( "no _fs" ); throw new MongoException( "no _fs" );
if ( _md5 == null ) if ( _md5 == null )
@ -147,7 +149,6 @@ public abstract class GridFSFile implements DBObject {
* note: to set aliases, call put( "aliases" , List<String> ) * note: to set aliases, call put( "aliases" , List<String> )
* @return * @return
*/ */
@SuppressWarnings("unchecked")
public List<String> getAliases(){ public List<String> getAliases(){
return (List<String>)_extradata.get( "aliases" ); return (List<String>)_extradata.get( "aliases" );
} }
@ -226,13 +227,11 @@ public abstract class GridFSFile implements DBObject {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
@SuppressWarnings("rawtypes") public void putAll( Map m ){
public void putAll( Map m ){
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
@SuppressWarnings("rawtypes") public Map toMap(){
public Map toMap(){
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
@ -252,7 +251,6 @@ public abstract class GridFSFile implements DBObject {
return keySet().contains( s ); return keySet().contains( s );
} }
@SuppressWarnings({ "unchecked", "rawtypes" })
public Set<String> keySet(){ public Set<String> keySet(){
Set<String> keys = new HashSet(); Set<String> keys = new HashSet();
keys.addAll(VALID_FIELDS); keys.addAll(VALID_FIELDS);
@ -297,7 +295,6 @@ public abstract class GridFSFile implements DBObject {
DBObject _extradata = new BasicDBObject(); DBObject _extradata = new BasicDBObject();
String _md5; String _md5;
@SuppressWarnings({ "unchecked", "rawtypes" })
final static Set<String> VALID_FIELDS = Collections.unmodifiableSet( new HashSet( Arrays.asList( new String[]{ final static Set<String> VALID_FIELDS = Collections.unmodifiableSet( new HashSet( Arrays.asList( new String[]{
"_id" , "filename" , "contentType" , "length" , "chunkSize" , "_id" , "filename" , "contentType" , "length" , "chunkSize" ,
"uploadDate" , "aliases" , "md5" "uploadDate" , "aliases" , "md5"

View File

@ -18,20 +18,20 @@
package com.massivecraft.mcore.xlib.mongodb.gridfs; package com.massivecraft.mcore.xlib.mongodb.gridfs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.MessageDigest;
import java.util.Date;
import com.massivecraft.mcore.xlib.bson.types.ObjectId; import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.mongodb.BasicDBObjectBuilder; import com.massivecraft.mcore.xlib.mongodb.BasicDBObjectBuilder;
import com.massivecraft.mcore.xlib.mongodb.DBObject; import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.MongoException; import com.massivecraft.mcore.xlib.mongodb.MongoException;
import com.massivecraft.mcore.xlib.mongodb.util.SimplePool;
import com.massivecraft.mcore.xlib.mongodb.util.Util; import com.massivecraft.mcore.xlib.mongodb.util.Util;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Date;
/** /**
* This class represents a GridFS file to be written to the database * This class represents a GridFS file to be written to the database
* Operations include: * Operations include:
@ -64,7 +64,11 @@ public class GridFSInputFile extends GridFSFile {
_id = new ObjectId(); _id = new ObjectId();
_chunkSize = GridFS.DEFAULT_CHUNKSIZE; _chunkSize = GridFS.DEFAULT_CHUNKSIZE;
_uploadDate = new Date(); _uploadDate = new Date();
_messageDigester = _md5Pool.get(); try {
_messageDigester = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException("No MD5!");
}
_messageDigester.reset(); _messageDigester.reset();
_buffer = new byte[(int) _chunkSize]; _buffer = new byte[(int) _chunkSize];
} }
@ -148,6 +152,7 @@ public class GridFSInputFile extends GridFSFile {
/** /**
* calls {@link GridFSInputFile#save(long)} with the existing chunk size * calls {@link GridFSInputFile#save(long)} with the existing chunk size
* @throws MongoException
*/ */
@Override @Override
public void save() { public void save() {
@ -160,6 +165,7 @@ public class GridFSInputFile extends GridFSFile {
* *
* @param chunkSize * @param chunkSize
* Size of chunks for file in bytes. * Size of chunks for file in bytes.
* @throws MongoException
*/ */
public void save( long chunkSize ) { public void save( long chunkSize ) {
if (_outputStream != null) if (_outputStream != null)
@ -185,6 +191,7 @@ public class GridFSInputFile extends GridFSFile {
* @throws IOException * @throws IOException
* on problems reading the new entry's * on problems reading the new entry's
* {@link java.io.InputStream}. * {@link java.io.InputStream}.
* @throws MongoException
*/ */
public int saveChunks() throws IOException { public int saveChunks() throws IOException {
return saveChunks( _chunkSize ); return saveChunks( _chunkSize );
@ -201,6 +208,7 @@ public class GridFSInputFile extends GridFSFile {
* @throws IOException * @throws IOException
* on problems reading the new entry's * on problems reading the new entry's
* {@link java.io.InputStream}. * {@link java.io.InputStream}.
* @throws MongoException
*/ */
public int saveChunks( long chunkSize ) throws IOException { public int saveChunks( long chunkSize ) throws IOException {
if (_outputStream != null) if (_outputStream != null)
@ -249,10 +257,9 @@ public class GridFSInputFile extends GridFSFile {
* Dumps a new chunk into the chunks collection. Depending on the flag, also * Dumps a new chunk into the chunks collection. Depending on the flag, also
* partial buffers (at the end) are going to be written immediately. * partial buffers (at the end) are going to be written immediately.
* *
* @param data
* Data for chunk.
* @param writePartial * @param writePartial
* Write also partial buffers full. * Write also partial buffers full.
* @throws MongoException
*/ */
private void _dumpBuffer( boolean writePartial ) { private void _dumpBuffer( boolean writePartial ) {
if ( ( _currentBufferPosition < _chunkSize ) && !writePartial ) { if ( ( _currentBufferPosition < _chunkSize ) && !writePartial ) {
@ -314,7 +321,6 @@ public class GridFSInputFile extends GridFSFile {
private void _finishData() { private void _finishData() {
if (!_savedChunks) { if (!_savedChunks) {
_md5 = Util.toHex( _messageDigester.digest() ); _md5 = Util.toHex( _messageDigester.digest() );
_md5Pool.done( _messageDigester );
_messageDigester = null; _messageDigester = null;
_length = _totalBytes; _length = _totalBytes;
_savedChunks = true; _savedChunks = true;
@ -337,25 +343,6 @@ public class GridFSInputFile extends GridFSFile {
private MessageDigest _messageDigester = null; private MessageDigest _messageDigester = null;
private OutputStream _outputStream = null; private OutputStream _outputStream = null;
/**
* A pool of {@link java.security.MessageDigest} objects.
*/
static SimplePool<MessageDigest> _md5Pool
= new SimplePool<MessageDigest>( "md5" , 10 , -1 , false , false ) {
/**
* {@inheritDoc}
*
* @see com.massivecraft.mcore.xlib.mongodb.util.SimplePool#createNew()
*/
protected MessageDigest createNew() {
try {
return MessageDigest.getInstance( "MD5" );
} catch ( java.security.NoSuchAlgorithmException e ) {
throw new RuntimeException( "your system doesn't have md5!" );
}
}
};
/** /**
* An output stream implementation that can be used to successively write to * An output stream implementation that can be used to successively write to
* a GridFS file. * a GridFS file.
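As a usage sketch (bucket and file names are invented): save() drives saveChunks() over the supplied input stream using GridFS.DEFAULT_CHUNKSIZE, then persists the file document with the MD5 computed by the per-file MessageDigest created in the constructor above.

import java.io.ByteArrayInputStream;

import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.gridfs.GridFS;
import com.massivecraft.mcore.xlib.mongodb.gridfs.GridFSInputFile;

public class GridFSWriteExample
{
    public static void main(String[] args) throws Exception
    {
        DB db = new MongoClient("localhost").getDB("test");
        GridFS fs = new GridFS(db, "attachments"); // bucket name is illustrative

        byte[] data = "hello gridfs".getBytes("UTF-8");
        GridFSInputFile file = fs.createFile(new ByteArrayInputStream(data), "notes.txt");
        file.setContentType("text/plain");

        // Chunks the stream, writes the chunks collection, then stores the
        // file document with the computed MD5.
        file.save();
    }
}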

View File

@ -0,0 +1,312 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.tools;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.JMException;
import javax.management.MBeanException;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import javax.management.openmbean.CompositeData;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import java.io.CharArrayWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.lang.management.ManagementFactory;
import java.util.Set;
/**
* A simple class that formats Mongo Java driver connection pool statistics in an easily-accessible JSON format.
* It can be used to get statistics on connection pool in the same VM by using the no-args constructor, or in any
* VM by using the constructor that takes an MBeanServerConnection.
* <p>
* This class also exposes a command line interface modeled after mongostat. For usage, run:
* <pre> java -cp mongo.jar com.massivecraft.mcore.xlib.mongodb.tools.ConnectionPoolStat --help</pre>
*
* @mongodb.driver.manual reference/mongostat mongostat
*
*/
public class ConnectionPoolStat {
/**
* Use the given MBean server connection to access statistics for connection pools.
*
* @param mBeanConnection the MBean server to connect to
*/
public ConnectionPoolStat(MBeanServerConnection mBeanConnection) {
this.mBeanConnection = mBeanConnection;
}
/**
* Use the platform MBean server. This is useful if you want to access statistics
* for connection pools in the same virtual machine.
*
* @see java.lang.management.ManagementFactory#getPlatformMBeanServer()
*/
public ConnectionPoolStat() {
this.mBeanConnection = ManagementFactory.getPlatformMBeanServer();
}
/**
* Gets the statistics for all Mongo connection pools registered with the MBean server used
* by this instance. The format will always be JSON, but the specific JSON fields may change in a
* future release. An example of the output, which should not be taken as a specification:
*
* <pre>
{ pools : [
{ objectName: 'com.mongodb:type=ConnectionPool,host=localhost/127.0.0.1,port=27018,instance=1',
host: 'localhost', port: 27018, maxSize: 10, total: 10, inUse: 3,
inUseConnections: [
{ namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-19', durationMS: 843, localPort: 64062 },
{ namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-1', durationMS: 4331, localPort: 64095 },
{ namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-16', durationMS: 4343, localPort: 64087 }
]
},
{ objectName: 'com.mongodb:type=ConnectionPool,host=localhost/127.0.0.1,port=27017,instance=1',
host: 'localhost', port: 27017, maxSize: 10, total: 10, inUse: 2,
inUseConnections: [
{ namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-5', durationMS: 920, localPort: 64093 },
{ namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-11', durationMS: 1468, localPort: 64068 },
]
}
]
}</pre>
*
* @return JSON-formatted stats for all connection pools registered in JMX
* @throws JMException for any JMX-related exceptions
* @throws IOException for any I/O exceptions
*/
public String getStats() throws JMException, IOException {
CharArrayWriter charArrayWriter = new CharArrayWriter();
PrintWriter printWriter = new PrintWriter(charArrayWriter);
print(printWriter);
return charArrayWriter.toString();
}
/**
* Command line interface for displaying connection pool stats. In order to connect to a remote JMX server to
* get these stats, currently you must set com.sun.management.jmxremote.port system property on the remote server
* and specify that port using the --port argument.
*
* @param args program arguments
* @throws Exception JMX-related exceptions
* @see ConnectionPoolStat#printUsage()
*/
public static void main(String[] args) throws Exception {
String host = "localhost";
int port = -1;
long rowCount = 0;
int sleepTime = 1000;
int pos = 0;
for (; pos < args.length; pos++) {
if (args[pos].equals("--help")) {
printUsage();
System.exit(0);
} else if (args[pos].equals("--host") || args[pos].equals("-h")) {
host = args[++pos];
} else if (args[pos].equals("--port")) {
port = getIntegerArg(args[++pos], "--port");
} else if (args[pos].equals("--rowcount") || args[pos].equals("-n")) {
rowCount = getIntegerArg(args[++pos], "--rowCount");
} else if (args[pos].startsWith("-")) {
printErrorAndUsageAndExit("unknown option " + args[pos]);
}
else {
sleepTime = getIntegerArg(args[pos++], "sleep time") * 1000;
break;
}
}
if (pos != args.length) {
printErrorAndUsageAndExit("too many positional options");
}
if (port == -1 && !host.contains(":")) {
printErrorAndUsageAndExit("port is required");
}
String hostAndPort = (port != -1) ? host + ":" + port : host;
if (rowCount == 0) {
rowCount = Long.MAX_VALUE;
}
JMXServiceURL u = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://" + hostAndPort + "/jmxrmi");
JMXConnector connector = JMXConnectorFactory.connect(u);
MBeanServerConnection mBeanConnection = connector.getMBeanServerConnection();
try {
ConnectionPoolStat printer = new ConnectionPoolStat(mBeanConnection);
for (int i = 0; i < rowCount; i++) {
System.out.println(printer.getStats());
if (i != rowCount - 1) {
Thread.sleep(sleepTime);
}
}
} finally {
connector.close();
}
}
private static int getIntegerArg(String arg, String argName) {
try {
return Integer.parseInt(arg);
} catch (NumberFormatException e) {
printErrorAndUsageAndExit(argName + " arg must be an integer");
}
throw new IllegalStateException();
}
private static void printErrorAndUsageAndExit(final String error) {
System.err.println("ERROR: " + error);
System.out.println();
printUsage();
System.exit(1);
}
private static void printUsage() {
System.out.println("View live MongoDB connection pool statistics from a remote JMX server.");
System.out.println();
System.out.println("usage: java com.mongodb.tools.ConnectionPoolStat [options] [sleep time");
System.out.println("sleep time: time to wait (in seconds) between calls. Defaults to 1");
System.out.println("options:");
System.out.println(" --help produce help message");
System.out.println(" --port arg JMX remote port. Required. Can also use --host hostname:port");
System.out.println(" -h [ --host ] arg JMX remote host. Defaults to localhost");
System.out.println(" -n [ --rowcount ] arg number of times to print stats (0 for indefinite)");
System.out.println();
System.out.println("Fields");
System.out.println(" objectName - name of the JMX bean for this connection pool");
System.out.println(" host - host of the mongod/mongos server");
System.out.println(" port - port of the mongod/mongos server");
System.out.println(" maxSize - max # of connections allowed");
System.out.println(" total - # of connections allocated");
System.out.println(" inUse - # of connections in use");
System.out.println(" inUseConnections - list of all in use connections");
System.out.println(" inUseConnections.namespace - namespace on which connection is operating");
System.out.println(" inUseConnections.opCode - operation connection is executing");
System.out.println(" inUseConnections.query - query the connection is executing (for query/update/remove)");
System.out.println(" inUseConnections.numDocuments - # of documents in the message (mostly relevant for batch inserts)");
System.out.println(" inUseConnections.threadName - name of thread on which connection is executing");
System.out.println(" inUseConnections.durationMS - duration that the operation has been executing so far");
System.out.println(" inUseConnections.localPort - local port of the connection");
}
private void print(PrintWriter pw) throws JMException, IOException {
Set<ObjectName> beanSet = mBeanConnection.queryNames(new ObjectName("com.mongodb:type=ConnectionPool,*"), null);
pw.println("{ pools : [");
int i = 0;
for (ObjectName objectName : beanSet) {
pw.print(" { ");
printAttribute("ObjectName", objectName.toString(), pw);
pw.println();
pw.print(" ");
printAttribute("Host", objectName, pw);
printAttribute("Port", objectName, pw);
printAttribute("MaxSize", objectName, pw);
printStatistics(pw, objectName);
pw.println(" }" + (i == beanSet.size() - 1 ? "" : ","));
i++;
}
pw.println(" ]");
pw.println("}");
}
private void printStatistics(final PrintWriter pw, final ObjectName objectName) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException {
String key = "Statistics";
CompositeData statistics = (CompositeData) mBeanConnection.getAttribute(objectName, key);
printSimpleStatistics(pw, statistics);
printInUseConnections(statistics, pw);
}
private void printSimpleStatistics(final PrintWriter pw, final CompositeData statistics) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException {
printCompositeDataAttribute("total", statistics, pw);
printCompositeDataAttribute("inUse", statistics, pw);
pw.println();
}
private void printInUseConnections(final CompositeData statistics, final PrintWriter pw) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException {
String key = "inUseConnections";
CompositeData[] compositeDataArray = (CompositeData[]) statistics.get(key);
pw.println(" " + getKeyString(key) + ": [");
for (int i = 0; i < compositeDataArray.length; i++) {
CompositeData compositeData = compositeDataArray[i];
pw.print(" { ");
printCompositeDataAttribute("namespace", compositeData, pw);
printCompositeDataAttribute("opCode", compositeData, pw);
printCompositeDataAttribute("query", compositeData, pw, StringType.JSON);
printCompositeDataAttribute("numDocuments", compositeData, pw);
printCompositeDataAttribute("threadName", compositeData, pw);
printCompositeDataAttribute("durationMS", compositeData, pw);
printCompositeDataAttribute("localPort", compositeData, pw, Position.LAST);
pw.println(" }" + (i == compositeDataArray.length -1 ? "" : ", "));
}
pw.println(" ]");
}
private void printCompositeDataAttribute(String key, final CompositeData compositeData, final PrintWriter pw) {
printCompositeDataAttribute(key, compositeData, pw, Position.REGULAR);
}
private void printCompositeDataAttribute(String key, final CompositeData compositeData, final PrintWriter pw, Position position) {
printCompositeDataAttribute(key, compositeData, pw, position, StringType.REGULAR);
}
private void printCompositeDataAttribute(final String key, final CompositeData compositeData, final PrintWriter pw, final StringType stringType) {
printCompositeDataAttribute(key, compositeData, pw, Position.REGULAR, stringType);
}
private void printCompositeDataAttribute(String key, final CompositeData compositeData, final PrintWriter pw, Position position, StringType stringType) {
printAttribute(key, compositeData.get(key), pw, position, stringType);
}
private void printAttribute(final String key, final ObjectName objectName, final PrintWriter pw) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException {
printAttribute(key, mBeanConnection.getAttribute(objectName, key), pw);
}
private void printAttribute(final String key, final Object value, final PrintWriter pw) {
printAttribute(key, value, pw, Position.REGULAR, StringType.REGULAR);
}
private void printAttribute(final String key, final Object value, final PrintWriter pw, Position position, StringType stringType) {
if (value != null ) {
pw.print(getKeyString(key) + ": " + getValueString(value, stringType) + (position == Position.LAST ? "" : ", "));
}
}
private String getKeyString(final String key) {
return Character.toLowerCase(key.charAt(0)) + key.substring(1);
}
private String getValueString(final Object value, final StringType stringType) {
if (value instanceof String && stringType == StringType.REGULAR) {
return "" + "'" + value + "'";
}
return value.toString();
}
enum StringType { REGULAR, JSON }
enum Position { REGULAR, LAST}
private final MBeanServerConnection mBeanConnection;
}
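A minimal in-VM sketch (the host is a placeholder), assuming the driver's connection pool MBeans are registered with the platform MBean server, as this driver line does by default:

import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.tools.ConnectionPoolStat;

public class PoolStatExample
{
    public static void main(String[] args) throws Exception
    {
        MongoClient mongo = new MongoClient("localhost"); // registers its pool MBeans

        // Uses the platform MBean server, so it only sees pools created in this VM.
        ConnectionPoolStat stat = new ConnectionPoolStat();
        System.out.println(stat.getStats()); // JSON document described in the Javadoc above

        mongo.close();
    }
}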

View File

@ -35,54 +35,97 @@ package com.massivecraft.mcore.xlib.mongodb.util;
/** /**
* Provides Base64 encoding and decoding. * Provides Base64 encoding and decoding.
* * <p/>
* <p> * <p>
* This class implements Base64 encoding * This class implements Base64 encoding
* * <p/>
* Thanks to Apache Commons project. This class was refactored from org.apache.commons.codec.binary * Thanks to Apache Commons project. This class was refactored from org.apache.commons.codec.binary
* * <p/>
* Original Thanks to "commons" project in ws.apache.org for this code. * Original Thanks to "commons" project in ws.apache.org for this code.
* http://svn.apache.org/repos/asf/webservices/commons/trunk/modules/util/ * http://svn.apache.org/repos/asf/webservices/commons/trunk/modules/util/
* </p> * </p>
*
*/ */
public class Base64Codec { public class Base64Codec {
private static int BYTES_PER_UNENCODED_BLOCK = 3; private static final int BYTES_PER_UNENCODED_BLOCK = 3;
private static int BYTES_PER_ENCODED_BLOCK = 4; private static final int BYTES_PER_ENCODED_BLOCK = 4;
/** Mask used to extract 6 bits, used when encoding */ /**
* Mask used to extract 6 bits, used when encoding
*/
private static final int SixBitMask = 0x3f; private static final int SixBitMask = 0x3f;
/** padding char */ /**
* padding char
*/
private static final byte PAD = '='; private static final byte PAD = '=';
/** /**
* This array is a lookup table that translates 6-bit positive integer index values into their "Base64 Alphabet" * This array is a lookup table that translates 6-bit positive integer index values into their "Base64 Alphabet"
* equivalents as specified in Table 1 of RFC 2045. * equivalents as specified in Table 1 of RFC 2045.
*
*/ */
private static final byte[] EncodeTable = { 'A', 'B', 'C', 'D', 'E', 'F', private static final byte[] EncodeTable = {'A', 'B', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5',
'6', '7', '8', '9', '+', '/' }; '6', '7', '8', '9', '+', '/'};
private static final int[] DecodeTable = new int[128];
static {
for (int i = 0; i < EncodeTable.length; i++) {
DecodeTable[EncodeTable[i]] = i;
}
}
/**
* Translates the specified Base64 string into a byte array.
*
* @param s the Base64 string (not null)
* @return the byte array (not null)
*/
public byte[] decode(String s) {
int delta = s.endsWith("==") ? 2 : s.endsWith("=") ? 1 : 0;
byte[] buffer = new byte[s.length() * BYTES_PER_UNENCODED_BLOCK / BYTES_PER_ENCODED_BLOCK - delta];
int mask = 0xFF;
int pos = 0;
for (int i = 0; i < s.length(); i += BYTES_PER_ENCODED_BLOCK) {
int c0 = DecodeTable[s.charAt(i)];
int c1 = DecodeTable[s.charAt(i + 1)];
buffer[pos++] = (byte) (((c0 << 2) | (c1 >> 4)) & mask);
if (pos >= buffer.length) {
return buffer;
}
int c2 = DecodeTable[s.charAt(i + 2)];
buffer[pos++] = (byte) (((c1 << 4) | (c2 >> 2)) & mask);
if (pos >= buffer.length) {
return buffer;
}
int c3 = DecodeTable[s.charAt(i + 3)];
buffer[pos++] = (byte) (((c2 << 6) | c3) & mask);
}
return buffer;
}
/**
* Translates the specified byte array into Base64 string.
*
* @param in the byte array (not null)
* @return the translated Base64 string (not null)
*/
public String encode(byte[] in) { public String encode(byte[] in) {
int modulus = 0; int modulus = 0;
int bitWorkArea = 0; int bitWorkArea = 0;
int numEncodedBytes = (in.length/BYTES_PER_UNENCODED_BLOCK)*BYTES_PER_ENCODED_BLOCK int numEncodedBytes = (in.length / BYTES_PER_UNENCODED_BLOCK) * BYTES_PER_ENCODED_BLOCK
+ ((in.length%BYTES_PER_UNENCODED_BLOCK == 0 )?0:4); + ((in.length % BYTES_PER_UNENCODED_BLOCK == 0) ? 0 : 4);
byte[] buffer = new byte[numEncodedBytes]; byte[] buffer = new byte[numEncodedBytes];
int pos = 0; int pos = 0;
for (int i = 0; i < in.length; i++) { for (int b : in) {
modulus = (modulus+1) % BYTES_PER_UNENCODED_BLOCK; modulus = (modulus + 1) % BYTES_PER_UNENCODED_BLOCK;
int b = in[i];
if (b < 0) if (b < 0)
b += 256; b += 256;
@ -97,19 +140,19 @@ public class Base64Codec {
} }
switch (modulus) { // 0-2 switch (modulus) { // 0-2
case 1 : // 8 bits = 6 + 2 case 1: // 8 bits = 6 + 2
buffer[pos++] = EncodeTable[(bitWorkArea >> 2) & SixBitMask]; // top 6 bits buffer[pos++] = EncodeTable[(bitWorkArea >> 2) & SixBitMask]; // top 6 bits
buffer[pos++] = EncodeTable[(bitWorkArea << 4) & SixBitMask]; // remaining 2 buffer[pos++] = EncodeTable[(bitWorkArea << 4) & SixBitMask]; // remaining 2
buffer[pos++] = PAD; buffer[pos++] = PAD;
buffer[pos++] = PAD; buffer[pos] = PAD; // Last entry no need to ++
break; break;
case 2 : // 16 bits = 6 + 6 + 4 case 2: // 16 bits = 6 + 6 + 4
buffer[pos++] = EncodeTable[(bitWorkArea >> 10) & SixBitMask]; buffer[pos++] = EncodeTable[(bitWorkArea >> 10) & SixBitMask];
buffer[pos++] = EncodeTable[(bitWorkArea >> 4) & SixBitMask]; buffer[pos++] = EncodeTable[(bitWorkArea >> 4) & SixBitMask];
buffer[pos++] = EncodeTable[(bitWorkArea << 2) & SixBitMask]; buffer[pos++] = EncodeTable[(bitWorkArea << 2) & SixBitMask];
buffer[pos++] = PAD; buffer[pos] = PAD; // Last entry no need to ++
break; break;
} }
return new String(buffer); return new String(buffer);
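The newly added decode completes the round trip with the existing encode. A small usage sketch, assuming the class is referenced under the repackaged name used in this commit:

    import com.massivecraft.mcore.xlib.mongodb.util.Base64Codec;

    public class Base64RoundTripSketch {
        public static void main(String[] args) throws Exception {
            Base64Codec codec = new Base64Codec();
            String encoded = codec.encode("hello".getBytes("UTF-8"));  // "aGVsbG8="
            byte[] decoded = codec.decode(encoded);                    // the new decode path above
            System.out.println(new String(decoded, "UTF-8"));          // prints: hello
        }
    }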

View File

@ -31,6 +31,7 @@ import java.util.List;
* *
* @author breinero * @author breinero
*/ */
@SuppressWarnings({"rawtypes"})
class ClassMapBasedObjectSerializer extends AbstractObjectSerializer { class ClassMapBasedObjectSerializer extends AbstractObjectSerializer {
/** /**
@ -41,8 +42,7 @@ class ClassMapBasedObjectSerializer extends AbstractObjectSerializer {
* This means that it is only necessary to assign ObjectSerializers to base classes. @see org.bson.util.ClassMap * This means that it is only necessary to assign ObjectSerializers to base classes. @see org.bson.util.ClassMap
* @param serializer performs the serialization mapping specific to the @param key type * @param serializer performs the serialization mapping specific to the @param key type
*/ */
@SuppressWarnings("rawtypes") void addObjectSerializer(Class c, ObjectSerializer serializer) {
void addObjectSerializer(Class c, ObjectSerializer serializer) {
_serializers.put(c , serializer); _serializers.put(c , serializer);
} }

View File

@ -0,0 +1,61 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util;
import com.massivecraft.mcore.xlib.mongodb.InUseConnectionBean;
/**
* A bean representing connection pool statistics.
*/
public class ConnectionPoolStatisticsBean {
private final int total;
private final int inUse;
private final InUseConnectionBean[] inUseConnections;
public ConnectionPoolStatisticsBean(final int total, final int inUse, final InUseConnectionBean[] inUseConnections) {
this.total = total;
this.inUse = inUse;
this.inUseConnections = inUseConnections;
}
/**
* Gets the total number of pool members, including idle and in-use members.
*
* @return total number of members
*/
public int getTotal() {
return total;
}
/**
* Gets the number of pool members that are currently in use.
*
* @return number of in-use members
*/
public int getInUse() {
return inUse;
}
/**
* Gets an array of beans describing all the connections that are currently in use.
*
* @return array of in-use connection beans
*/
public InUseConnectionBean[] getInUseConnections() {
return inUseConnections;
}
}
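A short sketch of consuming such a bean; how the instance is obtained is not shown in this hunk, so the stats parameter here is an assumption:

    import com.massivecraft.mcore.xlib.mongodb.InUseConnectionBean;
    import com.massivecraft.mcore.xlib.mongodb.util.ConnectionPoolStatisticsBean;

    public class PoolStatsSketch {
        // The bean is passed in here because this commit only adds the bean itself.
        static void print(ConnectionPoolStatisticsBean stats) {
            System.out.println("total=" + stats.getTotal() + ", inUse=" + stats.getInUse());
            for (InUseConnectionBean connection : stats.getInUseConnections()) {
                System.out.println("  " + connection);  // one bean per checked-out connection
            }
        }
    }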

View File

@ -28,7 +28,7 @@ import com.massivecraft.mcore.xlib.mongodb.DBObject;
public class JSON { public class JSON {
/** /**
* Serializes an object into it's JSON form. * Serializes an object into its JSON form.
* <p> * <p>
* This method delegates serialization to <code>JSONSerializers.getLegacy</code> * This method delegates serialization to <code>JSONSerializers.getLegacy</code>
* *
@ -43,7 +43,7 @@ public class JSON {
} }
/** /**
* Serializes an object into it's JSON form * Serializes an object into its JSON form.
* <p> * <p>
* This method delegates serialization to <code>JSONSerializers.getLegacy</code> * This method delegates serialization to <code>JSONSerializers.getLegacy</code>
* *

View File

@ -18,6 +18,16 @@
package com.massivecraft.mcore.xlib.mongodb.util; package com.massivecraft.mcore.xlib.mongodb.util;
import com.massivecraft.mcore.xlib.bson.BSON;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.BasicBSONCallback;
import com.massivecraft.mcore.xlib.bson.types.*;
import com.massivecraft.mcore.xlib.mongodb.BasicDBList;
import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.DBRef;
import java.text.ParsePosition; import java.text.ParsePosition;
import java.text.SimpleDateFormat; import java.text.SimpleDateFormat;
import java.util.Date; import java.util.Date;
@ -26,25 +36,10 @@ import java.util.SimpleTimeZone;
import java.util.UUID; import java.util.UUID;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import com.massivecraft.mcore.xlib.bson.BSON;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.BasicBSONCallback;
import com.massivecraft.mcore.xlib.bson.types.BSONTimestamp;
import com.massivecraft.mcore.xlib.bson.types.Code;
import com.massivecraft.mcore.xlib.bson.types.CodeWScope;
import com.massivecraft.mcore.xlib.bson.types.MaxKey;
import com.massivecraft.mcore.xlib.bson.types.MinKey;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.mongodb.BasicDBList;
import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.DBRef;
public class JSONCallback extends BasicBSONCallback { public class JSONCallback extends BasicBSONCallback {
@Override @Override
public BSONObject create(){ public BSONObject create() {
return new BasicDBObject(); return new BasicDBObject();
} }
@ -53,104 +48,75 @@ public class JSONCallback extends BasicBSONCallback {
return new BasicDBList(); return new BasicDBList();
} }
public void objectStart(boolean array, String name){ public void objectStart(boolean array, String name) {
_lastArray = array; _lastArray = array;
super.objectStart( array , name ); super.objectStart(array, name);
} }
public Object objectDone(){ public Object objectDone() {
String name = curName(); String name = curName();
Object o = super.objectDone(); Object o = super.objectDone();
BSONObject b = (BSONObject)o; if (_lastArray) {
return o;
}
BSONObject b = (BSONObject) o;
// override the object if it's a special type // override the object if it's a special type
if (!_lastArray) { if (b.containsField("$oid")) {
if (b.containsField("$oid")) { o = new ObjectId((String) b.get("$oid"));
o = new ObjectId((String) b.get("$oid")); } else if (b.containsField("$date")) {
if (!isStackEmpty()) { if (b.get("$date") instanceof Number) {
gotObjectId(name, (ObjectId) o); o = new Date(((Number) b.get("$date")).longValue());
} else { } else {
setRoot(o); SimpleDateFormat format = new SimpleDateFormat(_msDateFormat);
} format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT")));
} else if (b.containsField("$date")) { o = format.parse(b.get("$date").toString(), new ParsePosition(0));
if(b.get("$date") instanceof Number){ if (o == null) {
o = new Date(((Number)b.get("$date")).longValue()); // try older format with no ms
}else { format = new SimpleDateFormat(_secDateFormat);
SimpleDateFormat format = new SimpleDateFormat(_msDateFormat);
format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT"))); format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT")));
o = format.parse(b.get("$date").toString(), new ParsePosition(0)); o = format.parse(b.get("$date").toString(), new ParsePosition(0));
}
}
} else if (b.containsField("$regex")) {
o = Pattern.compile((String) b.get("$regex"),
BSON.regexFlags((String) b.get("$options")));
} else if (b.containsField("$ts")) { //Legacy timestamp format
Integer ts = ((Number) b.get("$ts")).intValue();
Integer inc = ((Number) b.get("$inc")).intValue();
o = new BSONTimestamp(ts, inc);
} else if (b.containsField("$timestamp")) {
BSONObject tsObject = (BSONObject) b.get("$timestamp");
Integer ts = ((Number) tsObject.get("t")).intValue();
Integer inc = ((Number) tsObject.get("i")).intValue();
o = new BSONTimestamp(ts, inc);
} else if (b.containsField("$code")) {
if (b.containsField("$scope")) {
o = new CodeWScope((String) b.get("$code"), (DBObject) b.get("$scope"));
} else {
o = new Code((String) b.get("$code"));
}
} else if (b.containsField("$ref")) {
o = new DBRef(null, (String) b.get("$ref"), b.get("$id"));
} else if (b.containsField("$minKey")) {
o = new MinKey();
} else if (b.containsField("$maxKey")) {
o = new MaxKey();
} else if (b.containsField("$uuid")) {
o = UUID.fromString((String) b.get("$uuid"));
} else if (b.containsField("$binary")) {
int type = (Integer) b.get("$type");
byte[] bytes = (new Base64Codec()).decode((String) b.get("$binary"));
o = new Binary((byte) type, bytes);
}
if (o == null) { if (!isStackEmpty()) {
// try older format with no ms _put(name, o);
format = new SimpleDateFormat(_secDateFormat); } else {
format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT"))); o = !BSON.hasDecodeHooks() ? o : BSON.applyDecodingHooks( o );
o = format.parse(b.get("$date").toString(), new ParsePosition(0)); setRoot(o);
} }
}
if (!isStackEmpty()) {
cur().put(name, o);
} else {
setRoot(o);
}
} else if ( b.containsField( "$regex" ) ) {
o = Pattern.compile( (String)b.get( "$regex" ),
BSON.regexFlags( (String)b.get( "$options" )) );
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
} else if ( b.containsField( "$ts" ) ) {
Long ts = ((Number)b.get("$ts")).longValue();
Long inc = ((Number)b.get("$inc")).longValue();
o = new BSONTimestamp(ts.intValue(), inc.intValue());
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
} else if ( b.containsField( "$code" ) ) {
if (b.containsField("$scope")) {
o = new CodeWScope((String)b.get("$code"), (DBObject)b.get("$scope"));
} else {
o = new Code((String)b.get("$code"));
}
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
} else if ( b.containsField( "$ref" ) ) {
o = new DBRef(null, (String)b.get("$ref"), b.get("$id"));
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
} else if ( b.containsField( "$minKey" ) ) {
o = new MinKey();
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
} else if ( b.containsField( "$maxKey" ) ) {
o = new MaxKey();
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
} else if ( b.containsField( "$uuid" ) ) {
o = UUID.fromString((String)b.get("$uuid"));
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
}
}
return o; return o;
} }
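The rewritten callback still maps extended-JSON markers such as $oid, $date, $timestamp and $binary to their BSON types; it mainly centralises the put/setRoot handling and short-circuits arrays. Assuming it continues to back the driver's JSON.parse, a usage sketch (repackaged class names as in this commit):

    import com.massivecraft.mcore.xlib.bson.types.ObjectId;
    import com.massivecraft.mcore.xlib.mongodb.DBObject;
    import com.massivecraft.mcore.xlib.mongodb.util.JSON;

    public class ExtendedJsonParseSketch {
        public static void main(String[] args) {
            DBObject doc = (DBObject) JSON.parse(
                    "{ \"_id\" : { \"$oid\" : \"5190a4f1e4b0702dfb77bc4e\" } }");
            ObjectId id = (ObjectId) doc.get("_id");  // the $oid wrapper becomes a real ObjectId
            System.out.println(id);
        }
    }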

View File

@ -406,8 +406,8 @@ public class JSONSerializers {
public void serialize(Object obj, StringBuilder buf) { public void serialize(Object obj, StringBuilder buf) {
BSONTimestamp t = (BSONTimestamp) obj; BSONTimestamp t = (BSONTimestamp) obj;
BasicDBObject temp = new BasicDBObject(); BasicDBObject temp = new BasicDBObject();
temp.put("$t", Integer.valueOf(t.getTime())); temp.put("t", Integer.valueOf(t.getTime()));
temp.put("$i", Integer.valueOf(t.getInc())); temp.put("i", Integer.valueOf(t.getInc()));
BasicDBObject timestampObj = new BasicDBObject(); BasicDBObject timestampObj = new BasicDBObject();
timestampObj.put("$timestamp", temp); timestampObj.put("$timestamp", temp);
serializer.serialize(timestampObj, buf); serializer.serialize(timestampObj, buf);
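With the inner keys now plain t and i, the serialized shape matches what the JSONCallback above reads back for $timestamp. A hedged round-trip sketch via JSON.parse (class names as repackaged in this commit):

    import com.massivecraft.mcore.xlib.bson.types.BSONTimestamp;
    import com.massivecraft.mcore.xlib.mongodb.DBObject;
    import com.massivecraft.mcore.xlib.mongodb.util.JSON;

    public class TimestampParseSketch {
        public static void main(String[] args) {
            DBObject doc = (DBObject) JSON.parse(
                    "{ \"ts\" : { \"$timestamp\" : { \"t\" : 1367409600, \"i\" : 1 } } }");
            BSONTimestamp ts = (BSONTimestamp) doc.get("ts");
            System.out.println(ts.getTime() + " / " + ts.getInc());  // 1367409600 / 1
        }
    }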

View File

@ -88,6 +88,11 @@ public class MyAsserts {
_assertEquals( a , b == null ? null : b.toString() ); _assertEquals( a , b == null ? null : b.toString() );
} }
public static void assertSame(Object a, Object b) {
if ( a != b )
throw new MyAssert( a + " != " + b );
}
public static void assertEquals( Object a , Object b ){ public static void assertEquals( Object a , Object b ){
_assertEquals( a , b ); _assertEquals( a , b );
} }
@ -119,6 +124,13 @@ public class MyAsserts {
throw new MyAssert("These arrays are different, but they might be big so not printing them here"); throw new MyAssert("These arrays are different, but they might be big so not printing them here");
} }
public static void assertArrayEquals(char[] expected, char[] result) {
if (Arrays.equals(expected, result))
return;
throw new MyAssert("These arrays are different, but they might be big so not printing them here");
}
public static void assertNotEquals( Object a , Object b ){ public static void assertNotEquals( Object a , Object b ){
if ( a == null ){ if ( a == null ){
if ( b != null ) if ( b != null )

View File

@ -1,7 +1,5 @@
// SimplePool.java
/** /**
* Copyright (C) 2008 10gen Inc. * Copyright (C) 2008-2012 10gen Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -19,79 +17,47 @@
package com.massivecraft.mcore.xlib.mongodb.util; package com.massivecraft.mcore.xlib.mongodb.util;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.HashSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Set;
import java.util.concurrent.Semaphore; import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import javax.management.Attribute; /**
import javax.management.AttributeList; * This class is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases.
import javax.management.DynamicMBean; */
import javax.management.MBeanAttributeInfo; public abstract class SimplePool<T> {
import javax.management.MBeanInfo;
public abstract class SimplePool<T> implements DynamicMBean {
static final boolean TRACK_LEAKS = Boolean.getBoolean( "MONGO-TRACKLEAKS" );
static final long _sleepTime = 2;
/**
* See full constructor docs
*/
public SimplePool( String name , int maxToKeep , int maxTotal ){
this( name , maxToKeep , maxTotal , false , false );
}
/** Initializes a new pool of objects. /** Initializes a new pool of objects.
* @param name name for the pool * @param name name for the pool
* @param maxToKeep max to hold at any given time. if < 0 then no limit * @param size max to hold at any given time. if < 0 then no limit
* @param maxTotal max to have allocated at any point. if there are no more, get() will block
* @param trackLeaks if leaks should be tracked
*/ */
public SimplePool( String name , int maxToKeep , int maxTotal , boolean trackLeaks , boolean debug ){ public SimplePool(String name, int size){
_name = name; _name = name;
_maxToKeep = maxToKeep; _size = size;
_maxTotal = maxTotal; _sem = new Semaphore(size);
_trackLeaks = trackLeaks || TRACK_LEAKS;
_debug = debug;
_mbeanInfo = new MBeanInfo( this.getClass().getName() , _name ,
new MBeanAttributeInfo[]{
new MBeanAttributeInfo( "name" , "java.lang.String" , "name of pool" , true , false , false ) ,
new MBeanAttributeInfo( "size" , "java.lang.Integer" , "total size of pool" , true , false , false ) ,
new MBeanAttributeInfo( "available" , "java.lang.Integer" , "total connections available" , true , false , false ) ,
new MBeanAttributeInfo( "inUse" , "java.lang.Integer" , "number connections in use right now" , true , false , false ) ,
new MBeanAttributeInfo( "everCreated" , "java.lang.Integer" , "number connections ever created" , true , false , false )
} , null , null , null );
} }
/** Creates a new object of this pool's type. /** Creates a new object of this pool's type. Implementations should throw a runtime exception if unable to create.
* @return the new object. * @return the new object.
*/ */
protected abstract T createNew(); protected abstract T createNew();
/** /**
* callback to determine if an object is ok to be added back to the pool or used * override this if you need to do any cleanup
* will be called when something is put back into the queue and when it comes out
* @return true if the object is ok to be added back to pool
*/ */
public boolean ok( T t ){ public void cleanup( T t ) {
return true;
} }
/** /**
* override this if you need to do any cleanup * Pick a member of {@code _avail}. This method is called with a lock held on {@code _avail}, so it may be used safely.
*
* @param recommended the recommended member to choose.
* @param couldCreate true if there is room in the pool to create a new object
* @return >= 0 the one to use, -1 create a new one
*/ */
public void cleanup( T t ){} protected int pick( int recommended , boolean couldCreate ){
return recommended;
/**
* @return >= 0 the one to use, -1 don't use any
*/
protected int pick( int iThink , boolean couldCreate ){
return iThink;
} }
/** /**
@ -100,49 +66,36 @@ public abstract class SimplePool<T> implements DynamicMBean {
* @param t Object to add * @param t Object to add
*/ */
public void done( T t ){ public void done( T t ){
done( t , ok( t ) ); synchronized ( this ) {
if (_closed) {
cleanup(t);
return;
}
assertConditions();
if (!_out.remove(t)) {
throw new RuntimeException("trying to put something back in the pool wasn't checked out");
}
_avail.add(t);
}
_sem.release();
} }
void done( T t , boolean ok ){ private void assertConditions() {
if ( _trackLeaks ){ assert getTotal() <= getMaxSize();
synchronized ( _where ){
_where.remove( _hash( t ) );
}
}
if ( ! ok ){
synchronized ( _avail ){
_all.remove( t );
}
return;
}
synchronized ( _avail ){
if ( _maxToKeep < 0 || _avail.size() < _maxToKeep ){
for ( int i=0; i<_avail.size(); i++ )
if ( _avail.get( i ) == t )
throw new RuntimeException( "trying to put something back in the pool that's already there" );
// if all doesn't contain it, it probably means this was cleared, so we don't want it
if ( _all.contains( t ) ){
_avail.add( t );
_waiting.release();
}
}
else {
cleanup( t );
}
}
} }
public void remove( T t ){ public void remove( T t ) {
done( t , false ); done(t);
} }
/** Gets an object from the pool - will block if none are available /** Gets an object from the pool - will block if none are available
* @return An object from the pool * @return An object from the pool
*/ */
public T get(){ public T get() throws InterruptedException {
return get(-1); return get(-1);
} }
@ -151,210 +104,100 @@ public abstract class SimplePool<T> implements DynamicMBean {
* negative - forever * negative - forever
* 0 - return immediately no matter what * 0 - return immediately no matter what
* positive ms to wait * positive ms to wait
* @return An object from the pool * @return An object from the pool, or null if one cannot be acquired within the given waitTime
*/ */
public T get( long waitTime ){ public T get(long waitTime) throws InterruptedException {
final T t = _get( waitTime ); if (!permitAcquired(waitTime)) {
if ( t != null ){ return null;
if ( _trackLeaks ){
Throwable stack = new Throwable();
stack.fillInStackTrace();
synchronized ( _where ){
_where.put( _hash( t ) , stack );
}
}
} }
return t;
}
private int _hash( T t ){ synchronized (this) {
return System.identityHashCode( t ); assertConditions();
}
private T _get( long waitTime ){ int toTake = pick(_avail.size() - 1, getTotal() < getMaxSize());
long totalSlept = 0; T t;
while ( true ){ if (toTake >= 0) {
synchronized ( _avail ){ t = _avail.remove(toTake);
} else {
boolean couldCreate = _maxTotal <= 0 || _all.size() < _maxTotal; t = createNewAndReleasePermitIfFailure();
while ( _avail.size() > 0 ){
int toTake = _avail.size() - 1;
toTake = pick( toTake, couldCreate );
if ( toTake >= 0 ){
T t = _avail.remove( toTake );
if ( ok( t ) ){
_debug( "got an old one" );
return t;
}
_debug( "old one was not ok" );
_all.remove( t );
continue;
}
else if ( ! couldCreate ) {
throw new IllegalStateException( "can't pick nothing if can't create" );
}
break;
}
if ( couldCreate ){
_everCreated++;
T t = createNew();
_all.add( t );
return t;
}
if ( _trackLeaks && _trackPrintCount++ % 200 == 0 ){
_wherePrint();
_trackPrintCount = 1;
}
} }
_out.add(t);
if ( waitTime == 0 ) return t;
return null;
if ( waitTime > 0 && totalSlept >= waitTime )
return null;
long start = System.currentTimeMillis();
try {
_waiting.tryAcquire( _sleepTime , TimeUnit.MILLISECONDS );
}
catch ( InterruptedException ie ){
}
totalSlept += ( System.currentTimeMillis() - start );
} }
} }
private void _wherePrint(){ private T createNewAndReleasePermitIfFailure() {
StringBuilder buf = new StringBuilder( toString() ).append( " waiting \n" ); try {
synchronized ( _where ){ T newMember = createNew();
for ( Throwable t : _where.values() ){ if (newMember == null) {
buf.append( "--\n" ); throw new IllegalStateException("null pool members are not allowed");
final StackTraceElement[] st = t.getStackTrace();
for ( int i=0; i<st.length; i++ )
buf.append( " " ).append( st[i] ).append( "\n" );
buf.append( "----\n" );
} }
return newMember;
} catch (RuntimeException e) {
_sem.release();
throw e;
} catch (Error e) {
_sem.release();
throw e;
}
}
private boolean permitAcquired(final long waitTime) throws InterruptedException {
if (waitTime > 0) {
return _sem.tryAcquire(waitTime, TimeUnit.MILLISECONDS);
} else if (waitTime < 0) {
_sem.acquire();
return true;
} else {
return _sem.tryAcquire();
} }
System.out.println( buf );
} }
/** Clears the pool of all objects. */ /** Clears the pool of all objects. */
protected void clear(){ protected synchronized void close(){
synchronized( _avail ){ _closed = true;
for ( T t : _avail ) for (T t : _avail)
cleanup( t ); cleanup(t);
_avail.clear(); _avail.clear();
_all.clear(); _out.clear();
synchronized ( _where ){
_where.clear(); // is this correct
}
}
} }
public int total(){ public String getName() {
return _all.size(); return _name;
} }
public int inUse(){ public synchronized int getTotal(){
return _all.size() - _avail.size(); return _avail.size() + _out.size();
} }
public Iterator<T> getAll(){ public synchronized int getInUse(){
return _all.getAll().iterator(); return _out.size();
} }
public int available(){ public synchronized int getAvailable(){
if ( _maxTotal <= 0 ) return _avail.size();
throw new IllegalStateException( "this pool has an infinite number of things available" );
return _maxTotal - inUse();
} }
public int everCreated(){ public int getMaxSize(){
return _everCreated; return _size;
} }
private void _debug( String msg ){ public synchronized String toString(){
if( _debug )
System.out.println( "SimplePool [" + _name + "] : " + msg );
}
public int maxToKeep(){
return _maxToKeep;
}
public Object getAttribute(String attribute){
if ( attribute.equals( "name" ) )
return _name;
if ( attribute.equals( "size" ) )
return _maxToKeep;
if ( attribute.equals( "available" ) )
return available();
if ( attribute.equals( "inUse" ) )
return inUse();
if ( attribute.equals( "everCreated" ) )
return _everCreated;
System.err.println( "com.mongo.util.SimplePool unknown attribute: " + attribute );
throw new RuntimeException( "unknown attribute: " + attribute );
}
public AttributeList getAttributes(String[] attributes){
AttributeList l = new AttributeList();
for ( int i=0; i<attributes.length; i++ ){
String name = attributes[i];
l.add( new Attribute( name , getAttribute( name ) ) );
}
return l;
}
public MBeanInfo getMBeanInfo(){
return _mbeanInfo;
}
public Object invoke(String actionName, Object[] params, String[] signature){
throw new RuntimeException( "not allowed to invoke anything" );
}
public void setAttribute(Attribute attribute){
throw new RuntimeException( "not allowed to set anything" );
}
public AttributeList setAttributes(AttributeList attributes){
throw new RuntimeException( "not allowed to set anything" );
}
public String toString(){
StringBuilder buf = new StringBuilder(); StringBuilder buf = new StringBuilder();
buf.append( "pool: " ).append( _name ) buf.append("pool: ").append(_name)
.append( " maxToKeep: " ).append( _maxToKeep ) .append(" maxToKeep: ").append(_size)
.append( " maxTotal: " ).append( _maxToKeep ) .append(" avail ").append(_avail.size())
.append( " where " ).append( _where.size() ) .append(" out ").append(_out.size())
.append( " avail " ).append( _avail.size() )
.append( " all " ).append( _all.size() )
; ;
return buf.toString(); return buf.toString();
} }
protected final String _name; protected final String _name;
protected final int _maxToKeep; protected final int _size;
protected final int _maxTotal;
protected final boolean _trackLeaks;
protected final boolean _debug;
protected final MBeanInfo _mbeanInfo;
private final List<T> _avail = new ArrayList<T>();
protected final List<T> _availSafe = Collections.unmodifiableList( _avail );
private final WeakBag<T> _all = new WeakBag<T>();
private final Map<Integer,Throwable> _where = new HashMap<Integer,Throwable>();
private final Semaphore _waiting = new Semaphore(0);
private int _everCreated = 0;
private int _trackPrintCount = 0;
protected final List<T> _avail = new ArrayList<T>();
protected final Set<T> _out = new HashSet<T>();
private final Semaphore _sem;
private boolean _closed;
} }
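The rewritten pool drops the DynamicMBean plumbing, leak tracking and ok() checks in favour of a Semaphore plus synchronized _avail/_out bookkeeping, so get() now blocks on a permit and may throw InterruptedException. A minimal subclass sketch under those assumptions; BufferPool and its byte[] payload are invented for illustration:

    import com.massivecraft.mcore.xlib.mongodb.util.SimplePool;

    public class BufferPool extends SimplePool<byte[]> {
        public BufferPool(int size) {
            super("BufferPool", size);     // new constructor: just a name and a max size
        }

        @Override
        protected byte[] createNew() {
            return new byte[8 * 1024];     // must not be null; throw a runtime exception on failure
        }

        public static void main(String[] args) throws InterruptedException {
            BufferPool pool = new BufferPool(4);
            byte[] buf = pool.get();       // blocks on the semaphore; may throw InterruptedException
            try {
                // use buf ...
            } finally {
                pool.done(buf);            // must return exactly what was checked out
            }
        }
    }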

View File

@ -22,11 +22,9 @@ public class StringBuilderPool extends SimplePool<StringBuilder> {
/** Initializes a pool of a given number of StringBuilders, each of a certain size. /** Initializes a pool of a given number of StringBuilders.
* @param maxToKeep the number of string builders in the pool * @param maxToKeep the number of string builders in the pool
* @param maxSize the size of each string builder
*/ */
public StringBuilderPool( String name , int maxToKeep , int maxSize ){ public StringBuilderPool( String name , int maxToKeep ){
super( "StringBuilderPool-" + name , maxToKeep , -1 ); super( "StringBuilderPool-" + name , maxToKeep );
_maxSize = maxSize;
} }
/** Create a new string builder. /** Create a new string builder.
@ -41,7 +39,7 @@ public class StringBuilderPool extends SimplePool<StringBuilder> {
* @return if it is not too big * @return if it is not too big
*/ */
public boolean ok( StringBuilder buf ){ public boolean ok( StringBuilder buf ){
if ( buf.length() > _maxSize ) if ( buf.length() > getMaxSize() )
return false; return false;
buf.setLength( 0 ); buf.setLength( 0 );
return true; return true;
@ -50,6 +48,4 @@ public class StringBuilderPool extends SimplePool<StringBuilder> {
protected long memSize( StringBuilder buf ){ protected long memSize( StringBuilder buf ){
return buf.length() * 2; return buf.length() * 2;
} }
final int _maxSize;
} }
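Note that with _maxSize gone, ok() now compares the builder's length against getMaxSize(), i.e. the pool size rather than a per-builder capacity. A quick usage sketch, assuming StringBuilderPool lives in the same repackaged util package:

    import com.massivecraft.mcore.xlib.mongodb.util.StringBuilderPool;

    public class StringBuilderPoolSketch {
        public static void main(String[] args) throws InterruptedException {
            StringBuilderPool pool = new StringBuilderPool("json", 10);  // name and pool size only now
            StringBuilder buf = pool.get();                              // may block or be interrupted
            try {
                buf.append("{ \"ok\" : 1 }");
                System.out.println(buf);
            } finally {
                pool.done(buf);
            }
        }
    }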

View File

@ -24,6 +24,12 @@ import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
/**
* A thread pool implementation.
*
* @deprecated This class is no longer in use and should not have been public. It may be removed in a future release.
*/
@Deprecated
public abstract class ThreadPool<T> { public abstract class ThreadPool<T> {
/** Initializes a new thread pool with a given name and number of threads. /** Initializes a new thread pool with a given name and number of threads.

View File

@ -26,7 +26,7 @@ import java.util.List;
/** /**
* if its not obvious what a weak bag should do, then, well... * if its not obvious what a weak bag should do, then, well...
* very very not thead safe * very very not thread safe
*/ */
public class WeakBag<T> implements Iterable<T> { public class WeakBag<T> implements Iterable<T> {

View File

@ -0,0 +1,29 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util.management;
/**
*
* This class is NOT part of the public API. It may change at any time without notification.
*/
public class JMException extends Exception {
static final long serialVersionUID = -2052972874393271421L;
public JMException(Throwable cause) {
super(cause);
}
}

View File

@ -0,0 +1,28 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util.management;
/**
* This class is NOT part of the public API. It may change at any time without notification.
*/
public interface MBeanServer {
boolean isRegistered(String mBeanName) throws JMException;
void unregisterMBean(String mBeanName) throws JMException;
void registerMBean(Object mBean, String mBeanName) throws JMException;
}

View File

@ -0,0 +1,43 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util.management;
import com.massivecraft.mcore.xlib.mongodb.util.management.jmx.JMXMBeanServer;
/**
* This class is NOT part of the public API. It may change at any time without notification.
*
* This class is used to insulate the rest of the driver from the possibility that JMX is not available,
* as currently is the case on Android VM
*/
public class MBeanServerFactory {
static {
MBeanServer tmp;
try {
tmp = new JMXMBeanServer();
} catch (Throwable e) {
tmp = new NullMBeanServer();
}
mBeanServer = tmp;
}
public static MBeanServer getMBeanServer() {
return mBeanServer;
}
private static final MBeanServer mBeanServer;
}
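The static initializer falls back to the null implementation when JMX is unavailable (for example on Android), so callers never import javax.management directly. A hedged usage sketch; the ObjectName string is invented:

    import com.massivecraft.mcore.xlib.mongodb.util.management.JMException;
    import com.massivecraft.mcore.xlib.mongodb.util.management.MBeanServer;
    import com.massivecraft.mcore.xlib.mongodb.util.management.MBeanServerFactory;

    public class MBeanLookupSketch {
        public static void main(String[] args) {
            MBeanServer server = MBeanServerFactory.getMBeanServer();  // JMX-backed, or the null server
            try {
                // the ObjectName string below is purely illustrative
                System.out.println(server.isRegistered("com.example:type=ConnectionPool,name=demo"));
            } catch (JMException e) {
                // wraps whatever javax.management threw when real JMX is present
                e.printStackTrace();
            }
        }
    }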

View File

@ -0,0 +1,35 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util.management;
/**
* This class is NOT part of the public API. It may change at any time without notification.
*/
public class NullMBeanServer implements MBeanServer {
@Override
public boolean isRegistered(String mBeanName) {
return false;
}
@Override
public void unregisterMBean(String mBeanName) {
}
@Override
public void registerMBean(Object mBean, String mBeanName) {
}
}

View File

@ -0,0 +1,67 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util.management.jmx;
import com.massivecraft.mcore.xlib.mongodb.util.management.JMException;
import com.massivecraft.mcore.xlib.mongodb.util.management.MBeanServer;
import javax.management.*;
import java.lang.management.ManagementFactory;
/**
* This class is NOT part of the public API. It may change at any time without notification.
*/
public class JMXMBeanServer implements MBeanServer {
@Override
public boolean isRegistered(String mBeanName) throws JMException {
return server.isRegistered(createObjectName(mBeanName));
}
@Override
public void unregisterMBean(String mBeanName) throws JMException {
try {
server.unregisterMBean(createObjectName(mBeanName));
} catch (InstanceNotFoundException e) {
throw new JMException(e);
} catch (MBeanRegistrationException e) {
throw new JMException(e);
}
}
@Override
public void registerMBean(Object mBean, String mBeanName) throws JMException {
try {
server.registerMBean(mBean, createObjectName(mBeanName));
} catch (InstanceAlreadyExistsException e) {
throw new JMException(e);
} catch (MBeanRegistrationException e) {
throw new JMException(e);
} catch (NotCompliantMBeanException e) {
throw new JMException(e);
}
}
private ObjectName createObjectName(String mBeanName) throws JMException {
try {
return new ObjectName(mBeanName);
} catch (MalformedObjectNameException e) {
throw new JMException(e);
}
}
private final javax.management.MBeanServer server = ManagementFactory.getPlatformMBeanServer();
}