Update MongoDB Java driver to 2.11.1

Olof Larsson 2013-05-01 11:06:39 +02:00
parent bc92d94f89
commit 41e581ee71
100 changed files with 7991 additions and 2555 deletions

View File

@ -14,7 +14,6 @@ import org.bukkit.inventory.PlayerInventory;
import com.massivecraft.mcore.adapter.InventoryAdapter;
import com.massivecraft.mcore.adapter.ItemStackAdapter;
import com.massivecraft.mcore.adapter.MongoURIAdapter;
import com.massivecraft.mcore.adapter.ObjectIdAdapter;
import com.massivecraft.mcore.adapter.PlayerInventoryAdapter;
import com.massivecraft.mcore.adapter.UUIDAdapter;
@ -28,6 +27,7 @@ import com.massivecraft.mcore.ps.PS;
import com.massivecraft.mcore.ps.PSAdapter;
import com.massivecraft.mcore.store.Coll;
import com.massivecraft.mcore.store.Db;
import com.massivecraft.mcore.store.ExamineThread;
import com.massivecraft.mcore.store.MStore;
import com.massivecraft.mcore.usys.Aspect;
import com.massivecraft.mcore.usys.AspectColl;
@ -40,7 +40,6 @@ import com.massivecraft.mcore.util.TimeUnit;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.gson.Gson;
import com.massivecraft.mcore.xlib.gson.GsonBuilder;
import com.massivecraft.mcore.xlib.mongodb.MongoURI;
public class MCore extends MPlugin
{
@ -72,7 +71,6 @@ public class MCore extends MPlugin
.setPrettyPrinting()
.disableHtmlEscaping()
.excludeFieldsWithModifiers(Modifier.TRANSIENT)
.registerTypeAdapter(MongoURI.class, MongoURIAdapter.get())
.registerTypeAdapter(ObjectId.class, ObjectIdAdapter.get())
.registerTypeAdapter(UUID.class, UUIDAdapter.get())
.registerTypeAdapter(ItemStack.class, ItemStackAdapter.get())
@ -121,6 +119,9 @@ public class MCore extends MPlugin
// Note this one must be before preEnable. dooh.
Coll.instances.clear();
// Start the examine thread
ExamineThread.get().start();
if ( ! preEnable()) return;
// Load Server Config
@ -217,4 +218,11 @@ public class MCore extends MPlugin
}
}
@Override
public void onDisable()
{
super.onDisable();
ExamineThread.get().interrupt();
}
}

View File

@ -56,7 +56,6 @@ public abstract class MPlugin extends JavaPlugin implements Listener
for (Coll<?> coll : Coll.instances)
{
if (coll.getPlugin() != this) continue;
coll.examineThread().interrupt();
coll.syncAll(); // TODO: Save outwards only? We may want to avoid loads at this stage...
Coll.instances.remove(coll);
}

View File

@ -1,53 +0,0 @@
package com.massivecraft.mcore.adapter;
import java.lang.reflect.Type;
import com.massivecraft.mcore.xlib.gson.JsonDeserializationContext;
import com.massivecraft.mcore.xlib.gson.JsonDeserializer;
import com.massivecraft.mcore.xlib.gson.JsonElement;
import com.massivecraft.mcore.xlib.gson.JsonParseException;
import com.massivecraft.mcore.xlib.gson.JsonPrimitive;
import com.massivecraft.mcore.xlib.gson.JsonSerializationContext;
import com.massivecraft.mcore.xlib.gson.JsonSerializer;
import com.massivecraft.mcore.xlib.mongodb.MongoURI;
public class MongoURIAdapter implements JsonDeserializer<MongoURI>, JsonSerializer<MongoURI>
{
// -------------------------------------------- //
// INSTANCE & CONSTRUCT
// -------------------------------------------- //
protected static MongoURIAdapter i = new MongoURIAdapter();
public static MongoURIAdapter get() { return i; }
// -------------------------------------------- //
// OVERRIDE
// -------------------------------------------- //
@Override
public JsonElement serialize(MongoURI mongoURI, Type typeOfSrc, JsonSerializationContext context)
{
return serialize(mongoURI);
}
@Override
public MongoURI deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) throws JsonParseException
{
return deserialize(json);
}
// -------------------------------------------- //
// STATIC LOGIC
// -------------------------------------------- //
public static JsonElement serialize(MongoURI mongoURI)
{
return new JsonPrimitive(mongoURI.toString());
}
public static MongoURI deserialize(JsonElement json)
{
return new MongoURI(json.getAsString());
}
}

View File

@ -16,7 +16,6 @@ import org.bukkit.plugin.Plugin;
import com.massivecraft.mcore.MCore;
import com.massivecraft.mcore.MPlugin;
import com.massivecraft.mcore.NaturalOrderComparator;
import com.massivecraft.mcore.Predictate;
import com.massivecraft.mcore.store.accessor.Accessor;
import com.massivecraft.mcore.store.idstrategy.IdStrategy;
@ -599,9 +598,6 @@ public class Coll<E> implements CollInterface<E>
this.syncSuspects();
}
protected ExamineThread<E> examineThread;
@Override public Thread examineThread() { return this.examineThread; }
// -------------------------------------------- //
// CONSTRUCT
// -------------------------------------------- //
@ -637,11 +633,6 @@ public class Coll<E> implements CollInterface<E>
}
this.collDriverObject = db.getCollDriverObject(this);
if (idComparator == null)
{
idComparator = NaturalOrderComparator.get();
}
// STORAGE
this.id2entity = new ConcurrentSkipListMap<String, E>(idComparator);
this.entity2id = new ConcurrentSkipListMap<E, String>(entityComparator);
@ -678,8 +669,6 @@ public class Coll<E> implements CollInterface<E>
{
if (this.inited()) return;
this.syncAll();
this.examineThread = new ExamineThread<E>(this);
this.examineThread.start();
instances.add(this);
}

View File

@ -140,8 +140,6 @@ public interface CollInterface<E>
public Runnable getTickTask();
public void onTick();
public Thread examineThread();
// -------------------------------------------- //
// CONSTRUCT
// -------------------------------------------- //

View File

@ -107,8 +107,8 @@ public class DriverGson extends DriverAbstract<JsonElement>
// Scan the collection folder for .json files
File collDir = getCollDir(coll);
if ( ! collDir.isDirectory()) return ret;
for(File file : collDir.listFiles(JsonFileFilter.get()))
if (!collDir.isDirectory()) return ret;
for (File file : collDir.listFiles(JsonFileFilter.get()))
{
ret.put(idFromFile(file), file.lastModified());
}

View File

@ -17,12 +17,13 @@ import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.DBCollection;
import com.massivecraft.mcore.xlib.mongodb.DBCursor;
import com.massivecraft.mcore.xlib.mongodb.MongoURI;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.MongoClientURI;
public class DriverMongo extends DriverAbstract<BasicDBObject>
{
// -------------------------------------------- //
// STATIC
// CONSTANTS
// -------------------------------------------- //
public final static String ID_FIELD = "_id";
@ -33,6 +34,33 @@ public class DriverMongo extends DriverAbstract<BasicDBObject>
public final static BasicDBObject dboKeysMtime = new BasicDBObject().append(MTIME_FIELD, 1);
public final static BasicDBObject dboKeysIdandMtime = new BasicDBObject().append(ID_FIELD, 1).append(MTIME_FIELD, 1);
//----------------------------------------------//
// CONSTRUCT
//----------------------------------------------//
private DriverMongo()
{
super("mongodb");
}
// -------------------------------------------- //
// INSTANCE
// -------------------------------------------- //
protected static DriverMongo instance;
public static DriverMongo get()
{
return instance;
}
static
{
instance = new DriverMongo();
instance.registerIdStrategy(IdStrategyOid.get());
instance.registerIdStrategy(IdStrategyUuid.get());
}
// -------------------------------------------- //
// IMPLEMENTATION
// -------------------------------------------- //
@ -184,11 +212,14 @@ public class DriverMongo extends DriverAbstract<BasicDBObject>
protected DB getDbInner(String uri)
{
MongoURI muri = new MongoURI(uri);
MongoClientURI muri = new MongoClientURI(uri);
try
{
DB db = muri.connectDB();
// TODO: Create one of these per collection? Really? Perhaps I should cache.
MongoClient mongoClient = new MongoClient(muri);
DB db = mongoClient.getDB(muri.getDatabase());
if (muri.getUsername() == null) return db;
@ -207,29 +238,4 @@ public class DriverMongo extends DriverAbstract<BasicDBObject>
}
}
//----------------------------------------------//
// CONSTRUCTORS
//----------------------------------------------//
private DriverMongo()
{
super("mongodb");
}
// -------------------------------------------- //
// INSTANCE
// -------------------------------------------- //
protected static DriverMongo instance;
public static DriverMongo get()
{
return instance;
}
static
{
instance = new DriverMongo();
instance.registerIdStrategy(IdStrategyOid.get());
instance.registerIdStrategy(IdStrategyUuid.get());
}
}
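For context, the hunk above swaps the deprecated MongoURI.connectDB() path for the 2.11.1 MongoClientURI/MongoClient API. A minimal sketch of that connection flow, assuming the repackaged xlib class names used throughout this plugin (the wrapper class itself is hypothetical, and the TODO in the hunk about caching a single client still applies):

import java.net.UnknownHostException;
import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.MongoClientURI;

public class MongoConnectSketch
{
    public static DB connect(String uri) throws UnknownHostException
    {
        // MongoClientURI replaces the deprecated MongoURI; it parses host, database and credentials.
        MongoClientURI muri = new MongoClientURI(uri);
        // MongoClient replaces MongoURI.connectDB(); it owns the connection pool.
        MongoClient client = new MongoClient(muri);
        return client.getDB(muri.getDatabase());
    }
}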

View File

@ -1,31 +1,56 @@
package com.massivecraft.mcore.store;
public class ExamineThread<E> extends Thread
public class ExamineThread extends Thread
{
protected Coll<E> coll;
// -------------------------------------------- //
// INSTANCE
// -------------------------------------------- //
public ExamineThread(Coll<E> coll)
private static ExamineThread i = null;
public static ExamineThread get()
{
this.coll = coll;
this.setName("ExamineThread for "+coll.getName());
if (i == null || !i.isAlive()) i = new ExamineThread();
return i;
}
// TODO: Implement logging and/or auto adjusting system for how long the sleep should be?
// -------------------------------------------- //
// CONSTRUCT
// -------------------------------------------- //
public ExamineThread()
{
this.setName("MStore ExamineThread");
}
// -------------------------------------------- //
// FIELDS
// -------------------------------------------- //
private long lastDurationMillis = 0;
public long getLastDurationMillis() { return this.lastDurationMillis; }
// -------------------------------------------- //
// OVERRIDE
// -------------------------------------------- //
@Override
public void run()
{
while(true)
while (true)
{
try
{
//long before = System.currentTimeMillis();
long before = System.currentTimeMillis();
for (Coll<?> coll: Coll.instances)
{
coll.findSuspects();
}
long after = System.currentTimeMillis();
long duration = after-before;
this.lastDurationMillis = duration;
//long after = System.currentTimeMillis();
//coll.mplugin().log(this.getName()+ " complete. Took "+ (after-before) +"ms.");
//String message = Txt.parse("<i>ExamineThread iteration took <h>%dms<i>.", after-before);
//MCore.get().log(message);
Thread.sleep(5000);
}
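The per-collection examine threads are replaced here by one global, restartable singleton that sweeps every registered Coll roughly every five seconds. A minimal sketch of the lifecycle wiring, mirroring the MCore hunks above (the surrounding class is hypothetical):

import com.massivecraft.mcore.store.ExamineThread;

public class ExamineThreadLifecycleSketch
{
    public void onEnable()
    {
        // get() recreates the singleton if no thread exists yet or the previous one has died,
        // so calling start() again after a plugin reload is safe.
        ExamineThread.get().start();
    }

    public void onDisable()
    {
        // A single interrupt now stops the one sweep thread for all collections.
        ExamineThread.get().interrupt();
    }
}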

View File

@ -28,6 +28,7 @@ import java.util.regex.Pattern;
import com.massivecraft.mcore.xlib.bson.util.ClassMap;
@SuppressWarnings({"rawtypes"})
public class BSON {
static final Logger LOGGER = Logger.getLogger( "org.bson.BSON" );
@ -173,7 +174,6 @@ public class BSON {
public static boolean hasDecodeHooks() { return _decodeHooks; }
@SuppressWarnings("rawtypes")
public static void addEncodingHook( Class c , Transformer t ){
_encodeHooks = true;
List<Transformer> l = _encodingHooks.get( c );
@ -184,7 +184,6 @@ public class BSON {
l.add( t );
}
@SuppressWarnings("rawtypes")
public static void addDecodingHook( Class c , Transformer t ){
_decodeHooks = true;
List<Transformer> l = _decodingHooks.get( c );
@ -223,7 +222,6 @@ public class BSON {
* Returns the encoding hook(s) associated with the specified class
*
*/
@SuppressWarnings("rawtypes")
public static List<Transformer> getEncodingHooks( Class c ){
return _encodingHooks.get( c );
}
@ -239,7 +237,6 @@ public class BSON {
/**
* Remove all encoding hooks for a specific class.
*/
@SuppressWarnings("rawtypes")
public static void removeEncodingHooks( Class c ){
_encodingHooks.remove( c );
}
@ -247,7 +244,6 @@ public class BSON {
/**
* Remove a specific encoding hook for a specific class.
*/
@SuppressWarnings("rawtypes")
public static void removeEncodingHook( Class c , Transformer t ){
getEncodingHooks( c ).remove( t );
}
@ -255,7 +251,6 @@ public class BSON {
/**
* Returns the decoding hook(s) associated with the specific class
*/
@SuppressWarnings("rawtypes")
public static List<Transformer> getDecodingHooks( Class c ){
return _decodingHooks.get( c );
}
@ -271,7 +266,6 @@ public class BSON {
/**
* Remove all decoding hooks for a specific class.
*/
@SuppressWarnings("rawtypes")
public static void removeDecodingHooks( Class c ){
_decodingHooks.remove( c );
}
@ -279,7 +273,6 @@ public class BSON {
/**
* Remove a specific encoding hook for a specific class.
*/
@SuppressWarnings("rawtypes")
public static void removeDecodingHook( Class c , Transformer t ){
getDecodingHooks( c ).remove( t );
}

View File

@ -24,6 +24,7 @@ import java.util.Set;
/**
* A key-value map that can be saved to the database.
*/
@SuppressWarnings({"rawtypes"})
public interface BSONObject {
/**
@ -44,7 +45,6 @@ public interface BSONObject {
* Sets all key/value pairs from a map into this object
* @param m the map
*/
@SuppressWarnings("rawtypes")
public void putAll( Map m );
/**
@ -58,7 +58,6 @@ public interface BSONObject {
* Returns a map representing this BSONObject.
* @return the map
*/
@SuppressWarnings("rawtypes")
public Map toMap();
/**

View File

@ -26,6 +26,7 @@ import com.massivecraft.mcore.xlib.bson.types.ObjectId;
/**
* Basic implementation of BSONDecoder interface that creates BasicBSONObject instances
*/
@SuppressWarnings({"unused"})
public class BasicBSONDecoder implements BSONDecoder {
public BSONObject readObject( byte[] b ){
try {
@ -505,7 +506,6 @@ public class BasicBSONDecoder implements BSONDecoder {
private static final String DEFAULT_ENCODING = "UTF-8";
@SuppressWarnings("unused")
private static final boolean _isAscii( final byte b ){
return b >=0 && b <= 127;
}

View File

@ -72,7 +72,7 @@ import com.massivecraft.mcore.xlib.mongodb.DBRefBase;
* this is meant to be pooled or cached
* there is some per instance memory for string conversion, etc...
*/
@SuppressWarnings("unchecked")
@SuppressWarnings({"unchecked", "rawtypes", "unused"})
public class BasicBSONEncoder implements BSONEncoder {
static final boolean DEBUG = false;
@ -123,7 +123,6 @@ public class BasicBSONEncoder implements BSONEncoder {
/**
* this is really for embedded objects
*/
@SuppressWarnings("rawtypes")
protected int putObject( String name , BSONObject o ){
if ( o == null )
@ -196,7 +195,6 @@ public class BasicBSONEncoder implements BSONEncoder {
return _buf.getPosition() - start;
}
@SuppressWarnings("rawtypes")
protected void _putObjectField( String name , Object val ){
if ( name.equals( "_transientFields" ) )
@ -287,7 +285,6 @@ public class BasicBSONEncoder implements BSONEncoder {
_buf.writeInt( sizePos , _buf.getPosition() - sizePos );
}
@SuppressWarnings("rawtypes")
private void putIterable( String name , Iterable l ){
_put( ARRAY , name );
final int sizePos = _buf.getPosition();
@ -304,7 +301,6 @@ public class BasicBSONEncoder implements BSONEncoder {
_buf.writeInt( sizePos , _buf.getPosition() - sizePos );
}
@SuppressWarnings("rawtypes")
private void putMap( String name , Map m ){
_put( OBJECT , name );
final int sizePos = _buf.getPosition();
@ -341,7 +337,6 @@ public class BasicBSONEncoder implements BSONEncoder {
_buf.writeInt( temp , _buf.getPosition() - temp );
}
@SuppressWarnings("unused")
protected void putCode( String name , Code code ){
_put( CODE , name );
int temp = _buf.getPosition();

View File

@ -36,6 +36,7 @@ import java.util.regex.Pattern;
* obj.put( "foo", "bar" );
* </pre></blockquote>
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public class BasicBSONObject extends LinkedHashMap<String,Object> implements BSONObject {
private static final long serialVersionUID = -4415279469780082174L;
@ -63,7 +64,6 @@ public class BasicBSONObject extends LinkedHashMap<String,Object> implements BSO
* Creates a DBObject from a map.
* @param m map to convert
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public BasicBSONObject(Map m) {
super(m);
}
@ -72,7 +72,6 @@ public class BasicBSONObject extends LinkedHashMap<String,Object> implements BSO
* Converts a DBObject to a map.
* @return the DBObject
*/
@SuppressWarnings("rawtypes")
public Map toMap() {
return new LinkedHashMap<String,Object>(this);
}
@ -282,7 +281,6 @@ public class BasicBSONObject extends LinkedHashMap<String,Object> implements BSO
return super.put( key , val );
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public void putAll( Map m ){
for ( Map.Entry entry : (Set<Map.Entry>)m.entrySet() ){
put( entry.getKey().toString() , entry.getValue() );

View File

@ -25,6 +25,7 @@ import com.massivecraft.mcore.xlib.mongodb.LazyDBObject;
/**
*
*/
@SuppressWarnings({"rawtypes", "unused"})
public class LazyBSONCallback extends EmptyBSONCallback {
public void objectStart(){
@ -63,7 +64,6 @@ public class LazyBSONCallback extends EmptyBSONCallback {
return new LazyDBObject( data, offset, this );
}
@SuppressWarnings("rawtypes")
public List createArray( byte[] data, int offset ){
return new LazyDBList( data, offset, this );
}
@ -83,6 +83,5 @@ public class LazyBSONCallback extends EmptyBSONCallback {
}
}*/
private Object _root;
@SuppressWarnings("unused")
private static final Logger log = Logger.getLogger( "org.bson.LazyBSONCallback" );
}

View File

@ -30,6 +30,7 @@ import java.util.regex.Pattern;
* @author scotthernandez
* @author Kilroy Wuz Here
*/
@SuppressWarnings({"unchecked", "rawtypes", "unused"})
public class LazyBSONObject implements BSONObject {
public LazyBSONObject( byte[] data, LazyBSONCallback callback ){
@ -135,7 +136,6 @@ public class LazyBSONObject implements BSONObject {
return toArray(a);
}
@SuppressWarnings( "unchecked" )
@Override
public <T> T[] toArray(T[] a) {
int size = size();
@ -203,7 +203,6 @@ public class LazyBSONObject implements BSONObject {
throw new UnsupportedOperationException("Read only");
}
@SuppressWarnings("rawtypes")
@Override
public boolean equals(Object o) {
if (!(o instanceof Map.Entry))
@ -269,14 +268,12 @@ public class LazyBSONObject implements BSONObject {
return new LazyBSONEntryIterator();
}
@SuppressWarnings("rawtypes")
@Override
public Object[] toArray() {
Map.Entry[] array = new Map.Entry[size()];
return toArray(array);
}
@SuppressWarnings( "unchecked" )
@Override
public <T> T[] toArray(T[] a) {
int size = size();
@ -340,7 +337,6 @@ public class LazyBSONObject implements BSONObject {
throw new UnsupportedOperationException( "Object is read only" );
}
@SuppressWarnings("rawtypes")
public void putAll( Map m ){
throw new UnsupportedOperationException( "Object is read only" );
}
@ -401,7 +397,6 @@ public class LazyBSONObject implements BSONObject {
return elements;
}
@SuppressWarnings("rawtypes")
public Map toMap(){
throw new UnsupportedOperationException( "Not Supported" );
}
@ -686,6 +681,5 @@ public class LazyBSONObject implements BSONObject {
protected final BSONByteBuffer _input; // TODO - Guard this with synchronicity?
// callback is kept to create sub-objects on the fly
protected final LazyBSONCallback _callback;
@SuppressWarnings("unused")
private static final Logger log = Logger.getLogger( "org.bson.LazyBSONObject" );
}

View File

@ -12,8 +12,8 @@ import com.massivecraft.mcore.xlib.mongodb.util.JSON;
* @author scotthernandez
*
*/
@SuppressWarnings({"unused"})
public class LazyDBList extends LazyBSONList implements DBObject {
@SuppressWarnings("unused")
private static final long serialVersionUID = -4415279469780082174L;
public LazyDBList(byte[] data, LazyBSONCallback callback) { super(data, callback); }

View File

@ -18,7 +18,6 @@
package com.massivecraft.mcore.xlib.bson.io;
import java.io.*;
import java.util.*;

View File

@ -27,7 +27,7 @@ import java.util.Date;
* <b>time</b> is seconds since epoch
* <b>inc<b> is an ordinal
*/
public class BSONTimestamp implements Serializable {
public class BSONTimestamp implements Comparable<BSONTimestamp>, Serializable {
private static final long serialVersionUID = -3268482672267936464L;
@ -60,6 +60,25 @@ public class BSONTimestamp implements Serializable {
return "TS time:" + _time + " inc:" + _inc;
}
@Override
public int compareTo(BSONTimestamp ts) {
if(getTime() != ts.getTime()) {
return getTime() - ts.getTime();
}
else{
return getInc() - ts.getInc();
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + _inc;
result = prime * result + getTime();
return result;
}
@Override
public boolean equals(Object obj) {
if (obj == this)
@ -73,4 +92,5 @@ public class BSONTimestamp implements Serializable {
final int _inc;
final Date _time;
}
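With Comparable implemented, timestamps order by seconds-since-epoch first and by the inc ordinal second. A small sketch, assuming the driver's two-argument (time, inc) constructor on the repackaged class:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import com.massivecraft.mcore.xlib.bson.types.BSONTimestamp;

public class TimestampOrderingSketch
{
    public static void main(String[] args)
    {
        List<BSONTimestamp> stamps = new ArrayList<BSONTimestamp>();
        stamps.add(new BSONTimestamp(1367400000, 2));
        stamps.add(new BSONTimestamp(1367400000, 1)); // same second, lower ordinal
        stamps.add(new BSONTimestamp(1367390000, 7)); // earlier second

        // compareTo() sorts by time, then by inc, yielding chronological operation order.
        Collections.sort(stamps);
        System.out.println(stamps);
    }
}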

View File

@ -48,6 +48,7 @@ import java.util.*;
* </pre></blockquote>
* </p>
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public class BasicBSONList extends ArrayList<Object> implements BSONObject {
private static final long serialVersionUID = -4415279469780082174L;
@ -80,7 +81,6 @@ public class BasicBSONList extends ArrayList<Object> implements BSONObject {
return v;
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public void putAll( Map m ){
for ( Map.Entry entry : (Set<Map.Entry>)m.entrySet() ){
put( entry.getKey().toString() , entry.getValue() );
@ -137,7 +137,6 @@ public class BasicBSONList extends ArrayList<Object> implements BSONObject {
return new StringRangeSet(size());
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public Map toMap() {
Map m = new HashMap();
Iterator i = this.keySet().iterator();

View File

@ -24,9 +24,10 @@ package com.massivecraft.mcore.xlib.bson.types;
import com.massivecraft.mcore.xlib.bson.BSON;
import java.io.Serializable;
import java.util.Arrays;
/**
generic binary holder
* generic binary holder
*/
public class Binary implements Serializable {
@ -34,34 +35,64 @@ public class Binary implements Serializable {
/**
* Creates a Binary object with the default binary type of 0
*
* @param data raw data
*/
public Binary( byte[] data ){
public Binary(byte[] data) {
this(BSON.B_GENERAL, data);
}
/**
* Creates a Binary object
*
* @param type type of the field as encoded in BSON
* @param data raw data
*/
public Binary( byte type , byte[] data ){
public Binary(byte type, byte[] data) {
_type = type;
_data = data;
}
public byte getType(){
public byte getType() {
return _type;
}
public byte[] getData(){
public byte[] getData() {
return _data;
}
public int length(){
public int length() {
return _data.length;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Binary)) {
return false;
}
Binary binary = (Binary) o;
if (_type != binary._type) {
return false;
}
if (!Arrays.equals(_data, binary._data)) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = (int) _type;
result = 31 * result + (_data != null ? Arrays.hashCode(_data) : 0);
return result;
}
final byte _type;
final byte[] _data;
}
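Binary now defines value equality over its subtype byte and payload, so two instances wrapping identical bytes compare equal instead of relying on object identity. A brief sketch:

import java.util.Arrays;
import com.massivecraft.mcore.xlib.bson.types.Binary;

public class BinaryEqualitySketch
{
    public static void main(String[] args)
    {
        byte[] payload = new byte[] { 1, 2, 3 };
        Binary a = new Binary(payload);
        Binary b = new Binary(Arrays.copyOf(payload, payload.length));

        // With the added equals/hashCode both checks print true.
        System.out.println(a.equals(b));
        System.out.println(a.hashCode() == b.hashCode());
    }
}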

View File

@ -0,0 +1,58 @@
package com.massivecraft.mcore.xlib.mongodb;
public class AggregationOutput {
/**
* returns an iterator to the results of the aggregation
* @return
*/
public Iterable<DBObject> results() {
return _resultSet;
}
/**
* returns the command result of the aggregation
* @return
*/
public CommandResult getCommandResult(){
return _commandResult;
}
/**
* returns the original aggregation command
* @return
*/
public DBObject getCommand() {
return _cmd;
}
/**
* returns the address of the server used to execute the aggregation
* @return
*/
public ServerAddress getServerUsed() {
return _commandResult.getServerUsed();
}
/**
* string representation of the aggregation command
*/
public String toString(){
return _commandResult.toString();
}
@SuppressWarnings("unchecked")
public AggregationOutput(DBObject cmd, CommandResult raw) {
_commandResult = raw;
_cmd = cmd;
if(raw.containsField("result"))
_resultSet = (Iterable<DBObject>) raw.get( "result" );
else
throw new IllegalArgumentException("result undefined");
}
protected final CommandResult _commandResult;
protected final DBObject _cmd;
protected final Iterable<DBObject> _resultSet;
}
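AggregationOutput is the wrapper that DBCollection.aggregate(...) returns in this driver version. A hedged sketch of consuming it (the collection and the "owner"/"amount" fields are made up for illustration):

import com.massivecraft.mcore.xlib.mongodb.AggregationOutput;
import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBCollection;
import com.massivecraft.mcore.xlib.mongodb.DBObject;

public class AggregationSketch
{
    public static void printTotals(DBCollection coll)
    {
        // $group stage: sum a hypothetical "amount" field per "owner".
        DBObject group = new BasicDBObject("$group",
                new BasicDBObject("_id", "$owner").append("total", new BasicDBObject("$sum", "$amount")));

        AggregationOutput out = coll.aggregate(group);
        out.getCommandResult().throwOnError(); // surfaces a failed aggregation as a MongoException

        for (DBObject row : out.results())
        {
            System.out.println(row);
        }
    }
}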

View File

@ -18,12 +18,12 @@
package com.massivecraft.mcore.xlib.mongodb;
import java.util.Map;
import com.massivecraft.mcore.xlib.bson.BasicBSONObject;
import com.massivecraft.mcore.xlib.mongodb.util.JSON;
import java.util.Map;
/**
* a basic implementation of bson object that is mongo specific.
* A <code>DBObject</code> can be created as follows, using this class:
@ -32,6 +32,7 @@ import com.massivecraft.mcore.xlib.mongodb.util.JSON;
* obj.put( "foo", "bar" );
* </pre></blockquote>
*/
@SuppressWarnings({"rawtypes"})
public class BasicDBObject extends BasicBSONObject implements DBObject {
private static final long serialVersionUID = -4415279469780082174L;
@ -63,7 +64,6 @@ public class BasicDBObject extends BasicBSONObject implements DBObject {
* Creates an object from a map.
* @param m map to convert
*/
@SuppressWarnings("rawtypes")
public BasicDBObject(Map m) {
super(m);
}
@ -106,5 +106,5 @@ public class BasicDBObject extends BasicBSONObject implements DBObject {
return newobj;
}
private boolean _isPartialObject = false;
private boolean _isPartialObject;
}

View File

@ -27,6 +27,7 @@ import java.util.Map;
* example:
* BasicDBObjectBuilder.start().add( "name" , "eliot" ).add( "number" , 17 ).get()
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public class BasicDBObjectBuilder {
/**
@ -59,7 +60,6 @@ public class BasicDBObjectBuilder {
* @param m map to use
* @return the new builder
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public static BasicDBObjectBuilder start(Map m){
BasicDBObjectBuilder b = new BasicDBObjectBuilder();
Iterator<Map.Entry> i = m.entrySet().iterator();

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* An exception indicating a failed command.
*/
public class CommandFailureException extends MongoException {
private static final long serialVersionUID = -1180715413196161037L;
private final CommandResult commandResult;
/**
* Construct a new instance with the CommandResult from a failed command
*
* @param commandResult the result
*/
public CommandFailureException(CommandResult commandResult){
super(ServerError.getCode(commandResult), commandResult.toString());
this.commandResult = commandResult;
}
/**
* Gets the getlasterror command result document.
*
* @return the command result
*/
public CommandResult getCommandResult() {
return commandResult;
}
}

View File

@ -25,14 +25,9 @@ package com.massivecraft.mcore.xlib.mongodb;
public class CommandResult extends BasicDBObject {
CommandResult(ServerAddress srv) {
this(null, srv);
}
CommandResult(DBObject cmd, ServerAddress srv) {
if (srv == null) {
throw new IllegalArgumentException("server address is null");
}
_cmd = cmd;
_host = srv;
//so it is shown in toString/debug
put("serverUsed", srv.toString());
@ -61,47 +56,28 @@ public class CommandResult extends BasicDBObject {
* @return The error message or null
*/
public String getErrorMessage(){
Object foo = get( "errmsg" );
if ( foo == null )
Object errorMessage = get( "errmsg" );
if ( errorMessage == null )
return null;
return foo.toString();
return errorMessage.toString();
}
/**
* utility method to create an exception with the command name
* @return The mongo exception or null
*/
public MongoException getException(){
if ( !ok() ) {
StringBuilder buf = new StringBuilder();
String cmdName;
if (_cmd != null) {
cmdName = _cmd.keySet().iterator().next();
buf.append( "command failed [" ).append( cmdName ).append( "]: " );
} else {
buf.append( "operation failed: ");
public MongoException getException() {
if ( !ok() ) { // check for command failure
return new CommandFailureException( this );
} else if ( hasErr() ) { // check for errors reported by getlasterror command
if (getCode() == 11000 || getCode() == 11001 || getCode() == 12582) {
return new MongoException.DuplicateKey(this);
}
buf.append( toString() );
return new CommandFailure( this , buf.toString() );
} else {
// GLE check
if ( hasErr() ) {
Object foo = get( "err" );
int code = getCode();
String s = foo.toString();
if ( code == 11000 || code == 11001 || s.startsWith( "E11000" ) || s.startsWith( "E11001" ) )
return new MongoException.DuplicateKey( code , s );
return new MongoException( code , s );
else {
return new WriteConcernException(this);
}
}
//all good, should never get here.
return null;
}
@ -109,7 +85,7 @@ public class CommandResult extends BasicDBObject {
* returns the "code" field, as an int
* @return -1 if there is no code
*/
private int getCode(){
int getCode() {
int code = -1;
if ( get( "code" ) instanceof Number )
code = ((Number)get("code")).intValue();
@ -129,7 +105,7 @@ public class CommandResult extends BasicDBObject {
* throws an exception containing the cmd name, in case the command failed, or the "err/code" information
* @throws MongoException
*/
public void throwOnError() throws MongoException {
public void throwOnError() {
if ( !ok() || hasErr() ){
throw getException();
}
@ -139,15 +115,7 @@ public class CommandResult extends BasicDBObject {
return _host;
}
private final DBObject _cmd;
private final ServerAddress _host;
private static final long serialVersionUID = 1L;
static class CommandFailure extends MongoException {
private static final long serialVersionUID = 1L;
CommandFailure( CommandResult res , String msg ){
super( ServerError.getCode( res ) , msg );
}
}
}
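getException() now maps failures onto the dedicated exception types: CommandFailureException when the command itself did not succeed, MongoException.DuplicateKey for the 11000/11001/12582 codes, and WriteConcernException for other getlasterror errors. A hedged sketch of the caller side, using a deliberately bogus command name:

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.CommandFailureException;
import com.massivecraft.mcore.xlib.mongodb.CommandResult;
import com.massivecraft.mcore.xlib.mongodb.DB;

public class CommandErrorHandlingSketch
{
    public static void runOrReport(DB db)
    {
        // "noSuchCommand" is intentionally unknown, so ok() will be false on the result.
        CommandResult res = db.command(new BasicDBObject("noSuchCommand", 1));
        try
        {
            res.throwOnError();
        }
        catch (CommandFailureException e)
        {
            // The failed command result travels with the exception instead of a prebuilt message string.
            System.err.println("command failed: " + e.getCommandResult());
        }
    }
}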

View File

@ -0,0 +1,252 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Base class for classes that manage connections to mongo instances as background tasks.
*/
abstract class ConnectionStatus {
ConnectionStatus(List<ServerAddress> mongosAddresses, Mongo mongo) {
_mongoOptions = mongoOptionsDefaults.copy();
_mongoOptions.socketFactory = mongo._options.socketFactory;
this._mongosAddresses = new ArrayList<ServerAddress>(mongosAddresses);
this._mongo = mongo;
}
protected BackgroundUpdater _updater;
protected final Mongo _mongo;
protected final List<ServerAddress> _mongosAddresses;
protected volatile boolean _closed;
protected final MongoOptions _mongoOptions;
protected static int updaterIntervalMS;
protected static int updaterIntervalNoMasterMS;
@SuppressWarnings("deprecation")
protected static final MongoOptions mongoOptionsDefaults = new MongoOptions();
protected static final float latencySmoothFactor;
protected static final DBObject isMasterCmd = new BasicDBObject("ismaster", 1);
/**
* Start the updater if there is one
*/
void start() {
if (_updater != null) {
_updater.start();
}
}
/**
* Stop the updater if there is one
*/
void close() {
_closed = true;
if (_updater != null) {
_updater.interrupt();
}
}
/**
* Gets the list of addresses for this connection.
*/
abstract List<ServerAddress> getServerAddressList();
/**
* Whether there is least one server up.
*/
abstract boolean hasServerUp();
/**
* Ensures that we have the current master, if there is one. If the current snapshot of the replica set
* has no master, this method waits one cycle to find a new master, and returns it if found, or null if not.
*
* @return address of the current master, or null if there is none
*/
abstract Node ensureMaster();
/**
* Whether this connection has been closed.
*/
void checkClosed() {
if (_closed)
throw new IllegalStateException("ReplicaSetStatus closed");
}
static {
updaterIntervalMS = Integer.parseInt(System.getProperty("com.mongodb.updaterIntervalMS", "5000"));
updaterIntervalNoMasterMS = Integer.parseInt(System.getProperty("com.mongodb.updaterIntervalNoMasterMS", "10"));
mongoOptionsDefaults.connectTimeout = Integer.parseInt(System.getProperty("com.mongodb.updaterConnectTimeoutMS", "20000"));
mongoOptionsDefaults.socketTimeout = Integer.parseInt(System.getProperty("com.mongodb.updaterSocketTimeoutMS", "20000"));
latencySmoothFactor = Float.parseFloat(System.getProperty("com.mongodb.latencySmoothFactor", "4"));
}
static class Node {
Node(float pingTime, ServerAddress addr, int maxBsonObjectSize, boolean ok) {
this._pingTime = pingTime;
this._addr = addr;
this._maxBsonObjectSize = maxBsonObjectSize;
this._ok = ok;
}
public boolean isOk() {
return _ok;
}
public int getMaxBsonObjectSize() {
return _maxBsonObjectSize;
}
public ServerAddress getServerAddress() {
return _addr;
}
protected final ServerAddress _addr;
protected final float _pingTime;
protected final boolean _ok;
protected final int _maxBsonObjectSize;
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final Node node = (Node) o;
if (_maxBsonObjectSize != node._maxBsonObjectSize) return false;
if (_ok != node._ok) return false;
if (Float.compare(node._pingTime, _pingTime) != 0) return false;
if (!_addr.equals(node._addr)) return false;
return true;
}
@Override
public int hashCode() {
int result = _addr.hashCode();
result = 31 * result + (_pingTime != +0.0f ? Float.floatToIntBits(_pingTime) : 0);
result = 31 * result + (_ok ? 1 : 0);
result = 31 * result + _maxBsonObjectSize;
return result;
}
public String toJSON() {
StringBuilder buf = new StringBuilder();
buf.append("{");
buf.append("address:'").append(_addr).append("', ");
buf.append("ok:").append(_ok).append(", ");
buf.append("ping:").append(_pingTime).append(", ");
buf.append("maxBsonObjectSize:").append(_maxBsonObjectSize).append(", ");
buf.append("}");
return buf.toString();
}
}
static class BackgroundUpdater extends Thread {
public BackgroundUpdater(final String name) {
super(name);
setDaemon(true);
}
}
static abstract class UpdatableNode {
UpdatableNode(final ServerAddress addr, Mongo mongo, MongoOptions mongoOptions) {
this._addr = addr;
this._mongo = mongo;
this._mongoOptions = mongoOptions;
this._port = new DBPort(addr, null, mongoOptions);
}
public CommandResult update() {
CommandResult res = null;
try {
long start = System.nanoTime();
res = _port.runCommand(_mongo.getDB("admin"), isMasterCmd);
long end = System.nanoTime();
float newPingMS = (end - start) / 1000000F;
if (!successfullyContacted)
_pingTimeMS = newPingMS;
else
_pingTimeMS = _pingTimeMS + ((newPingMS - _pingTimeMS) / latencySmoothFactor);
getLogger().log(Level.FINE, "Latency to " + _addr + " actual=" + newPingMS + " smoothed=" + _pingTimeMS);
successfullyContacted = true;
if (res == null) {
throw new MongoInternalException("Invalid null value returned from isMaster");
}
if (!_ok) {
getLogger().log(Level.INFO, "Server seen up: " + _addr);
}
_ok = true;
// max size was added in 1.8
if (res.containsField("maxBsonObjectSize")) {
_maxBsonObjectSize = (Integer) res.get("maxBsonObjectSize");
} else {
_maxBsonObjectSize = Bytes.MAX_OBJECT_SIZE;
}
} catch (Exception e) {
if (!((_ok) ? true : (Math.random() > 0.1))) {
return res;
}
final StringBuilder logError = (new StringBuilder("Server seen down: ")).append(_addr);
if (e instanceof IOException) {
logError.append(" - ").append(IOException.class.getName());
if (e.getMessage() != null) {
logError.append(" - message: ").append(e.getMessage());
}
getLogger().log(Level.WARNING, logError.toString());
} else {
getLogger().log(Level.WARNING, logError.toString(), e);
}
_ok = false;
}
return res;
}
protected abstract Logger getLogger();
final ServerAddress _addr;
final MongoOptions _mongoOptions;
final Mongo _mongo;
DBPort _port; // we have our own port so we can set different socket options and don't have to worry about the pool
boolean successfullyContacted = false;
boolean _ok = false;
float _pingTimeMS = 0;
int _maxBsonObjectSize;
}
}

View File

@ -18,6 +18,11 @@
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.mongodb.DBApiLayer.Result;
import com.massivecraft.mcore.xlib.mongodb.util.Util;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
@ -28,15 +33,26 @@ import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import com.massivecraft.mcore.xlib.mongodb.DBApiLayer.Result;
import com.massivecraft.mcore.xlib.mongodb.util.Util;
/**
* an abstract class that represents a logical database on a server
* @dochub databases
*/
public abstract class DB {
private static final Set<String> _obedientCommands = new HashSet<String>();
static {
_obedientCommands.add("group");
_obedientCommands.add("aggregate");
_obedientCommands.add("collstats");
_obedientCommands.add("dbstats");
_obedientCommands.add("count");
_obedientCommands.add("distinct");
_obedientCommands.add("geonear");
_obedientCommands.add("geosearch");
_obedientCommands.add("geowalk");
}
/**
* @param mongo the mongo instance
* @param name the database name
@ -47,6 +63,44 @@ public abstract class DB {
_options = new Bytes.OptionHolder( _mongo._netOptions );
}
/**
* Determines the read preference that should be used for the given command.
* @param command the <code>DBObject</code> representing the command
* @param requestedPreference the preference requested by the client.
* @return the read preference to use for the given command. It will never return null.
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference
*/
ReadPreference getCommandReadPreference(DBObject command, ReadPreference requestedPreference){
String comString = command.keySet().iterator().next();
if (comString.equals("getnonce") || comString.equals("authenticate")) {
return ReadPreference.primaryPreferred();
}
boolean primaryRequired;
// explicitly check mapreduce commands are inline
if(comString.equals("mapreduce")) {
Object out = command.get("out");
if (out instanceof BSONObject ){
BSONObject outMap = (BSONObject) out;
primaryRequired = outMap.get("inline") == null;
}
else
primaryRequired = true;
} else {
primaryRequired = !_obedientCommands.contains(comString.toLowerCase());
}
if (primaryRequired) {
return ReadPreference.primary();
} else if (requestedPreference == null) {
return ReadPreference.primary();
} else {
return requestedPreference;
}
}
/**
* starts a new "consistent request".
* Following this call and until requestDone() is called, all db operations should use the same underlying connection.
@ -95,6 +149,7 @@ public abstract class DB {
* @param name the name of the collection to return
* @param options options
* @return the collection
* @throws MongoException
*/
public DBCollection createCollection( String name, DBObject options ){
if ( options != null ){
@ -140,21 +195,52 @@ public abstract class DB {
* @throws MongoException
* @dochub commands
*/
public CommandResult command( DBObject cmd ) throws MongoException{
public CommandResult command( DBObject cmd ){
return command( cmd, 0 );
}
public CommandResult command( DBObject cmd, DBEncoder encoder ) throws MongoException{
/**
* Executes a database command.
* This method calls {@link DB#command(com.massivecraft.mcore.xlib.mongodb.DBObject, int, com.massivecraft.mcore.xlib.mongodb.DBEncoder) } with 0 as query option.
* @see <a href="http://mongodb.onconfluence.com/display/DOCS/List+of+Database+Commands">List of Commands</a>
* @param cmd dbobject representing the command to execute
* @param encoder
* @return result of command from the database
* @throws MongoException
* @dochub commands
*/
public CommandResult command( DBObject cmd, DBEncoder encoder ){
return command( cmd, 0, encoder );
}
public CommandResult command( DBObject cmd , int options, DBEncoder encoder )
throws MongoException {
return command(cmd, options, null, encoder);
/**
* Executes a database command.
* This method calls {@link DB#command(com.massivecraft.mcore.xlib.mongodb.DBObject, int, com.massivecraft.mcore.xlib.mongodb.ReadPreference, com.massivecraft.mcore.xlib.mongodb.DBEncoder) } with a null readPrefs.
* @see <a href="http://mongodb.onconfluence.com/display/DOCS/List+of+Database+Commands">List of Commands</a>
* @param cmd dbobject representing the command to execute
* @param options query options to use
* @param encoder
* @return result of command from the database
* @throws MongoException
* @dochub commands
*/
public CommandResult command( DBObject cmd , int options, DBEncoder encoder ){
return command(cmd, options, getReadPreference(), encoder);
}
public CommandResult command( DBObject cmd , int options, ReadPreference readPrefs )
throws MongoException {
/**
* Executes a database command.
* This method calls {@link DB#command(com.massivecraft.mcore.xlib.mongodb.DBObject, int, com.massivecraft.mcore.xlib.mongodb.ReadPreference, com.massivecraft.mcore.xlib.mongodb.DBEncoder) } with a default encoder.
* @see <a href="http://mongodb.onconfluence.com/display/DOCS/List+of+Database+Commands">List of Commands</a>
* @param cmd dbobject representing the command to execute
* @param options query options to use
* @param readPrefs ReadPreferences for this command (nodes selection is the biggest part of this)
* @return result of command from the database
* @throws MongoException
* @dochub commands
*/
public CommandResult command( DBObject cmd , int options, ReadPreference readPrefs ){
return command(cmd, options, readPrefs, DefaultDBEncoder.FACTORY.create());
}
@ -164,12 +250,14 @@ public abstract class DB {
* @param cmd dbobject representing the command to execute
* @param options query options to use
* @param readPrefs ReadPreferences for this command (nodes selection is the biggest part of this)
* @param encoder
* @return result of command from the database
* @dochub commands
* @throws MongoException
* @dochub commands
*/
public CommandResult command( DBObject cmd , int options, ReadPreference readPrefs, DBEncoder encoder )
throws MongoException {
public CommandResult command( DBObject cmd , int options, ReadPreference readPrefs, DBEncoder encoder ){
readPrefs = getCommandReadPreference(cmd, readPrefs);
cmd = wrapCommand(cmd, readPrefs);
Iterator<DBObject> i =
getCollection("$cmd").__find(cmd, new BasicDBObject(), 0, -1, 0, options, readPrefs ,
@ -179,24 +267,40 @@ public abstract class DB {
DBObject res = i.next();
ServerAddress sa = (i instanceof Result) ? ((Result) i).getServerAddress() : null;
CommandResult cr = new CommandResult(cmd, sa);
CommandResult cr = new CommandResult(sa);
cr.putAll( res );
return cr;
}
// Only append $readPreference meta-operator if connected to a mongos, read preference is not primary
// or secondary preferred,
// and command is an instance of BasicDBObject. The last condition is unfortunate, but necessary in case
// the encoder is not capable of encoding a BasicDBObject
// Due to issues with compatibility between different versions of mongos, also wrap the command in a
// $query field, so that the $readPreference is not rejected
private DBObject wrapCommand(DBObject cmd, final ReadPreference readPrefs) {
if (getMongo().isMongosConnection() &&
!(ReadPreference.primary().equals(readPrefs) || ReadPreference.secondaryPreferred().equals(readPrefs)) &&
cmd instanceof BasicDBObject) {
cmd = new BasicDBObject("$query", cmd)
.append(QueryOpBuilder.READ_PREFERENCE_META_OPERATOR, readPrefs.toDBObject());
}
return cmd;
}
/**
* Executes a database command.
* @see <a href="http://mongodb.onconfluence.com/display/DOCS/List+of+Database+Commands">List of Commands</a>
* @param cmd dbobject representing the command to execute
* @param options query options to use
* @return result of command from the database
* @dochub commands
* @throws MongoException
* @dochub commands
*/
public CommandResult command( DBObject cmd , int options )
throws MongoException {
public CommandResult command( DBObject cmd , int options ){
return command(cmd, options, getReadPreference());
}
/**
* Executes a database command.
* This method constructs a simple dbobject and calls {@link DB#command(com.massivecraft.mcore.xlib.mongodb.DBObject) }
@ -204,9 +308,9 @@ public abstract class DB {
* @param cmd command to execute
* @return result of command from the database
* @throws MongoException
* @dochub commands
*/
public CommandResult command( String cmd )
throws MongoException {
public CommandResult command( String cmd ){
return command( new BasicDBObject( cmd , Boolean.TRUE ) );
}
@ -218,9 +322,9 @@ public abstract class DB {
* @param options query options to use
* @return result of command from the database
* @throws MongoException
* @dochub commands
*/
public CommandResult command( String cmd, int options )
throws MongoException {
public CommandResult command( String cmd, int options ){
return command( new BasicDBObject( cmd , Boolean.TRUE ), options );
}
@ -232,8 +336,7 @@ public abstract class DB {
* @return The command result
* @throws MongoException
*/
public CommandResult doEval( String code , Object ... args )
throws MongoException {
public CommandResult doEval( String code , Object ... args ){
return command( BasicDBObjectBuilder.start()
.add( "$eval" , code )
@ -250,8 +353,7 @@ public abstract class DB {
* @return The object
* @throws MongoException
*/
public Object eval( String code , Object ... args )
throws MongoException {
public Object eval( String code , Object ... args ){
CommandResult res = doEval( code , args );
res.throwOnError();
@ -261,6 +363,7 @@ public abstract class DB {
/**
* Returns the result of "dbstats" command
* @return
* @throws MongoException
*/
public CommandResult getStats() {
return command("dbstats");
@ -288,8 +391,7 @@ public abstract class DB {
* @return the names of collections in this database
* @throws MongoException
*/
public Set<String> getCollectionNames()
throws MongoException {
public Set<String> getCollectionNames(){
DBCollection namespaces = getCollection("system.namespaces");
if (namespaces == null)
@ -330,6 +432,7 @@ public abstract class DB {
* Checks to see if a collection by name %lt;name&gt; exists.
* @param collectionName The collection to test for existence
* @return false if no collection by that name exists, true if a match to an existing collection was found
* @throws MongoException
*/
public boolean collectionExists(String collectionName)
{
@ -376,8 +479,7 @@ public abstract class DB {
* @return DBObject with error and status information
* @throws MongoException
*/
public CommandResult getLastError()
throws MongoException {
public CommandResult getLastError(){
return command(new BasicDBObject("getlasterror", 1));
}
@ -387,8 +489,7 @@ public abstract class DB {
* @return
* @throws MongoException
*/
public CommandResult getLastError( com.massivecraft.mcore.xlib.mongodb.WriteConcern concern )
throws MongoException {
public CommandResult getLastError( com.massivecraft.mcore.xlib.mongodb.WriteConcern concern ){
return command( concern.getCommand() );
}
@ -400,8 +501,7 @@ public abstract class DB {
* @return The command result
* @throws MongoException
*/
public CommandResult getLastError( int w , int wtimeout , boolean fsync )
throws MongoException {
public CommandResult getLastError( int w , int wtimeout , boolean fsync ){
return command( (new com.massivecraft.mcore.xlib.mongodb.WriteConcern( w, wtimeout , fsync )).getCommand() );
}
@ -452,8 +552,7 @@ public abstract class DB {
* Drops this database. Removes all data on disk. Use with caution.
* @throws MongoException
*/
public void dropDatabase()
throws MongoException {
public void dropDatabase(){
CommandResult res = command(new BasicDBObject("dropDatabase", 1));
res.throwOnError();
@ -467,102 +566,95 @@ public abstract class DB {
* @dochub authenticate
*/
public boolean isAuthenticated() {
return ( _username != null );
return getAuthenticationCredentials() != null;
}
/**
* Authenticates to db with the given name and password
* Authenticates to db with the given credentials. If this method (or {@code authenticateCommand} has already been
* called with the same credentials and the authentication test succeeded, this method will return true. If this method
* has already been called with different credentials and the authentication test succeeded,
* this method will throw an {@code IllegalStateException}. If this method has already been called with any credentials
* and the authentication test failed, this method will re-try the authentication test with the
* given credentials.
*
* @param username name of user for this database
* @param passwd password of user for this database
* @param password password of user for this database
* @return true if authenticated, false otherwise
* @throws MongoException
* @throws MongoException if authentication failed due to invalid user/pass, or other exceptions like I/O
* @throws IllegalStateException if authentiation test has already succeeded with different credentials
* @see #authenticateCommand(String, char[])
* @dochub authenticate
*/
public boolean authenticate(String username, char[] passwd )
throws MongoException {
if ( username == null || passwd == null )
throw new NullPointerException( "username can't be null" );
if ( _username != null )
throw new IllegalStateException( "can't call authenticate twice on the same DBObject" );
String hash = _hash( username , passwd );
CommandResult res = _doauth( username , hash.getBytes() );
if ( !res.ok())
return false;
_username = username;
_authhash = hash.getBytes();
return true;
public boolean authenticate(String username, char[] password ){
return authenticateCommandHelper(username, password).failure == null;
}
/**
* Authenticates to db with the given name and password
* Authenticates to db with the given credentials. If this method (or {@code authenticate} has already been
* called with the same credentials and the authentication test succeeded, this method will return true. If this method
* has already been called with different credentials and the authentication test succeeded,
* this method will throw an {@code IllegalStateException}. If this method has already been called with any credentials
* and the authentication test failed, this method will re-try the authentication test with the
* given credentials.
*
*
* @param username name of user for this database
* @param passwd password of user for this database
* @param password password of user for this database
* @return the CommandResult from authenticate command
* @throws MongoException if authentication failed due to invalid user/pass, or other exceptions like I/O
* @throws IllegalStateException if authentiation test has already succeeded with different credentials
* @see #authenticate(String, char[])
* @dochub authenticate
*/
public CommandResult authenticateCommand(String username, char[] passwd )
throws MongoException {
if ( username == null || passwd == null )
throw new NullPointerException( "username can't be null" );
if ( _username != null )
throw new IllegalStateException( "can't call authenticate twice on the same DBObject" );
String hash = _hash( username , passwd );
CommandResult res = _doauth( username , hash.getBytes() );
res.throwOnError();
_username = username;
_authhash = hash.getBytes();
return res;
public synchronized CommandResult authenticateCommand(String username, char[] password ){
CommandResultPair commandResultPair = authenticateCommandHelper(username, password);
if (commandResultPair.failure != null) {
throw commandResultPair.failure;
}
return commandResultPair.result;
}
/*
boolean reauth(){
if ( _username == null || _authhash == null )
throw new IllegalStateException( "no auth info!" );
return _doauth( _username , _authhash );
private CommandResultPair authenticateCommandHelper(String username, char[] password) {
MongoCredential credentials =
MongoCredential.createMongoCRCredential(username, getName(), password);
if (getAuthenticationCredentials() != null) {
if (getAuthenticationCredentials().equals(credentials)) {
if (authenticationTestCommandResult != null) {
return new CommandResultPair(authenticationTestCommandResult);
}
} else {
throw new IllegalStateException("can't authenticate twice on the same database");
}
*/
DBObject _authCommand( String nonce ){
if ( _username == null || _authhash == null )
throw new IllegalStateException( "no auth info!" );
return _authCommand( nonce , _username , _authhash );
}
static DBObject _authCommand( String nonce , String username , byte[] hash ){
String key = nonce + username + new String( hash );
BasicDBObject cmd = new BasicDBObject();
cmd.put("authenticate", 1);
cmd.put("user", username);
cmd.put("nonce", nonce);
cmd.put("key", Util.hexMD5(key.getBytes()));
return cmd;
try {
authenticationTestCommandResult = doAuthenticate(credentials);
return new CommandResultPair(authenticationTestCommandResult);
} catch (CommandFailureException commandFailureException) {
return new CommandResultPair(commandFailureException);
}
}
private CommandResult _doauth( String username , byte[] hash ){
CommandResult res = command(new BasicDBObject("getnonce", 1));
res.throwOnError();
class CommandResultPair {
CommandResult result;
CommandFailureException failure;
DBObject cmd = _authCommand( res.getString( "nonce" ) , username , hash );
return command(cmd);
public CommandResultPair(final CommandResult result) {
this.result = result;
}
public CommandResultPair(final CommandFailureException failure) {
this.failure = failure;
}
}
abstract CommandResult doAuthenticate(MongoCredential credentials);
/**
* Adds a new user for this db
* @param username
* @param passwd
* @throws MongoException
*/
public WriteResult addUser( String username , char[] passwd ){
return addUser(username, passwd, false);
@ -573,6 +665,7 @@ public abstract class DB {
* @param username
* @param passwd
* @param readOnly if true, user will only be able to read
* @throws MongoException
*/
public WriteResult addUser( String username , char[] passwd, boolean readOnly ){
DBCollection c = getCollection( "system.users" );
@ -587,6 +680,7 @@ public abstract class DB {
/**
* Removes a user for this db
* @param username
* @throws MongoException
*/
public WriteResult removeUser( String username ){
DBCollection c = getCollection( "system.users" );
@ -629,8 +723,7 @@ public abstract class DB {
* @return DBObject with error and status information
* @throws MongoException
*/
public CommandResult getPreviousError()
throws MongoException {
public CommandResult getPreviousError(){
return command(new BasicDBObject("getpreverror", 1));
}
@ -639,8 +732,7 @@ public abstract class DB {
* Used to clear all errors such that {@link DB#getPreviousError()} will return no error.
* @throws MongoException
*/
public void resetError()
throws MongoException {
public void resetError(){
command(new BasicDBObject("reseterror", 1));
}
@ -648,8 +740,7 @@ public abstract class DB {
* For testing purposes only - this method forces an error to help test error handling
* @throws MongoException
*/
public void forceError()
throws MongoException {
public void forceError(){
command(new BasicDBObject("forceerror", 1));
}
@ -673,8 +764,8 @@ public abstract class DB {
/**
* Makes it possible to execute "read" queries on a slave node
*
* @deprecated Replaced with ReadPreference.SECONDARY
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference.SECONDARY
* @deprecated Replaced with {@code ReadPreference.secondaryPreferred()}
* @see ReadPreference#secondaryPreferred()
*/
@Deprecated
public void slaveOk(){
@ -712,8 +803,11 @@ public abstract class DB {
return _options.get();
}
public abstract void cleanCursors( boolean force ) throws MongoException;
public abstract void cleanCursors( boolean force );
MongoCredential getAuthenticationCredentials() {
return getMongo().getAuthority().getCredentialsStore().get(getName());
}
final Mongo _mongo;
final String _name;
@ -723,7 +817,7 @@ public abstract class DB {
private com.massivecraft.mcore.xlib.mongodb.ReadPreference _readPref;
final Bytes.OptionHolder _options;
String _username;
byte[] _authhash = null;
// cached authentication command result, to return in case of multiple calls to authenticateCommand with the
// same credentials
private volatile CommandResult authenticationTestCommandResult;
}
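The new getCommandReadPreference routing means read-only commands in the obedient-commands set above (count, dbstats, distinct, and so on) honor a requested non-primary read preference, while commands such as mapreduce without inline output are forced back to the primary. A hedged sketch of passing a preference through command():

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.CommandResult;
import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.ReadPreference;

public class CommandReadPreferenceSketch
{
    public static long countOnSecondary(DB db, String collectionName)
    {
        // "count" is an obedient command, so the secondaryPreferred request is honored here.
        CommandResult res = db.command(new BasicDBObject("count", collectionName), 0,
                ReadPreference.secondaryPreferred());
        res.throwOnError();
        return ((Number) res.get("n")).longValue();
    }
}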

View File

@ -155,6 +155,7 @@ public class DBAddress extends ServerAddress {
* creates a DBAddress pointing to a different database on the same server
* @param name database name
* @return
* @throws MongoException
*/
public DBAddress getSister( String name ){
try {

View File

@ -18,31 +18,24 @@
package com.massivecraft.mcore.xlib.mongodb;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.mongodb.util.JSON;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.logging.Level;
import java.util.logging.Logger;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.mongodb.util.JSON;
/** Database API
* This cannot be directly instantiated, but the functions are available
* through instances of Mongo.
*/
public class DBApiLayer extends DB {
static final boolean D = Boolean.getBoolean( "DEBUG.DB" );
/** The maximum number of cursors allowed */
static final int NUM_CURSORS_BEFORE_KILL = 100;
static final int NUM_CURSORS_PER_BATCH = 20000;
@ -52,18 +45,18 @@ public class DBApiLayer extends DB {
static final Logger TRACE_LOGGER = Logger.getLogger( "com.mongodb.TRACE" );
static final Level TRACE_LEVEL = Boolean.getBoolean( "DB.TRACE" ) ? Level.INFO : Level.FINEST;
static final boolean willTrace(){
static boolean willTrace(){
return TRACE_LOGGER.isLoggable( TRACE_LEVEL );
}
static final void trace( String s ){
static void trace( String s ){
TRACE_LOGGER.log( TRACE_LEVEL , s );
}
static int chooseBatchSize(int batchSize, int limit, int fetched) {
int bs = Math.abs(batchSize);
int remaining = limit > 0 ? limit - fetched : 0;
int res = 0;
int res;
if (bs == 0 && remaining > 0)
res = remaining;
else if (bs > 0 && remaining == 0)
@ -122,14 +115,12 @@ public class DBApiLayer extends DB {
return old != null ? old : c;
}
String _removeRoot( String ns ){
if ( ! ns.startsWith( _rootPlusDot ) )
return ns;
return ns.substring( _root.length() + 1 );
}
public void cleanCursors( boolean force )
throws MongoException {
/**
* @param force true to clean up regardless of the number of dead cursors
* @throws MongoException
*/
public void cleanCursors( boolean force ){
int sz = _deadCursorIds.size();
@ -161,15 +152,11 @@ public class DBApiLayer extends DB {
}
}
void killCursors( ServerAddress addr , List<Long> all )
throws MongoException {
void killCursors( ServerAddress addr , List<Long> all ){
if ( all == null || all.size() == 0 )
return;
OutMessage om = new OutMessage( _mongo , 2007 );
om.writeInt( 0 ); // reserved
om.writeInt( Math.min( NUM_CURSORS_PER_BATCH , all.size() ) );
OutMessage om = OutMessage.killCursors(_mongo, Math.min( NUM_CURSORS_PER_BATCH , all.size()));
int soFar = 0;
int totalSoFar = 0;
@ -181,9 +168,7 @@ public class DBApiLayer extends DB {
if ( soFar >= NUM_CURSORS_PER_BATCH ){
_connector.say( this , om ,com.massivecraft.mcore.xlib.mongodb.WriteConcern.NONE );
om = new OutMessage( _mongo , 2007 );
om.writeInt( 0 ); // reserved
om.writeInt( Math.min( NUM_CURSORS_PER_BATCH , all.size() - totalSoFar ) );
om = OutMessage.killCursors(_mongo, Math.min( NUM_CURSORS_PER_BATCH , all.size() - totalSoFar));
soFar = 0;
}
}
@ -191,6 +176,11 @@ public class DBApiLayer extends DB {
_connector.say( this , om ,com.massivecraft.mcore.xlib.mongodb.WriteConcern.NONE , addr );
}
@Override
CommandResult doAuthenticate(MongoCredential credentials) {
return _connector.authenticate(credentials);
}
class MyCollection extends DBCollection {
MyCollection( String name ){
super( DBApiLayer.this , name );
@ -201,36 +191,38 @@ public class DBApiLayer extends DB {
}
@Override
public void drop() throws MongoException {
public void drop(){
_collections.remove(getName());
super.drop();
}
public WriteResult insert(DBObject[] arr, com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder )
throws MongoException {
return insert( arr, true, concern, encoder );
public WriteResult insert(List<DBObject> list, com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ){
if (concern == null) {
throw new IllegalArgumentException("Write concern can not be null");
}
protected WriteResult insert(DBObject[] arr, boolean shouldApply , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder )
throws MongoException {
return insert(list, true, concern, encoder);
}
protected WriteResult insert(List<DBObject> list, boolean shouldApply , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ){
if (encoder == null)
encoder = DefaultDBEncoder.FACTORY.create();
if ( willTrace() ) {
for (DBObject o : arr) {
for (DBObject o : list) {
trace( "save: " + _fullNameSpace + " " + JSON.serialize( o ) );
}
}
if ( shouldApply ){
for ( int i=0; i<arr.length; i++ ){
DBObject o=arr[i];
apply( o );
_checkObject( o , false , false );
Object id = o.get( "_id" );
if ( id instanceof ObjectId ){
((ObjectId)id).notNew();
for (DBObject o : list) {
apply(o);
_checkObject(o, false, false);
Object id = o.get("_id");
if (id instanceof ObjectId) {
((ObjectId) id).notNew();
}
}
}
@ -239,16 +231,12 @@ public class DBApiLayer extends DB {
int cur = 0;
int maxsize = _mongo.getMaxBsonObjectSize();
while ( cur < arr.length ){
OutMessage om = new OutMessage( _mongo , 2002, encoder );
while ( cur < list.size() ) {
int flags = 0;
if ( concern.getContinueOnErrorForInsert() ) flags |= 1;
om.writeInt( flags );
om.writeCString( _fullNameSpace );
OutMessage om = OutMessage.insert( this , encoder, concern );
for ( ; cur<arr.length; cur++ ){
DBObject o = arr[cur];
for ( ; cur < list.size(); cur++ ){
DBObject o = list.get(cur);
om.putObject( o );
// limit for batch insert is 4 x maxbson on server, use 2 x to be safe
@ -264,50 +252,38 @@ public class DBApiLayer extends DB {
return last;
}
public WriteResult remove( DBObject o , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder )
throws MongoException {
public WriteResult remove( DBObject o , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ){
if (concern == null) {
throw new IllegalArgumentException("Write concern can not be null");
}
if (encoder == null)
encoder = DefaultDBEncoder.FACTORY.create();
if ( willTrace() ) trace( "remove: " + _fullNameSpace + " " + JSON.serialize( o ) );
OutMessage om = new OutMessage( _mongo , 2006, encoder );
om.writeInt( 0 ); // reserved
om.writeCString( _fullNameSpace );
Collection<String> keys = o.keySet();
if ( keys.size() == 1 &&
keys.iterator().next().equals( "_id" ) &&
o.get( keys.iterator().next() ) instanceof ObjectId )
om.writeInt( 1 );
else
om.writeInt( 0 );
om.putObject( o );
OutMessage om = OutMessage.remove(this, encoder, o);
return _connector.say( _db , om , concern );
}
@Override
Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize, int limit , int options, ReadPreference readPref, DBDecoder decoder )
throws MongoException {
Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize, int limit , int options, ReadPreference readPref, DBDecoder decoder ){
return __find(ref, fields, numToSkip, batchSize, limit, options, readPref, decoder, DefaultDBEncoder.FACTORY.create());
}
@Override
Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options,
ReadPreference readPref, DBDecoder decoder, DBEncoder encoder ) throws MongoException {
ReadPreference readPref, DBDecoder decoder, DBEncoder encoder ){
if ( ref == null )
ref = new BasicDBObject();
if ( willTrace() ) trace( "find: " + _fullNameSpace + " " + JSON.serialize( ref ) );
OutMessage query = OutMessage.query( _mongo , options , _fullNameSpace , numToSkip , chooseBatchSize(batchSize, limit, 0) , ref , fields, readPref,
OutMessage query = OutMessage.query( this , options , numToSkip , chooseBatchSize(batchSize, limit, 0) , ref , fields, readPref,
encoder);
Response res = _connector.call( _db , this , query , null , 2, readPref, decoder );
@ -323,38 +299,36 @@ public class DBApiLayer extends DB {
}
@Override
public WriteResult update( DBObject query , DBObject o , boolean upsert , boolean multi , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder )
throws MongoException {
public WriteResult update( DBObject query , DBObject o , boolean upsert , boolean multi , com.massivecraft.mcore.xlib.mongodb.WriteConcern concern, DBEncoder encoder ){
if (o == null) {
throw new IllegalArgumentException("update can not be null");
}
if (concern == null) {
throw new IllegalArgumentException("Write concern can not be null");
}
if (encoder == null)
encoder = DefaultDBEncoder.FACTORY.create();
if (o != null && !o.keySet().isEmpty()) {
if (!o.keySet().isEmpty()) {
// if 1st key doesn't start with $, then object will be inserted as is, need to check it
String key = o.keySet().iterator().next();
if (!key.startsWith("$"))
_checkObject(o, false, false);
}
if ( willTrace() ) trace( "update: " + _fullNameSpace + " " + JSON.serialize( query ) + " " + JSON.serialize( o ) );
if ( willTrace() ) {
trace( "update: " + _fullNameSpace + " " + JSON.serialize( query ) + " " + JSON.serialize( o ) );
}
OutMessage om = new OutMessage( _mongo , 2001, encoder );
om.writeInt( 0 ); // reserved
om.writeCString( _fullNameSpace );
int flags = 0;
if ( upsert ) flags |= 1;
if ( multi ) flags |= 2;
om.writeInt( flags );
om.putObject( query );
om.putObject( o );
OutMessage om = OutMessage.update(this, encoder, upsert, multi, query, o);
return _connector.say( _db , om , concern );
}
public void createIndex( final DBObject keys, final DBObject options, DBEncoder encoder )
throws MongoException {
public void createIndex( final DBObject keys, final DBObject options, DBEncoder encoder ){
if (encoder == null)
encoder = DefaultDBEncoder.FACTORY.create();
@ -367,7 +341,7 @@ public class DBApiLayer extends DB {
MyCollection idxs = DBApiLayer.this.doGetCollection( "system.indexes" );
//query first, maybe we should do an update w/upsert? -- need to test performance and lock behavior
if ( idxs.findOne( full ) == null )
idxs.insert( new DBObject[] { full }, false, WriteConcern.SAFE, encoder );
idxs.insert(Arrays.asList(full), false, WriteConcern.SAFE, encoder);
}
final String _fullNameSpace;
@ -383,19 +357,22 @@ public class DBApiLayer extends DB {
_host = res._host;
_decoder = decoder;
init( res );
// Only enable finalizer if cursor finalization is enabled and there is actually a cursor that needs killing
_optionalFinalizer = _mongo.getMongoOptions().isCursorFinalizerEnabled() && res.cursor() != 0 ?
new OptionalFinalizer() : null;
}
private void init( Response res ){
if ( ( res._flags & Bytes.RESULTFLAG_CURSORNOTFOUND ) > 0 ){
throw new MongoException.CursorNotFound(_curResult.cursor(), res.serverUsed());
}
_totalBytes += res._len;
_curResult = res;
_cur = res.iterator();
_sizes.add( res.size() );
_numFetched += res.size();
if ( ( res._flags & Bytes.RESULTFLAG_CURSORNOTFOUND ) > 0 ){
throw new MongoException.CursorNotFound(res._cursor, res.serverUsed());
}
if (res._cursor != 0 && _limit > 0 && _limit - _numFetched <= 0) {
// fetched all docs within limit, close cursor server-side
killCursor();
@ -408,7 +385,7 @@ public class DBApiLayer extends DB {
}
if ( ! _curResult.hasGetMore( _options ) )
throw new RuntimeException( "no more" );
throw new NoSuchElementException("no more");
_advance();
return next();
@ -433,7 +410,8 @@ public class DBApiLayer extends DB {
if ((_curResult._flags & Bytes.RESULTFLAG_AWAITCAPABLE) == 0) {
try {
Thread.sleep(500);
} catch (Exception e) {
} catch (InterruptedException e) {
throw new MongoInterruptedException(e);
}
}
}
@ -447,12 +425,8 @@ public class DBApiLayer extends DB {
if ( _curResult.cursor() <= 0 )
throw new RuntimeException( "can't advance a cursor <= 0" );
OutMessage m = new OutMessage( _mongo , 2005 );
m.writeInt( 0 );
m.writeCString( _collection._fullNameSpace );
m.writeInt( chooseBatchSize(_batchSize, _limit, _numFetched) );
m.writeLong( _curResult.cursor() );
OutMessage m = OutMessage.getMore(_collection, _curResult.cursor(),
chooseBatchSize(_batchSize, _limit, _numFetched));
Response res = _connector.call( DBApiLayer.this , _collection , m , _host, _decoder );
_numGetMores++;
@ -475,18 +449,6 @@ public class DBApiLayer extends DB {
return "DBCursor";
}
protected void finalize() throws Throwable {
if (_curResult != null) {
long curId = _curResult.cursor();
_curResult = null;
_cur = null;
if (curId != 0) {
_deadCursorIds.add(new DeadCursor(curId, _host));
}
}
super.finalize();
}
public long totalBytes(){
return _totalBytes;
}
@ -537,6 +499,10 @@ public class DBApiLayer extends DB {
return _host;
}
boolean hasFinalizer() {
return _optionalFinalizer != null;
}
Response _curResult;
Iterator<DBObject> _cur;
int _batchSize;
@ -551,6 +517,23 @@ public class DBApiLayer extends DB {
private List<Integer> _sizes = new ArrayList<Integer>();
private int _numFetched = 0;
// This allows us to easily enable/disable finalizer for cleaning up un-closed cursors
private final OptionalFinalizer _optionalFinalizer;
private class OptionalFinalizer {
@Override
protected void finalize() {
if (_curResult != null) {
long curId = _curResult.cursor();
_curResult = null;
_cur = null;
if (curId != 0) {
_deadCursorIds.add(new DeadCursor(curId, _host));
}
}
}
}
} // class Result
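Editor's note: the OptionalFinalizer above is only a fallback for cursors that were never closed. A minimal usage sketch follows, assuming MongoClientOptions exposes the cursorFinalizerEnabled switch as in the upstream 2.11 driver; host, database and collection names are placeholders.

import com.massivecraft.mcore.xlib.mongodb.*;

public class CursorCleanupSketch
{
    public static void main(String[] args) throws Exception
    {
        // Assumption: MongoClientOptions.Builder offers cursorFinalizerEnabled as in the upstream driver.
        MongoClientOptions options = MongoClientOptions.builder().cursorFinalizerEnabled(true).build();
        MongoClient mongoClient = new MongoClient(new ServerAddress("localhost", 27017), options);
        DBCollection coll = mongoClient.getDB("mydb").getCollection("test");

        DBCursor cursor = coll.find();
        try
        {
            while (cursor.hasNext())
            {
                System.out.println(cursor.next());
            }
        }
        finally
        {
            // Closing explicitly kills the server-side cursor; the finalizer is only a safety net.
            cursor.close();
        }
    }
}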
static class DeadCursor {
@ -571,5 +554,4 @@ public class DBApiLayer extends DB {
ConcurrentLinkedQueue<DeadCursor> _deadCursorIds = new ConcurrentLinkedQueue<DeadCursor>();
static final List<DBObject> EMPTY = Collections.unmodifiableList( new LinkedList<DBObject>() );
}

View File

@ -19,7 +19,12 @@
package com.massivecraft.mcore.xlib.mongodb;
// Mongo
import com.massivecraft.mcore.xlib.bson.LazyDBList;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
@ -28,18 +33,16 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
/** This class provides a skeleton implementation of a database collection.
* <p>A typical invocation sequence is thus
* <blockquote><pre>
* Mongo mongo = new Mongo( new DBAddress( "localhost", 127017 ) );
* DB db = mongo.getDB( "mydb" );
* DBCollection collection = db.getCollection( "test" );
* MongoClient mongoClient = new MongoClient(new ServerAddress("localhost", 27017));
* DB db = mongoClient.getDB("mydb");
* DBCollection collection = db.getCollection("test");
* </pre></blockquote>
* @dochub collections
*/
@SuppressWarnings("unchecked")
@SuppressWarnings({"unchecked", "rawtypes"})
public abstract class DBCollection {
/**
@ -53,7 +56,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub insert
*/
public WriteResult insert(DBObject[] arr , WriteConcern concern ) throws MongoException {
public WriteResult insert(DBObject[] arr , WriteConcern concern ){
return insert( arr, concern, getDBEncoder());
}
@ -69,7 +72,9 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub insert
*/
public abstract WriteResult insert(DBObject[] arr , WriteConcern concern, DBEncoder encoder) throws MongoException;
public WriteResult insert(DBObject[] arr , WriteConcern concern, DBEncoder encoder) {
return insert(Arrays.asList(arr), concern, encoder);
}
/**
* Inserts a document into the database.
@ -82,9 +87,8 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub insert
*/
public WriteResult insert(DBObject o , WriteConcern concern )
throws MongoException {
return insert( new DBObject[]{ o } , concern );
public WriteResult insert(DBObject o , WriteConcern concern ){
return insert( Arrays.asList(o) , concern );
}
/**
@ -97,8 +101,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub insert
*/
public WriteResult insert(DBObject ... arr)
throws MongoException {
public WriteResult insert(DBObject ... arr){
return insert( arr , getWriteConcern() );
}
@ -112,8 +115,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub insert
*/
public WriteResult insert(WriteConcern concern, DBObject ... arr)
throws MongoException {
public WriteResult insert(WriteConcern concern, DBObject ... arr){
return insert( arr, concern );
}
@ -127,8 +129,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub insert
*/
public WriteResult insert(List<DBObject> list )
throws MongoException {
public WriteResult insert(List<DBObject> list ){
return insert( list, getWriteConcern() );
}
@ -143,11 +144,23 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub insert
*/
public WriteResult insert(List<DBObject> list, WriteConcern concern )
throws MongoException {
return insert( list.toArray( new DBObject[list.size()] ) , concern );
public WriteResult insert(List<DBObject> list, WriteConcern concern ){
return insert(list, concern, getDBEncoder() );
}
/**
* Saves document(s) to the database.
* If a document doesn't have an _id field, one will be added;
* you can read the generated _id from the document after the insert.
*
* @param list list of documents to save
* @param concern the write concern
* @return
* @throws MongoException
* @dochub insert
*/
public abstract WriteResult insert(List<DBObject> list, WriteConcern concern, DBEncoder encoder);
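Editor's note: a minimal sketch of inserting through the new List-based entry point that the array overloads now delegate to. Collection and field names are placeholders, and the shaded package path is assumed.

import java.util.Arrays;

import com.massivecraft.mcore.xlib.mongodb.*;

public class BatchInsertSketch
{
    public static void main(String[] args) throws Exception
    {
        DBCollection coll = new MongoClient(new ServerAddress("localhost", 27017)).getDB("mydb").getCollection("test");

        // Missing _id fields are filled in before the wire message is built.
        WriteResult result = coll.insert(
                Arrays.<DBObject>asList(
                        new BasicDBObject("name", "alpha"),
                        new BasicDBObject("name", "beta")),
                WriteConcern.SAFE);
        System.out.println(result.getLastError());
    }
}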
/**
* Performs an update operation.
* @param q search query for old object to update
@ -161,7 +174,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub update
*/
public WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi , WriteConcern concern ) throws MongoException {
public WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi , WriteConcern concern ){
return update( q, o, upsert, multi, concern, getDBEncoder());
}
@ -179,7 +192,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub update
*/
public abstract WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi , WriteConcern concern, DBEncoder encoder ) throws MongoException ;
public abstract WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi , WriteConcern concern, DBEncoder encoder );
/**
* calls {@link DBCollection#update(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, boolean, boolean, com.massivecraft.mcore.xlib.mongodb.WriteConcern)} with default WriteConcern.
@ -192,8 +205,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub update
*/
public WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi )
throws MongoException {
public WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi ){
return update( q , o , upsert , multi , getWriteConcern() );
}
@ -205,7 +217,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub update
*/
public WriteResult update( DBObject q , DBObject o ) throws MongoException {
public WriteResult update( DBObject q , DBObject o ){
return update( q , o , false , false );
}
@ -217,7 +229,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub update
*/
public WriteResult updateMulti( DBObject q , DBObject o ) throws MongoException {
public WriteResult updateMulti( DBObject q , DBObject o ){
return update( q , o , false , true );
}
@ -235,7 +247,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub remove
*/
public WriteResult remove( DBObject o , WriteConcern concern ) throws MongoException {
public WriteResult remove( DBObject o , WriteConcern concern ){
return remove( o, concern, getDBEncoder());
}
@ -248,7 +260,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub remove
*/
public abstract WriteResult remove( DBObject o , WriteConcern concern, DBEncoder encoder ) throws MongoException ;
public abstract WriteResult remove( DBObject o , WriteConcern concern, DBEncoder encoder );
/**
* calls {@link DBCollection#remove(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.WriteConcern)} with the default WriteConcern
@ -257,8 +269,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub remove
*/
public WriteResult remove( DBObject o )
throws MongoException {
public WriteResult remove( DBObject o ){
return remove( o , getWriteConcern() );
}
@ -266,10 +277,10 @@ public abstract class DBCollection {
/**
* Finds objects
*/
abstract Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options, ReadPreference readPref, DBDecoder decoder ) throws MongoException ;
abstract Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options, ReadPreference readPref, DBDecoder decoder );
abstract Iterator<DBObject> __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options,
ReadPreference readPref, DBDecoder decoder, DBEncoder encoder ) throws MongoException ;
ReadPreference readPref, DBDecoder decoder, DBEncoder encoder );
/**
@ -284,7 +295,7 @@ public abstract class DBCollection {
* @dochub find
*/
@Deprecated
public DBCursor find( DBObject query , DBObject fields , int numToSkip , int batchSize , int options ) throws MongoException{
public DBCursor find( DBObject query , DBObject fields , int numToSkip , int batchSize , int options ){
return find(query, fields, numToSkip, batchSize).addOption(options);
}
@ -317,8 +328,7 @@ public abstract class DBCollection {
* @return the object, if found, otherwise <code>null</code>
* @throws MongoException
*/
public DBObject findOne( Object obj )
throws MongoException {
public DBObject findOne( Object obj ){
return findOne(obj, null);
}
@ -330,9 +340,10 @@ public abstract class DBCollection {
* @param obj any valid object
* @param fields fields to return
* @return the object, if found, otherwise <code>null</code>
* @throws MongoException
* @dochub find
*/
public DBObject findOne( Object obj, DBObject fields ) {
public DBObject findOne( Object obj, DBObject fields ){
Iterator<DBObject> iterator = __find( new BasicDBObject("_id", obj), fields, 0, -1, 0, getOptions(), getReadPreference(), getDecoder() );
return (iterator.hasNext() ? iterator.next() : null);
}
@ -347,8 +358,9 @@ public abstract class DBCollection {
* @param returnNew if true, the updated document is returned, otherwise the old document is returned (or it would be lost forever)
* @param upsert do upsert (insert if document not present)
* @return the document
* @throws MongoException
*/
public DBObject findAndModify(DBObject query, DBObject fields, DBObject sort, boolean remove, DBObject update, boolean returnNew, boolean upsert) {
public DBObject findAndModify(DBObject query, DBObject fields, DBObject sort, boolean remove, DBObject update, boolean returnNew, boolean upsert){
BasicDBObject cmd = new BasicDBObject( "findandmodify", _name);
if (query != null && !query.keySet().isEmpty())
@ -362,7 +374,7 @@ public abstract class DBCollection {
cmd.append( "remove", remove );
else {
if (update != null && !update.keySet().isEmpty()) {
// if 1st key doesnt start with $, then object will be inserted as is, need to check it
// if 1st key doesn't start with $, then object will be inserted as is, need to check it
String key = update.keySet().iterator().next();
if (key.charAt(0) != '$')
_checkObject(update, false, false);
@ -423,8 +435,9 @@ public abstract class DBCollection {
* @param sort
* @param update
* @return the old document
* @throws MongoException
*/
public DBObject findAndModify( DBObject query , DBObject sort , DBObject update){
public DBObject findAndModify( DBObject query , DBObject sort , DBObject update) {
return findAndModify( query, null, sort, false, update, false, false);
}
@ -434,8 +447,9 @@ public abstract class DBCollection {
* @param query
* @param update
* @return the old document
* @throws MongoException
*/
public DBObject findAndModify( DBObject query , DBObject update ) {
public DBObject findAndModify( DBObject query , DBObject update ){
return findAndModify( query, null, null, false, update, false, false );
}
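Editor's note: a minimal sketch of the two-argument findAndModify overload, which atomically applies the update and returns the pre-update document. The counter document and names are placeholders.

import com.massivecraft.mcore.xlib.mongodb.*;

public class FindAndModifySketch
{
    public static void main(String[] args) throws Exception
    {
        DBCollection coll = new MongoClient(new ServerAddress("localhost", 27017)).getDB("mydb").getCollection("counters");

        // Atomically increment a counter; the returned document is the state *before* the update.
        DBObject before = coll.findAndModify(
                new BasicDBObject("_id", "pageviews"),
                new BasicDBObject("$inc", new BasicDBObject("value", 1)));
        System.out.println("previous value: " + (before == null ? "none" : before.get("value")));
    }
}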
@ -444,6 +458,7 @@ public abstract class DBCollection {
* with fields=null, sort=null, remove=true, returnNew=false, upsert=false
* @param query
* @return the removed document
* @throws MongoException
*/
public DBObject findAndRemove( DBObject query ) {
return findAndModify( query, null, null, true, null, false, false );
@ -456,8 +471,7 @@ public abstract class DBCollection {
* @param keys an object with a key set of the fields desired for the index
* @throws MongoException
*/
public void createIndex( final DBObject keys )
throws MongoException {
public void createIndex( final DBObject keys ){
createIndex( keys , defaultOptions( keys ) );
}
@ -467,7 +481,7 @@ public abstract class DBCollection {
* @param options
* @throws MongoException
*/
public void createIndex( DBObject keys , DBObject options ) throws MongoException {
public void createIndex( DBObject keys , DBObject options ){
createIndex( keys, options, getDBEncoder());
}
@ -478,11 +492,12 @@ public abstract class DBCollection {
* @param encoder the DBEncoder to use
* @throws MongoException
*/
public abstract void createIndex( DBObject keys , DBObject options, DBEncoder encoder ) throws MongoException;
public abstract void createIndex( DBObject keys , DBObject options, DBEncoder encoder );
/**
* Creates an ascending index on a field with default options, if one does not already exist.
* @param name name of field to index on
* @throws MongoException
*/
public void ensureIndex( final String name ){
ensureIndex( new BasicDBObject( name , 1 ) );
@ -493,8 +508,7 @@ public abstract class DBCollection {
* @param keys an object with a key set of the fields desired for the index
* @throws MongoException
*/
public void ensureIndex( final DBObject keys )
throws MongoException {
public void ensureIndex( final DBObject keys ){
ensureIndex( keys , defaultOptions( keys ) );
}
@ -505,8 +519,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub indexes
*/
public void ensureIndex( DBObject keys , String name )
throws MongoException {
public void ensureIndex( DBObject keys , String name ){
ensureIndex( keys , name , false );
}
@ -517,8 +530,7 @@ public abstract class DBCollection {
* @param unique if the index should be unique
* @throws MongoException
*/
public void ensureIndex( DBObject keys , String name , boolean unique )
throws MongoException {
public void ensureIndex( DBObject keys , String name , boolean unique ){
DBObject options = defaultOptions( keys );
if (name != null && name.length()>0)
options.put( "name" , name );
@ -533,8 +545,7 @@ public abstract class DBCollection {
* @param optionsIN options for the index (name, unique, etc)
* @throws MongoException
*/
public void ensureIndex( final DBObject keys , final DBObject optionsIN )
throws MongoException {
public void ensureIndex( final DBObject keys , final DBObject optionsIN ){
if ( checkReadOnly( false ) ) return;
@ -645,8 +656,7 @@ public abstract class DBCollection {
* @return the object found, or <code>null</code> if the collection is empty
* @throws MongoException
*/
public DBObject findOne()
throws MongoException {
public DBObject findOne(){
return findOne( new BasicDBObject() );
}
@ -656,9 +666,8 @@ public abstract class DBCollection {
* @return the object found, or <code>null</code> if no such object exists
* @throws MongoException
*/
public DBObject findOne( DBObject o )
throws MongoException {
return findOne( o, null, getReadPreference());
public DBObject findOne( DBObject o ){
return findOne( o, null, null, getReadPreference());
}
/**
@ -666,20 +675,58 @@ public abstract class DBCollection {
* @param o the query object
* @param fields fields to return
* @return the object found, or <code>null</code> if no such object exists
* @throws MongoException
* @dochub find
*/
public DBObject findOne( DBObject o, DBObject fields ) {
return findOne( o, fields, getReadPreference());
return findOne( o, fields, null, getReadPreference());
}
/**
* Returns a single object from this collection matching the query.
* @param o the query object
* @param fields fields to return
* @param orderBy fields to order by
* @return the object found, or <code>null</code> if no such object exists
* @throws MongoException
* @dochub find
*/
public DBObject findOne( DBObject o, DBObject fields, DBObject orderBy){
return findOne(o, fields, orderBy, getReadPreference());
}
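Editor's note: a minimal sketch of the new orderBy-aware findOne overload: project one field and take the first document under a descending sort. Names are placeholders.

import com.massivecraft.mcore.xlib.mongodb.*;

public class FindOneSortedSketch
{
    public static void main(String[] args) throws Exception
    {
        DBCollection coll = new MongoClient(new ServerAddress("localhost", 27017)).getDB("mydb").getCollection("scores");

        // Highest score for one player: return only the "score" field, sorted descending.
        DBObject top = coll.findOne(
                new BasicDBObject("player", "alice"),
                new BasicDBObject("score", 1),
                new BasicDBObject("score", -1));
        System.out.println(top);
    }
}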
/**
* Returns a single object from this collection matching the query.
* @param o the query object
* @param fields fields to return
* @param readPref
* @return the object found, or <code>null</code> if no such object exists
* @throws MongoException
* @dochub find
*/
public DBObject findOne( DBObject o, DBObject fields, ReadPreference readPref ) {
Iterator<DBObject> i = __find( o , fields , 0 , -1 , 0, getOptions(), readPref, getDecoder() );
public DBObject findOne( DBObject o, DBObject fields, ReadPreference readPref ){
return findOne(o, fields, null, readPref);
}
/**
* Returns a single object from this collection matching the query.
* @param o the query object
* @param fields fields to return
* @param orderBy fields to order by
* @return the object found, or <code>null</code> if no such object exists
* @throws MongoException
* @dochub find
*/
public DBObject findOne( DBObject o, DBObject fields, DBObject orderBy, ReadPreference readPref ){
QueryOpBuilder queryOpBuilder = new QueryOpBuilder().addQuery(o).addOrderBy(orderBy);
if (getDB().getMongo().isMongosConnection()) {
queryOpBuilder.addReadPreference(readPref.toDBObject());
}
Iterator<DBObject> i = __find(queryOpBuilder.get(), fields , 0 , -1 , 0, getOptions(), readPref, getDecoder() );
DBObject obj = (i.hasNext() ? i.next() : null);
if ( obj != null && ( fields != null && fields.keySet().size() > 0 ) ){
obj.markAsPartialObject();
@ -733,8 +780,9 @@ public abstract class DBCollection {
* @param jo the <code>DBObject</code> to save
* will add <code>_id</code> field to jo if needed
* @return
* @throws MongoException
*/
public WriteResult save( DBObject jo ) {
public WriteResult save( DBObject jo ){
return save(jo, getWriteConcern());
}
@ -745,8 +793,7 @@ public abstract class DBCollection {
* @return
* @throws MongoException
*/
public WriteResult save( DBObject jo, WriteConcern concern )
throws MongoException {
public WriteResult save( DBObject jo, WriteConcern concern ){
if ( checkReadOnly( true ) )
return null;
@ -777,8 +824,7 @@ public abstract class DBCollection {
* Drops all indices from this collection
* @throws MongoException
*/
public void dropIndexes()
throws MongoException {
public void dropIndexes(){
dropIndexes( "*" );
}
@ -788,8 +834,7 @@ public abstract class DBCollection {
* @param name the index name
* @throws MongoException
*/
public void dropIndexes( String name )
throws MongoException {
public void dropIndexes( String name ){
DBObject cmd = BasicDBObjectBuilder.start()
.add( "deleteIndexes" , getName() )
.add( "index" , name )
@ -806,8 +851,7 @@ public abstract class DBCollection {
* Drops (deletes) this collection. Use with care.
* @throws MongoException
*/
public void drop()
throws MongoException {
public void drop(){
resetIndexCache();
CommandResult res =_db.command( BasicDBObjectBuilder.start().add( "drop" , getName() ).get() );
if (res.ok() || res.getErrorMessage().equals( "ns not found" ))
@ -820,8 +864,7 @@ public abstract class DBCollection {
* @return
* @throws MongoException
*/
public long count()
throws MongoException {
public long count(){
return getCount(new BasicDBObject(), null);
}
@ -831,33 +874,52 @@ public abstract class DBCollection {
* @return
* @throws MongoException
*/
public long count(DBObject query)
throws MongoException {
public long count(DBObject query){
return getCount(query, null);
}
/**
* returns the number of documents that match a query.
* @param query query to match
* @param readPrefs ReadPreferences for this query
* @return
* @throws MongoException
*/
public long count(DBObject query, ReadPreference readPrefs ){
return getCount(query, null, readPrefs);
}
/**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject)} with an empty query and null fields.
* @return number of documents that match query
* @throws MongoException
*/
public long getCount()
throws MongoException {
public long getCount(){
return getCount(new BasicDBObject(), null);
}
/**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.ReadPreference)} with empty query and null fields.
* @param readPrefs ReadPreferences for this command
* @return number of documents that match query
* @throws MongoException
*/
public long getCount(ReadPreference readPrefs){
return getCount(new BasicDBObject(), null, readPrefs);
}
/**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject)} with null fields.
* @param query query to match
* @return
* @throws MongoException
*/
public long getCount(DBObject query)
throws MongoException {
public long getCount(DBObject query){
return getCount(query, null);
}
/**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, long, long)} with limit=0 and skip=0
* @param query query to match
@ -865,11 +927,35 @@ public abstract class DBCollection {
* @return
* @throws MongoException
*/
public long getCount(DBObject query, DBObject fields)
throws MongoException {
public long getCount(DBObject query, DBObject fields){
return getCount( query , fields , 0 , 0 );
}
/**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, long, long, com.massivecraft.mcore.xlib.mongodb.ReadPreference)} with limit=0 and skip=0
* @param query query to match
* @param fields fields to return
* @param readPrefs ReadPreferences for this command
* @return
* @throws MongoException
*/
public long getCount(DBObject query, DBObject fields, ReadPreference readPrefs){
return getCount( query , fields , 0 , 0, readPrefs );
}
/**
* calls {@link DBCollection#getCount(com.massivecraft.mcore.xlib.mongodb.DBObject, com.massivecraft.mcore.xlib.mongodb.DBObject, long, long, com.massivecraft.mcore.xlib.mongodb.ReadPreference)} with the DBCollection's ReadPreference
* @param query query to match
* @param fields fields to return
* @param limit limit the count to this value
* @param skip skip number of entries to skip
* @return
* @throws MongoException
*/
public long getCount(DBObject query, DBObject fields, long limit, long skip){
return getCount(query, fields, limit, skip, getReadPreference());
}
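Editor's note: a minimal sketch of counting with an explicit read preference, assuming a replica set where secondary reads are acceptable. Names are placeholders.

import com.massivecraft.mcore.xlib.mongodb.*;

public class CountWithReadPreferenceSketch
{
    public static void main(String[] args) throws Exception
    {
        DBCollection coll = new MongoClient(new ServerAddress("localhost", 27017)).getDB("mydb").getCollection("events");

        // Route the count command to a secondary when one is available.
        long n = coll.count(new BasicDBObject("type", "login"), ReadPreference.secondaryPreferred());
        System.out.println(n + " login events");
    }
}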
/**
* Returns the number of documents in the collection
* that match the specified query
@ -878,12 +964,12 @@ public abstract class DBCollection {
* @param fields fields to return
* @param limit limit the count to this value
* @param skip number of entries to skip
* @param readPrefs ReadPreferences for this command
* @return number of documents that match query and fields
* @throws MongoException
*/
public long getCount(DBObject query, DBObject fields, long limit, long skip )
throws MongoException {
public long getCount(DBObject query, DBObject fields, long limit, long skip, ReadPreference readPrefs ){
BasicDBObject cmd = new BasicDBObject();
cmd.put("count", getName());
cmd.put("query", query);
@ -896,8 +982,7 @@ public abstract class DBCollection {
if ( skip > 0 )
cmd.put( "skip" , skip );
CommandResult res = _db.command(cmd,getOptions());
CommandResult res = _db.command(cmd,getOptions(),readPrefs);
if ( ! res.ok() ){
String errmsg = res.getErrorMessage();
@ -913,14 +998,17 @@ public abstract class DBCollection {
return res.getLong("n");
}
CommandResult command(DBObject cmd, int options, ReadPreference readPrefs){
return _db.command(cmd,getOptions(),readPrefs);
}
/**
* Calls {@link DBCollection#rename(java.lang.String, boolean)} with dropTarget=false
* @param newName new collection name (not a full namespace)
* @return the new collection
* @throws MongoException
*/
public DBCollection rename( String newName )
throws MongoException {
public DBCollection rename( String newName ){
return rename(newName, false);
}
@ -931,8 +1019,7 @@ public abstract class DBCollection {
* @return the new collection
* @throws MongoException
*/
public DBCollection rename( String newName, boolean dropTarget )
throws MongoException {
public DBCollection rename( String newName, boolean dropTarget ){
CommandResult ret =
_db.getSisterDB( "admin" )
.command( BasicDBObjectBuilder.start()
@ -955,8 +1042,7 @@ public abstract class DBCollection {
* @throws MongoException
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/
public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce )
throws MongoException {
public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce ){
return group( key , cond , initial , reduce , null );
}
@ -971,12 +1057,28 @@ public abstract class DBCollection {
* @throws MongoException
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/
public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce , String finalize )
throws MongoException {
public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce , String finalize ){
GroupCommand cmd = new GroupCommand(this, key, cond, initial, reduce, finalize);
return group( cmd );
}
/**
* Applies a group operation
* @param key - { a : true }
* @param cond - optional condition on query
* @param reduce javascript reduce function
* @param initial initial value for first match on a key
* @param finalize An optional function that can operate on the result(s) of the reduce function.
* @param readPrefs ReadPreferences for this command
* @return
* @throws MongoException
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/
public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce , String finalize, ReadPreference readPrefs ){
GroupCommand cmd = new GroupCommand(this, key, cond, initial, reduce, finalize);
return group( cmd, readPrefs );
}
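Editor's note: a minimal sketch of the read-preference-aware group overload; the reduce function runs server-side, and collection, field and function names are placeholders.

import com.massivecraft.mcore.xlib.mongodb.*;

public class GroupWithReadPreferenceSketch
{
    public static void main(String[] args) throws Exception
    {
        DBCollection coll = new MongoClient(new ServerAddress("localhost", 27017)).getDB("mydb").getCollection("orders");

        // Count shipped orders per customer, served from a secondary when possible.
        DBObject byCustomer = coll.group(
                new BasicDBObject("customer", true),              // key
                new BasicDBObject("status", "shipped"),           // cond
                new BasicDBObject("count", 0),                    // initial
                "function(obj, prev) { prev.count += 1; }",       // reduce
                null,                                             // finalize
                ReadPreference.secondaryPreferred());
        System.out.println(byCustomer);
    }
}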
/**
* Applies a group operation
* @param cmd the group command
@ -985,12 +1087,23 @@ public abstract class DBCollection {
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/
public DBObject group( GroupCommand cmd ) {
CommandResult res = _db.command( cmd.toDBObject(), getOptions() );
return group(cmd, getReadPreference());
}
/**
* Applies a group operation
* @param cmd the group command
* @param readPrefs ReadPreferences for this command
* @return
* @throws MongoException
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/
public DBObject group( GroupCommand cmd, ReadPreference readPrefs ) {
CommandResult res = _db.command( cmd.toDBObject(), getOptions(), readPrefs );
res.throwOnError();
return (DBObject)res.get( "retval" );
}
/**
* @deprecated prefer the {@link DBCollection#group(com.massivecraft.mcore.xlib.mongodb.GroupCommand)} which is more standard
* Applies a group operation
@ -1000,10 +1113,9 @@ public abstract class DBCollection {
* @see <a href="http://www.mongodb.org/display/DOCS/Aggregation">http://www.mongodb.org/display/DOCS/Aggregation</a>
*/
@Deprecated
public DBObject group( DBObject args )
throws MongoException {
public DBObject group( DBObject args ){
args.put( "ns" , getName() );
CommandResult res = _db.command( new BasicDBObject( "group" , args ), getOptions() );
CommandResult res = _db.command( new BasicDBObject( "group" , args ), getOptions(), getReadPreference() );
res.throwOnError();
return (DBObject)res.get( "retval" );
}
@ -1012,27 +1124,50 @@ public abstract class DBCollection {
* find distinct values for a key
* @param key
* @return
* @throws MongoException
*/
@SuppressWarnings("rawtypes")
public List distinct( String key ){
return distinct( key , new BasicDBObject() );
}
/**
* find distinct values for a key
* @param key
* @param readPrefs
* @return
* @throws MongoException
*/
public List distinct( String key, ReadPreference readPrefs ){
return distinct( key , new BasicDBObject(), readPrefs );
}
/**
* find distinct values for a key
* @param key
* @param query query to match
* @return
* @throws MongoException
*/
@SuppressWarnings("rawtypes")
public List distinct( String key , DBObject query ){
return distinct(key, query, getReadPreference());
}
/**
* find distinct values for a key
* @param key
* @param query query to match
* @param readPrefs
* @return
* @throws MongoException
*/
public List distinct( String key , DBObject query, ReadPreference readPrefs ){
DBObject c = BasicDBObjectBuilder.start()
.add( "distinct" , getName() )
.add( "key" , key )
.add( "query" , query )
.get();
CommandResult res = _db.command( c, getOptions() );
CommandResult res = _db.command( c, getOptions(), readPrefs );
res.throwOnError();
return (List)(res.get( "values" ));
}
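Editor's note: a minimal sketch of distinct with an explicit read preference. Field, query and collection names are placeholders.

import java.util.List;

import com.massivecraft.mcore.xlib.mongodb.*;

public class DistinctWithReadPreferenceSketch
{
    public static void main(String[] args) throws Exception
    {
        DBCollection coll = new MongoClient(new ServerAddress("localhost", 27017)).getDB("mydb").getCollection("users");

        // Distinct country codes among active users, routed to a secondary when possible.
        List countries = coll.distinct("country", new BasicDBObject("active", true), ReadPreference.secondaryPreferred());
        System.out.println(countries);
    }
}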
@ -1053,7 +1188,7 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub mapreduce
*/
public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , DBObject query ) throws MongoException{
public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , DBObject query ){
return mapReduce( new MapReduceCommand( this , map , reduce , outputTarget , MapReduceCommand.OutputType.REPLACE, query ) );
}
@ -1080,11 +1215,41 @@ public abstract class DBCollection {
* @throws MongoException
* @dochub mapreduce
*/
public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , MapReduceCommand.OutputType outputType , DBObject query )
throws MongoException{
public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , MapReduceCommand.OutputType outputType , DBObject query ){
return mapReduce( new MapReduceCommand( this , map , reduce , outputTarget , outputType , query ) );
}
/**
* performs a map reduce operation
* Specify an outputType to control job execution
* * INLINE - Return results inline
* * REPLACE - Replace the output collection with the job output
* * MERGE - Merge the job output with the existing contents of outputTarget
* * REDUCE - Reduce the job output with the existing contents of
* outputTarget
*
* @param map
* map function in javascript code
* @param outputTarget
* optional - leave null to use a temporary collection
* @param outputType
* set the type of job output
* @param reduce
* reduce function in javascript code
* @param query
* to match
* @param readPrefs
* ReadPreferences for this operation
* @return
* @throws MongoException
* @dochub mapreduce
*/
public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , MapReduceCommand.OutputType outputType , DBObject query, ReadPreference readPrefs ){
MapReduceCommand command = new MapReduceCommand( this , map , reduce , outputTarget , outputType , query );
command.setReadPreference(readPrefs);
return mapReduce( command );
}
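Editor's note: a minimal sketch of the read-preference-aware mapReduce overload with INLINE output, which needs no target collection. The JavaScript functions, query and names are placeholders.

import com.massivecraft.mcore.xlib.mongodb.*;

public class MapReduceInlineSketch
{
    public static void main(String[] args) throws Exception
    {
        DBCollection coll = new MongoClient(new ServerAddress("localhost", 27017)).getDB("mydb").getCollection("orders");

        String map = "function() { emit(this.customer, this.total); }";
        String reduce = "function(key, values) { return Array.sum(values); }";

        // Inline output returns the reduced documents directly in the command result.
        MapReduceOutput out = coll.mapReduce(map, reduce, null,
                MapReduceCommand.OutputType.INLINE,
                new BasicDBObject("status", "shipped"),
                ReadPreference.secondaryPreferred());
        for (DBObject doc : out.results())
        {
            System.out.println(doc);
        }
    }
}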
/**
* performs a map reduce operation
*
@ -1093,14 +1258,10 @@ public abstract class DBCollection {
* @return
* @throws MongoException
*/
public MapReduceOutput mapReduce( MapReduceCommand command ) throws MongoException{
public MapReduceOutput mapReduce( MapReduceCommand command ){
DBObject cmd = command.toDBObject();
// if type in inline, then query options like slaveOk is fine
CommandResult res = null;
if (command.getOutputType() == MapReduceCommand.OutputType.INLINE)
res = _db.command( cmd, getOptions(), command.getReadPreference() != null ? command.getReadPreference() : getReadPreference() );
else
res = _db.command( cmd );
CommandResult res = _db.command( cmd, getOptions(), command.getReadPreference() != null ? command.getReadPreference() : getReadPreference() );
res.throwOnError();
return new MapReduceOutput( this , cmd, res );
}
@ -1113,19 +1274,47 @@ public abstract class DBCollection {
* @return
* @throws MongoException
*/
public MapReduceOutput mapReduce( DBObject command ) throws MongoException{
public MapReduceOutput mapReduce( DBObject command ){
if ( command.get( "mapreduce" ) == null && command.get( "mapReduce" ) == null )
throw new IllegalArgumentException( "need mapreduce arg" );
CommandResult res = _db.command( command );
CommandResult res = _db.command( command, getOptions(), getReadPreference() );
res.throwOnError();
return new MapReduceOutput( this , command, res );
}
/**
* performs an aggregation operation
*
* @param firstOp
* requisite first operation to be performed in the aggregation pipeline
*
* @param additionalOps
* additional operations to be performed in the aggregation pipeline
* @return The aggregation operation's result set
*
*/
public AggregationOutput aggregate( DBObject firstOp, DBObject ... additionalOps){
if (firstOp == null)
throw new IllegalArgumentException("aggregate can not accept null pipeline operation");
DBObject command = new BasicDBObject("aggregate", _name );
List<DBObject> pipelineOps = new ArrayList<DBObject>();
pipelineOps.add(firstOp);
Collections.addAll(pipelineOps, additionalOps);
command.put( "pipeline", pipelineOps );
CommandResult res = _db.command( command, getOptions(), getReadPreference() );
res.throwOnError();
return new AggregationOutput( command, res );
}
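Editor's note: a minimal sketch of the new aggregate helper: a $match stage followed by a $group stage, with results read back from AggregationOutput. Collection and field names are placeholders.

import com.massivecraft.mcore.xlib.mongodb.*;

public class AggregateSketch
{
    public static void main(String[] args) throws Exception
    {
        DBCollection coll = new MongoClient(new ServerAddress("localhost", 27017)).getDB("mydb").getCollection("orders");

        // Pipeline: keep shipped orders, then sum totals per customer.
        DBObject match = new BasicDBObject("$match", new BasicDBObject("status", "shipped"));
        DBObject group = new BasicDBObject("$group",
                new BasicDBObject("_id", "$customer").append("total", new BasicDBObject("$sum", "$total")));

        AggregationOutput out = coll.aggregate(match, group);
        for (DBObject doc : out.results())
        {
            System.out.println(doc);
        }
    }
}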
/**
* Return a list of the indexes for this collection. Each object
* in the list is the "info document" from MongoDB
*
* @return list of index documents
* @throws MongoException
*/
public List<DBObject> getIndexInfo() {
BasicDBObject cmd = new BasicDBObject();
@ -1147,8 +1336,7 @@ public abstract class DBCollection {
* @param keys keys of the index
* @throws MongoException
*/
public void dropIndex( DBObject keys )
throws MongoException {
public void dropIndex( DBObject keys ){
dropIndexes( genIndexName( keys ) );
}
@ -1157,27 +1345,28 @@ public abstract class DBCollection {
* @param name name of index to drop
* @throws MongoException
*/
public void dropIndex( String name )
throws MongoException {
public void dropIndex( String name ){
dropIndexes( name );
}
/**
* gets the collections statistics ("collstats" command)
* @return
* @throws MongoException
*/
public CommandResult getStats() {
return getDB().command(new BasicDBObject("collstats", getName()), getOptions());
return getDB().command(new BasicDBObject("collstats", getName()), getOptions(), getReadPreference());
}
/**
* returns whether or not this is a capped collection
* @return
* @throws MongoException
*/
public boolean isCapped() {
CommandResult stats = getStats();
Object capped = stats.get("capped");
return(capped != null && (Integer)capped == 1);
return(capped != null && ( capped.equals(1) || capped.equals(true) ) );
}
// ------
@ -1214,6 +1403,9 @@ public abstract class DBCollection {
* Checks key strings for invalid characters.
*/
private void _checkKeys( DBObject o ) {
if ( o instanceof LazyDBObject || o instanceof LazyDBList )
return;
for ( String s : o.keySet() ){
validateKey ( s );
Object inner = o.get( s );
@ -1329,7 +1521,6 @@ public abstract class DBCollection {
* @param c the class
* @throws IllegalArgumentException if <code>c</code> is not a DBObject
*/
@SuppressWarnings("rawtypes")
public void setObjectClass( Class c ){
if ( c == null ){
// reset
@ -1351,7 +1542,6 @@ public abstract class DBCollection {
* Gets the default class for objects in the collection
* @return the class
*/
@SuppressWarnings("rawtypes")
public Class getObjectClass(){
return _objectClass;
}
@ -1361,7 +1551,6 @@ public abstract class DBCollection {
* @param path
* @param c
*/
@SuppressWarnings("rawtypes")
public void setInternalClass( String path , Class c ){
_internalClass.put( path , c );
}
@ -1371,7 +1560,6 @@ public abstract class DBCollection {
* @param path
* @return
*/
@SuppressWarnings("rawtypes")
protected Class getInternalClass( String path ){
Class c = _internalClass.get( path );
if ( c != null )
@ -1427,8 +1615,8 @@ public abstract class DBCollection {
/**
* makes this query ok to run on a slave node
*
* @deprecated Replaced with ReadPreference.SECONDARY
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference.SECONDARY
* @deprecated Replaced with {@code ReadPreference.secondaryPreferred()}
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference#secondaryPreferred()
*/
@Deprecated
public void slaveOk(){
@ -1512,9 +1700,7 @@ public abstract class DBCollection {
private DBEncoderFactory _encoderFactory;
final Bytes.OptionHolder _options;
@SuppressWarnings("rawtypes")
protected Class _objectClass = null;
@SuppressWarnings("rawtypes")
private Map<String,Class> _internalClass = Collections.synchronizedMap( new HashMap<String,Class>() );
private ReflectionDBObject.JavaWrapper _wrapper = null;

View File

@ -47,7 +47,7 @@ public interface DBConnector {
* @return the write result
* @throws MongoException
*/
public WriteResult say( DB db , OutMessage m , WriteConcern concern ) throws MongoException;
public WriteResult say( DB db , OutMessage m , WriteConcern concern );
/**
* does a write operation
* @param db the database
@ -57,7 +57,7 @@ public interface DBConnector {
* @return the write result
* @throws MongoException
*/
public WriteResult say( DB db , OutMessage m , WriteConcern concern , ServerAddress hostNeeded ) throws MongoException;
public WriteResult say( DB db , OutMessage m , WriteConcern concern , ServerAddress hostNeeded );
/**
* does a read operation on the database
@ -70,7 +70,7 @@ public interface DBConnector {
* @throws MongoException
*/
public Response call( DB db , DBCollection coll , OutMessage m ,
ServerAddress hostNeeded , DBDecoder decoder ) throws MongoException;
ServerAddress hostNeeded , DBDecoder decoder );
/**
*
* does a read operation on the database
@ -82,7 +82,7 @@ public interface DBConnector {
* @return the read result
* @throws MongoException
*/
public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries ) throws MongoException;
public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries );
/**
* does a read operation on the database
@ -96,11 +96,21 @@ public interface DBConnector {
* @return the read result
* @throws MongoException
*/
public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries , ReadPreference readPref , DBDecoder decoder ) throws MongoException;
public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries , ReadPreference readPref , DBDecoder decoder );
/**
* returns true if the connector is in a usable state
* @return
*/
public boolean isOpen();
/**
* Authenticate using the given credentials.
*
* @param credentials the credentials.
* @return the result of the authentication command, if successful
* @throws CommandFailureException if the authentication failed
* @since 2.11.0
*/
public CommandResult authenticate(MongoCredential credentials);
}

View File

@ -18,11 +18,14 @@
package com.massivecraft.mcore.xlib.mongodb;
import java.io.Closeable;
import java.util.*;
import com.massivecraft.mcore.xlib.mongodb.DBApiLayer.Result;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
/** An iterator over database results.
* Doing a <code>find()</code> query on a collection returns a
@ -158,7 +161,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
/**
* Informs the database of an indexed field of the collection in order to improve performance.
* @param indexName the name of an index
* @return same DBCursort for chaining operations
* @return same DBCursor for chaining operations
*/
public DBCursor hint( String indexName ){
if ( _it != null )
@ -195,6 +198,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* "n" : the number of records that the database returned
* "millis" : how long it took the database to execute the query
* @return a <code>DBObject</code>
* @throws MongoException
* @dochub explain
*/
public DBObject explain(){
@ -261,7 +265,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* Discards a given number of elements at the beginning of the cursor.
* @param n the number of elements to skip
* @return a cursor pointing to the new first element of the results
* @throws RuntimeException if the cursor has started to be iterated through
* @throws IllegalStateException if the cursor has started to be iterated through
*/
public DBCursor skip( int n ){
if ( _it != null )
@ -294,8 +298,8 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
*
* @return a copy of the same cursor (for chaining)
*
* @deprecated Replaced with ReadPreference.SECONDARY
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference.SECONDARY
* @deprecated Replaced with {@code ReadPreference.secondaryPreferred()}
* @see ReadPreference#secondaryPreferred()
*/
@Deprecated
public DBCursor slaveOk(){
@ -342,31 +346,27 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
// ---- internal stuff ------
private void _check()
throws MongoException {
if ( _it != null )
private void _check() {
if (_it != null)
return;
_lookForHints();
DBObject foo = _query;
if (hasSpecialQueryFields()) {
foo = _specialFields == null ? new BasicDBObject() : _specialFields;
QueryOpBuilder builder = new QueryOpBuilder()
.addQuery(_query)
.addOrderBy(_orderBy)
.addHint(_hintDBObj)
.addHint(_hint)
.addExplain(_explain)
.addSnapshot(_snapshot)
.addSpecialFields(_specialFields);
_addToQueryObject(foo, "query", _query, true);
_addToQueryObject(foo, "orderby", _orderBy, false);
if (_hint != null)
_addToQueryObject(foo, "$hint", _hint);
if (_hintDBObj != null)
_addToQueryObject(foo, "$hint", _hintDBObj);
if (_explain)
foo.put("$explain", true);
if (_snapshot)
foo.put("$snapshot", true);
if (_collection.getDB().getMongo().isMongosConnection()) {
builder.addReadPreference(_readPref.toDBObject());
}
_it = _collection.__find(foo, _keysWanted, _skip, _batchSize, _limit, _options, _readPref, getDecoder());
_it = _collection.__find(builder.get(), _keysWanted, _skip, _batchSize, _limit,
_options, _readPref, getDecoder());
}
// Only create a new decoder if there is a decoder factory explicitly set on the collection. Otherwise return null
@ -400,37 +400,6 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
}
}
boolean hasSpecialQueryFields(){
if ( _specialFields != null )
return true;
if ( _orderBy != null && _orderBy.keySet().size() > 0 )
return true;
if ( _hint != null || _hintDBObj != null || _snapshot )
return true;
return _explain;
}
void _addToQueryObject( DBObject query , String field , DBObject thing , boolean sendEmpty ){
if ( thing == null )
return;
if ( ! sendEmpty && thing.keySet().size() == 0 )
return;
_addToQueryObject( query , field , thing );
}
void _addToQueryObject( DBObject query , String field , Object thing ){
if ( thing == null )
return;
query.put( field , thing );
}
void _checkType( CursorType type ){
if ( _cursorType == null ){
_cursorType = type;
@ -443,8 +412,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
throw new IllegalArgumentException( "can't switch cursor access methods" );
}
private DBObject _next()
throws MongoException {
private DBObject _next() {
if ( _cursorType == null )
_checkType( CursorType.ITERATOR );
@ -487,8 +455,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
throw new IllegalArgumentException("_it not a real result" );
}
private boolean _hasNext()
throws MongoException {
private boolean _hasNext() {
_check();
if ( _limit > 0 && _num >= _limit )
@ -512,7 +479,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return
* @throws MongoException
*/
public boolean hasNext() throws MongoException {
public boolean hasNext() {
_checkType( CursorType.ITERATOR );
return _hasNext();
}
@ -522,14 +489,14 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return the next element
* @throws MongoException
*/
public DBObject next() throws MongoException {
public DBObject next() {
_checkType( CursorType.ITERATOR );
return _next();
}
/**
* Returns the element the cursor is at.
* @return the next element
* @return the current element
*/
public DBObject curr(){
_checkType( CursorType.ITERATOR );
@ -546,8 +513,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
// ---- array api -----
void _fill( int n )
throws MongoException {
void _fill( int n ){
_checkType( CursorType.ARRAY );
while ( n >= _all.size() && _hasNext() )
_next();
@ -561,8 +527,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return the number of elements in the array
* @throws MongoException
*/
public int length()
throws MongoException {
public int length() {
_checkType( CursorType.ARRAY );
_fill( Integer.MAX_VALUE );
return _all.size();
@ -573,8 +538,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return an array of elements
* @throws MongoException
*/
public List<DBObject> toArray()
throws MongoException {
public List<DBObject> toArray(){
return toArray( Integer.MAX_VALUE );
}
@ -584,8 +548,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return an array of objects
* @throws MongoException
*/
public List<DBObject> toArray( int max )
throws MongoException {
public List<DBObject> toArray( int max ) {
_checkType( CursorType.ARRAY );
_fill( max - 1 );
return _all;
@ -596,6 +559,7 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* Iterates cursor and counts objects
* @see #count()
* @return num objects
* @throws MongoException
*/
public int itcount(){
int n = 0;
@ -613,14 +577,13 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return the number of objects
* @throws MongoException
*/
public int count()
throws MongoException {
public int count() {
if ( _collection == null )
throw new IllegalArgumentException( "why is _collection null" );
if ( _collection._db == null )
throw new IllegalArgumentException( "why is _collection._db null" );
return (int)_collection.getCount(this._query, this._keysWanted);
return (int)_collection.getCount(this._query, this._keysWanted, getReadPreference());
}
/**
@ -630,14 +593,13 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
* @return the number of objects
* @throws MongoException
*/
public int size()
throws MongoException {
public int size() {
if ( _collection == null )
throw new IllegalArgumentException( "why is _collection null" );
if ( _collection._db == null )
throw new IllegalArgumentException( "why is _collection._db null" );
return (int)_collection.getCount(this._query, this._keysWanted, this._limit, this._skip );
return (int)_collection.getCount(this._query, this._keysWanted, this._limit, this._skip, getReadPreference() );
}
@ -731,6 +693,13 @@ public class DBCursor implements Iterator<DBObject> , Iterable<DBObject>, Closea
return sb.toString();
}
boolean hasFinalizer() {
if (_it == null || ! (_it instanceof Result)) {
return false;
}
return ((Result) _it).hasFinalizer();
}
// ---- query setup ----
private final DBCollection _collection;
private final DBObject _query;

View File

@ -20,14 +20,32 @@ package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.mongodb.util.ThreadUtil;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import java.util.logging.Logger;
@ -36,6 +54,7 @@ import java.util.logging.Logger;
* Methods implemented at the port level should throw the raw exceptions like IOException,
* so that the connector above can make appropriate decisions on how to handle.
*/
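
Concretely, the port lets IOException escape and the connector layer (DBTCPConnector, later in this diff) decides whether to retry or to surface it. A minimal sketch of that division of labour, with msg standing in for an already-prepared OutMessage:

    try {
        port.say(msg);                  // port-level call: a raw IOException may escape
    } catch (IOException ioe) {
        // connector-level decision: give up the port, then report a driver-level error
        throw new MongoException.Network("Write operation to server " + port.host() + " failed", ioe);
    }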
@SuppressWarnings({"unused"})
public class DBPort {
/**
@ -50,6 +69,7 @@ public class DBPort {
* creates a new DBPort
* @param addr the server address
*/
@SuppressWarnings("deprecation")
public DBPort( ServerAddress addr ){
this( addr , null , new MongoOptions() );
}
@ -70,12 +90,8 @@ public class DBPort {
return go( msg, coll );
}
Response call( OutMessage msg , DBCollection coll , DBDecoder decoder) throws IOException{
return go( msg, coll, false, null, decoder);
}
Response call( OutMessage msg , DBCollection coll , ReadPreference readPref , DBDecoder decoder) throws IOException{
return go( msg, coll, false, readPref, decoder);
Response call(OutMessage msg, DBCollection coll, DBDecoder decoder) throws IOException{
return go( msg, coll, false, decoder);
}
void say( OutMessage msg )
@ -85,14 +101,14 @@ public class DBPort {
private synchronized Response go( OutMessage msg , DBCollection coll )
throws IOException {
return go( msg , coll , false, null, null );
return go( msg , coll , false, null );
}
private synchronized Response go( OutMessage msg , DBCollection coll , DBDecoder decoder ) throws IOException{
return go( msg, coll, false, null, decoder );
return go( msg, coll, false, decoder );
}
private synchronized Response go( OutMessage msg , DBCollection coll , boolean forceReponse , ReadPreference readPref, DBDecoder decoder)
private synchronized Response go(OutMessage msg, DBCollection coll, boolean forceResponse, DBDecoder decoder)
throws IOException {
if ( _processingResponse ){
@ -105,7 +121,7 @@ public class DBPort {
}
}
_calls++;
_calls.incrementAndGet();
if ( _socket == null )
_open();
@ -115,12 +131,13 @@ public class DBPort {
try {
msg.prepare();
_activeState = new ActiveState(msg);
msg.pipe( _out );
if ( _pool != null )
_pool._everWorked = true;
if ( coll == null && ! forceReponse )
if ( coll == null && ! forceResponse )
return null;
_processingResponse = true;
@ -131,6 +148,7 @@ public class DBPort {
throw ioe;
}
finally {
_activeState = null;
_processingResponse = false;
}
}
@ -141,20 +159,17 @@ public class DBPort {
}
synchronized private Response findOne( DB db , String coll , DBObject q ) throws IOException {
OutMessage msg = OutMessage.query( db._mongo , 0 , db.getName() + "." + coll , 0 , -1 , q , null );
OutMessage msg = OutMessage.query( db.getCollection(coll) , 0 , 0 , -1 , q , null );
try {
Response res = go( msg , db.getCollection( coll ) , null );
return res;
} finally {
msg.doneWithMessage();
}
@SuppressWarnings("unused")
synchronized private Response findOne( String ns , DBObject q ) throws IOException{
OutMessage msg = OutMessage.query( null , 0 , ns , 0 , -1 , q , null );
Response res = go( msg , null , true, null, null );
return res;
}
synchronized CommandResult runCommand( DB db , DBObject cmd ) throws IOException {
Response res = findOne( db , "$cmd" , cmd );
Response res = findOne(db, "$cmd", cmd);
return convertToCommandResult(cmd, res);
}
@ -168,16 +183,16 @@ public class DBPort {
if ( data == null )
throw new MongoInternalException( "something is wrong, no command result" );
CommandResult cr = new CommandResult(cmd, res.serverUsed());
CommandResult cr = new CommandResult(res.serverUsed());
cr.putAll( data );
return cr;
}
synchronized CommandResult tryGetLastError( DB db , long last, WriteConcern concern) throws IOException {
if ( last != _calls )
if ( last != _calls.get() )
return null;
return getLastError( db , concern );
return getLastError(db, concern);
}
/**
@ -193,8 +208,7 @@ public class DBPort {
_open();
}
boolean _open()
throws IOException {
void _open() throws IOException {
long sleepTime = 100;
@ -203,11 +217,9 @@ public class DBPort {
maxAutoConnectRetryTime = _options.maxAutoConnectRetryTime;
}
boolean successfullyConnected = false;
final long start = System.currentTimeMillis();
while ( true ){
IOException lastError = null;
do {
try {
_socket = _options.socketFactory.createSocket();
_socket.connect( _addr , _options.connectTimeout );
@ -217,30 +229,28 @@ public class DBPort {
_socket.setSoTimeout( _options.socketTimeout );
_in = new BufferedInputStream( _socket.getInputStream() );
_out = _socket.getOutputStream();
return true;
successfullyConnected = true;
}
catch ( IOException ioe ){
lastError = new IOException( "couldn't connect to [" + _addr + "] bc:" + ioe );
_logger.log( Level.INFO , "connect fail to : " + _addr , ioe );
catch ( IOException e ){
close();
}
if ( ! _options.autoConnectRetry || ( _pool != null && ! _pool._everWorked ) )
throw lastError;
if (!_options.autoConnectRetry || (_pool != null && !_pool._everWorked))
throw e;
long sleptSoFar = System.currentTimeMillis() - start;
long waitSoFar = System.currentTimeMillis() - start;
if ( sleptSoFar >= maxAutoConnectRetryTime )
throw lastError;
if (waitSoFar >= maxAutoConnectRetryTime)
throw e;
if ( sleepTime + sleptSoFar > maxAutoConnectRetryTime )
sleepTime = maxAutoConnectRetryTime - sleptSoFar;
if (sleepTime + waitSoFar > maxAutoConnectRetryTime)
sleepTime = maxAutoConnectRetryTime - waitSoFar;
_logger.severe( "going to sleep and retry. total sleep time after = " + ( sleptSoFar + sleptSoFar ) + "ms this time:" + sleepTime + "ms" );
ThreadUtil.sleep( sleepTime );
_logger.log(Level.WARNING, "Exception connecting to " + serverAddress().getHost() + ": " + e +
". Total wait time so far is " + waitSoFar + " ms. Will retry after sleeping for " + sleepTime + " ms.");
ThreadUtil.sleep(sleepTime);
sleepTime *= 2;
}
} while (!successfullyConnected);
}
@Override
@ -274,11 +284,19 @@ public class DBPort {
close();
}
ActiveState getActiveState() {
return _activeState;
}
int getLocalPort() {
return _socket != null ? _socket.getLocalPort() : -1;
}
/**
* closes the underlying connection and streams
*/
protected void close(){
_authed.clear();
authenticatedDatabases.clear();
if ( _socket != null ){
try {
@ -294,35 +312,40 @@ public class DBPort {
_socket = null;
}
void checkAuth( DB db ) throws IOException {
if ( db._username == null ){
if ( db._name.equals( "admin" ) )
return;
checkAuth( db._mongo.getDB( "admin" ) );
return;
CommandResult authenticate(Mongo mongo, final MongoCredential credentials) {
Authenticator authenticator;
if (credentials.getMechanism().equals(MongoCredential.MONGODB_CR_MECHANISM)) {
authenticator = new NativeAuthenticator(mongo, credentials);
} else if (credentials.getMechanism().equals(MongoCredential.GSSAPI_MECHANISM)) {
authenticator = new GSSAPIAuthenticator(mongo, credentials);
} else {
throw new IllegalArgumentException("Unsupported authentication protocol: " + credentials.getMechanism());
}
CommandResult res = authenticator.authenticate();
authenticatedDatabases.add(credentials.getSource());
return res;
}
if ( _authed.containsKey( db ) )
return;
CommandResult res = runCommand( db , new BasicDBObject( "getnonce" , 1 ) );
res.throwOnError();
void checkAuth(Mongo mongo) throws IOException {
// get the difference between the set of credentialed databases and the set of authenticated databases on this connection
Set<String> unauthenticatedDatabases = new HashSet<String>(mongo.getAuthority().getCredentialsStore().getDatabases());
unauthenticatedDatabases.removeAll(authenticatedDatabases);
DBObject temp = db._authCommand( res.getString( "nonce" ) );
res = runCommand( db , temp );
res.throwOnError();
_authed.put( db , true );
for (String databaseName : unauthenticatedDatabases) {
authenticate(mongo, mongo.getAuthority().getCredentialsStore().get(databaseName));
}
}
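
authenticate(...) above chooses the authenticator from the credential's mechanism (MONGODB-CR or GSSAPI), and checkAuth(...) runs it once per credentialed database this connection has not yet authenticated. A hedged sketch of how such credentials are normally built on the client side; the factory methods are from the 2.11 MongoCredential API as I understand it, and the user names, database and password are placeholders:

    MongoCredential cr = MongoCredential.createMongoCRCredential("appUser", "mydb", "secret".toCharArray());
    MongoCredential kerberos = MongoCredential.createGSSAPICredential("appUser@EXAMPLE.COM");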
/**
* Gets the pool that this port belongs to
* @return
* Gets the pool that this port belongs to.
* @return the pool that this port belongs to.
*/
public DBPortPool getPool() {
return _pool;
}
private static Logger _rootLogger = Logger.getLogger( "com.mongodb.port" );
final int _hashCode;
final ServerAddress _sa;
final InetSocketAddress _addr;
@ -331,15 +354,217 @@ public class DBPort {
final Logger _logger;
final DBDecoder _decoder;
private Socket _socket;
private InputStream _in;
private OutputStream _out;
private volatile Socket _socket;
private volatile InputStream _in;
private volatile OutputStream _out;
private boolean _processingResponse;
private volatile boolean _processingResponse;
private Map<DB,Boolean> _authed = new ConcurrentHashMap<DB, Boolean>( );
int _lastThread;
long _calls = 0;
// needs synchronization to ensure that modifications are published.
final Set<String> authenticatedDatabases = Collections.synchronizedSet(new HashSet<String>());
private static Logger _rootLogger = Logger.getLogger( "com.mongodb.port" );
volatile int _lastThread;
final AtomicLong _calls = new AtomicLong();
private volatile ActiveState _activeState;
private volatile Boolean useCRAMAuthenticationProtocol;
class ActiveState {
ActiveState(final OutMessage outMessage) {
this.outMessage = outMessage;
this.startTime = System.nanoTime();
this.threadName = Thread.currentThread().getName();
}
final OutMessage outMessage;
final long startTime;
final String threadName;
}
class GenericSaslAuthenticator extends SaslAuthenticator {
static final String CRAM_MD5 = "CRAM-MD5";
private final String mechanism;
GenericSaslAuthenticator(final Mongo mongo, MongoCredential credentials, String mechanism) {
super(mongo, credentials);
this.mechanism = mechanism;
}
@Override
protected SaslClient createSaslClient() {
try {
return Sasl.createSaslClient(new String[]{mechanism},
credential.getUserName(), MONGODB_PROTOCOL,
serverAddress().getHost(), null, new CredentialsHandlingCallbackHandler());
} catch (SaslException e) {
throw new MongoException("Exception initializing SASL client", e);
}
}
@Override
protected DB getDatabase() {
return mongo.getDB(credential.getSource());
}
@Override
public String getMechanismName() {
return mechanism;
}
class CredentialsHandlingCallbackHandler implements CallbackHandler {
public void handle(final Callback[] callbacks) throws IOException, UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
NameCallback nameCallback = (NameCallback) callback;
nameCallback.setName(credential.getUserName());
}
if (callback instanceof PasswordCallback) {
PasswordCallback passwordCallback = (PasswordCallback) callback;
String hashedPassword = new String(NativeAuthenticationHelper.createHash(
credential.getUserName(), credential.getPassword()));
passwordCallback.setPassword(hashedPassword.toCharArray());
}
}
}
}
}
class GSSAPIAuthenticator extends SaslAuthenticator {
public static final String GSSAPI_OID = "1.2.840.113554.1.2.2";
public static final String GSSAPI_MECHANISM = MongoCredential.GSSAPI_MECHANISM;
GSSAPIAuthenticator(final Mongo mongo, final MongoCredential credentials) {
super(mongo, credentials);
if (!this.credential.getMechanism().equals(MongoCredential.GSSAPI_MECHANISM)) {
throw new MongoException("Incorrect mechanism: " + this.credential.getMechanism());
}
}
@Override
protected SaslClient createSaslClient() {
try {
Map<String, Object> props = new HashMap<String, Object>();
props.put(Sasl.CREDENTIALS, getGSSCredential(credential.getUserName()));
return Sasl.createSaslClient(new String[]{GSSAPI_MECHANISM}, credential.getUserName(), MONGODB_PROTOCOL,
serverAddress().getHost(), props, null);
} catch (SaslException e) {
throw new MongoException("Exception initializing SASL client", e);
} catch (GSSException e) {
throw new MongoException("Exception initializing GSSAPI credentials", e);
}
}
@Override
protected DB getDatabase() {
return mongo.getDB(credential.getSource());
}
@Override
public String getMechanismName() {
return "GSSAPI";
}
private GSSCredential getGSSCredential(String userName) throws GSSException {
Oid krb5Mechanism = new Oid(GSSAPI_OID);
GSSManager manager = GSSManager.getInstance();
GSSName name = manager.createName(userName, GSSName.NT_USER_NAME);
return manager.createCredential(name, GSSCredential.INDEFINITE_LIFETIME,
krb5Mechanism, GSSCredential.INITIATE_ONLY);
}
}
abstract class SaslAuthenticator extends Authenticator {
public static final String MONGODB_PROTOCOL = "mongodb";
SaslAuthenticator(final Mongo mongo, MongoCredential credentials) {
super(mongo, credentials);
}
public CommandResult authenticate() {
SaslClient saslClient = createSaslClient();
try {
byte[] response = (saslClient.hasInitialResponse() ? saslClient.evaluateChallenge(new byte[0]) : null);
CommandResult res = sendSaslStart(response);
res.throwOnError();
int conversationId = (Integer) res.get("conversationId");
while (! (Boolean) res.get("done")) {
response = saslClient.evaluateChallenge((byte[]) res.get("payload"));
if (response == null) {
throw new MongoException("SASL protocol error: no client response to challenge");
}
res = sendSaslContinue(conversationId, response);
res.throwOnError();
}
return res;
} catch (IOException e) {
throw new MongoException.Network("IOException authenticating the connection", e);
} finally {
try {
saslClient.dispose();
} catch (SaslException e) {
// ignore
}
}
}
protected abstract SaslClient createSaslClient();
protected abstract DB getDatabase();
private CommandResult sendSaslStart(final byte[] outToken) throws IOException {
DBObject cmd = new BasicDBObject("saslStart", 1).
append("mechanism", getMechanismName())
.append("payload", outToken != null ? outToken : new byte[0]);
return runCommand(getDatabase(), cmd);
}
private CommandResult sendSaslContinue(final int conversationId, final byte[] outToken) throws IOException {
DB adminDB = getDatabase();
DBObject cmd = new BasicDBObject("saslContinue", 1).append("conversationId", conversationId).
append("payload", outToken);
return runCommand(adminDB, cmd);
}
public abstract String getMechanismName();
}
class NativeAuthenticator extends Authenticator {
NativeAuthenticator(Mongo mongo, MongoCredential credentials) {
super(mongo, credentials);
}
@Override
public CommandResult authenticate() {
try {
DB db = mongo.getDB(credential.getSource());
CommandResult res = runCommand(db, NativeAuthenticationHelper.getNonceCommand());
res.throwOnError();
res = runCommand(db, NativeAuthenticationHelper.getAuthCommand(credential.getUserName(),
credential.getPassword(), res.getString("nonce")));
res.throwOnError();
return res;
} catch (IOException e) {
throw new MongoException.Network("IOException authenticating the connection", e);
}
}
}
abstract class Authenticator {
protected final Mongo mongo;
protected final MongoCredential credential;
Authenticator(Mongo mongo, MongoCredential credential) {
this.mongo = mongo;
this.credential = credential;
}
abstract CommandResult authenticate();
}
}

View File

@ -18,7 +18,12 @@
package com.massivecraft.mcore.xlib.mongodb;
import java.lang.management.ManagementFactory;
import com.massivecraft.mcore.xlib.mongodb.util.ConnectionPoolStatisticsBean;
import com.massivecraft.mcore.xlib.mongodb.util.SimplePool;
import com.massivecraft.mcore.xlib.mongodb.util.management.JMException;
import com.massivecraft.mcore.xlib.mongodb.util.management.MBeanServerFactory;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@ -28,29 +33,37 @@ import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import com.massivecraft.mcore.xlib.mongodb.util.SimplePool;
/**
* This class is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases.
*/
public class DBPortPool extends SimplePool<DBPort> {
public String getHost() {
return _addr.getHost();
}
public int getPort() {
return _addr.getPort();
}
public synchronized ConnectionPoolStatisticsBean getStatistics() {
return new ConnectionPoolStatisticsBean(getTotal(), getInUse(), getInUseConnections());
}
private InUseConnectionBean[] getInUseConnections() {
List<InUseConnectionBean> inUseConnectionInfoList = new ArrayList<InUseConnectionBean>();
long currentNanoTime = System.nanoTime();
for (DBPort port : _out) {
inUseConnectionInfoList.add(new InUseConnectionBean(port, currentNanoTime));
}
return inUseConnectionInfoList.toArray(new InUseConnectionBean[inUseConnectionInfoList.size()]);
}
static class Holder {
Holder( MongoOptions options ){
_options = options;
{
MBeanServer temp = null;
try {
temp = ManagementFactory.getPlatformMBeanServer();
}
catch ( Throwable t ){
}
_server = temp;
}
}
DBPortPool get( ServerAddress addr ){
@ -66,40 +79,46 @@ public class DBPortPool extends SimplePool<DBPort> {
return p;
}
p = new DBPortPool( addr , _options );
p = createPool(addr);
_pools.put( addr , p);
if ( _server != null ){
try {
ObjectName on = createObjectName( addr );
if ( _server.isRegistered( on ) ){
_server.unregisterMBean( on );
Bytes.LOGGER.log( Level.INFO , "multiple Mongo instances for same host, jmx numbers might be off" );
String on = createObjectName(addr);
if (MBeanServerFactory.getMBeanServer().isRegistered(on)) {
MBeanServerFactory.getMBeanServer().unregisterMBean(on);
Bytes.LOGGER.log(Level.INFO, "multiple Mongo instances for same host, jmx numbers might be off");
}
_server.registerMBean( p , on );
MBeanServerFactory.getMBeanServer().registerMBean(p, on);
} catch (JMException e) {
Bytes.LOGGER.log(Level.WARNING, "JMX registration error: " + e +
"\nConsider setting com.mongodb.MongoOptions.alwaysUseMBeans property to true." +
"\nContinuing...");
} catch (java.security.AccessControlException e) {
Bytes.LOGGER.log(Level.WARNING, "JMX registration error: " + e +
"\nContinuing...");
}
catch ( JMException e ){
Bytes.LOGGER.log( Level.WARNING , "jmx registration error: " + e + " continuing..." );
}
catch ( java.security.AccessControlException e ){
Bytes.LOGGER.log( Level.WARNING , "jmx registration error: " + e + " continuing..." );
}
}
}
return p;
}
private DBPortPool createPool(final ServerAddress addr) {
if (isJava5 || _options.isAlwaysUseMBeans()) {
return new Java5MongoConnectionPool(addr, _options);
} else {
return new MongoConnectionPool(addr, _options);
}
}
void close(){
synchronized ( _pools ){
for ( DBPortPool p : _pools.values() ){
p.close();
try {
ObjectName on = createObjectName( p._addr );
if ( _server.isRegistered( on ) ){
_server.unregisterMBean( on );
String on = createObjectName( p._addr );
if ( MBeanServerFactory.getMBeanServer().isRegistered(on) ){
MBeanServerFactory.getMBeanServer().unregisterMBean(on);
}
} catch ( JMException e ){
Bytes.LOGGER.log( Level.WARNING , "jmx de-registration error, continuing" , e );
@ -108,20 +127,24 @@ public class DBPortPool extends SimplePool<DBPort> {
}
}
private ObjectName createObjectName( ServerAddress addr ) throws MalformedObjectNameException {
private String createObjectName( ServerAddress addr ) {
String name = "com.mongodb:type=ConnectionPool,host=" + addr.toString().replace( ":" , ",port=" ) + ",instance=" + _serial;
if ( _options.description != null )
name += ",description=" + _options.description;
return new ObjectName( name );
return name;
}
static {
isJava5 = System.getProperty("java.version").startsWith("1.5");
}
final MongoOptions _options;
final Map<ServerAddress,DBPortPool> _pools = Collections.synchronizedMap( new HashMap<ServerAddress,DBPortPool>() );
final MBeanServer _server;
final int _serial = nextSerial.incrementAndGet();
// we use this to give each Holder a different mbean name
static AtomicInteger nextSerial = new AtomicInteger(0);
static final boolean isJava5;
}
// ----
@ -136,8 +159,13 @@ public class DBPortPool extends SimplePool<DBPort> {
public static class SemaphoresOut extends NoMoreConnection {
private static final long serialVersionUID = -4415279469780082174L;
private static final String message = "Concurrent requests for database connection have exceeded limit";
SemaphoresOut(){
super( "Out of semaphores to get db connection" );
super( message );
}
SemaphoresOut(int numPermits){
super( message + " of " + numPermits);
}
}
@ -151,7 +179,7 @@ public class DBPortPool extends SimplePool<DBPort> {
// ----
DBPortPool( ServerAddress addr , MongoOptions options ){
super( "DBPortPool-" + addr.toString() + ", options = " + options.toString() , options.connectionsPerHost , options.connectionsPerHost );
super( "DBPortPool-" + addr.toString() + ", options = " + options.toString() , options.connectionsPerHost );
_options = options;
_addr = addr;
_waitingSem = new Semaphore( _options.connectionsPerHost * _options.threadsAllowedToBlockForConnectionMultiplier );
@ -161,29 +189,32 @@ public class DBPortPool extends SimplePool<DBPort> {
return 0;
}
protected int pick( int iThink , boolean couldCreate ){
final int id = System.identityHashCode(Thread.currentThread());
final int s = _availSafe.size();
for ( int i=0; i<s; i++ ){
DBPort p = _availSafe.get(i);
if ( p._lastThread == id )
@Override
protected int pick( int recommended, boolean couldCreate ){
int id = System.identityHashCode(Thread.currentThread());
for (int i = _avail.size() - 1; i >= 0; i--){
if ( _avail.get(i)._lastThread == id )
return i;
}
if ( couldCreate )
return -1;
return iThink;
return couldCreate ? -1 : recommended;
}
public DBPort get(){
/**
* @return
* @throws MongoException
*/
@Override
public DBPort get() {
DBPort port = null;
if ( ! _waitingSem.tryAcquire() )
throw new SemaphoresOut();
throw new SemaphoresOut(_options.connectionsPerHost * _options.threadsAllowedToBlockForConnectionMultiplier);
try {
port = get( _options.maxWaitTime );
}
finally {
} catch (InterruptedException e) {
throw new MongoInterruptedException(e);
} finally {
_waitingSem.release();
}
@ -194,17 +225,17 @@ public class DBPortPool extends SimplePool<DBPort> {
return port;
}
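
The waiting semaphore created in the constructor above caps how many threads may block for a connection at connectionsPerHost * threadsAllowedToBlockForConnectionMultiplier, which is the limit the new SemaphoresOut message now reports. A small worked example with assumed option values: 10 connections per host, and up to 10 * 5 = 50 waiting threads before SemaphoresOut is thrown.

    MongoOptions options = new MongoOptions();
    options.connectionsPerHost = 10;                          // pool size per host
    options.threadsAllowedToBlockForConnectionMultiplier = 5; // waiters allowed: 10 * 5 = 50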
void gotError( Exception e ){
if ( e instanceof java.nio.channels.ClosedByInterruptException ||
e instanceof InterruptedException ){
// return true if the exception is recoverable
boolean gotError( Exception e ){
if (e instanceof java.nio.channels.ClosedByInterruptException){
// this is probably a request that is taking too long
// so usually doesn't mean there is a real db problem
return;
return true;
}
if ( e instanceof java.net.SocketTimeoutException ){
// we don't want to clear the port pool for a connection timing out
return;
if ( e instanceof InterruptedIOException){
// we don't want to clear the port pool for a connection timing out or interrupted
return true;
}
Bytes.LOGGER.log( Level.WARNING , "emptying DBPortPool to " + getServerAddress() + " b/c of error" , e );
@ -212,10 +243,14 @@ public class DBPortPool extends SimplePool<DBPort> {
List<DBPort> all = new ArrayList<DBPort>();
while ( true ){
try {
DBPort temp = get(0);
if ( temp == null )
break;
all.add( temp );
} catch (InterruptedException interruptedException) {
throw new MongoInterruptedException(interruptedException);
}
}
for ( DBPort p : all ){
@ -223,20 +258,15 @@ public class DBPortPool extends SimplePool<DBPort> {
done(p);
}
return false;
}
void close(){
clear();
}
@Override
public void cleanup( DBPort p ){
p.close();
}
public boolean ok( DBPort t ){
return _addr.getSocketAddress().equals( t._addr );
}
@Override
protected DBPort createNew(){
return new DBPort( _addr , this , _options );
}

View File

@ -52,6 +52,7 @@ public class DBRef extends DBRefBase {
* @param db the database
* @param ref the reference
* @return
* @throws MongoException
*/
public static DBObject fetch(DB db, DBObject ref) {
String ns;

View File

@ -39,8 +39,9 @@ public class DBRefBase {
/**
* fetches the object referenced from the database
* @return
* @throws MongoException
*/
public DBObject fetch() {
public DBObject fetch() throws MongoException {
if (_loadedPointedTo)
return _pointedTo;
@ -84,16 +85,23 @@ public class DBRefBase {
}
@Override
public boolean equals(Object obj) {
if (obj == this)
return true;
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final DBRefBase dbRefBase = (DBRefBase) o;
if (_id != null ? !_id.equals(dbRefBase._id) : dbRefBase._id != null) return false;
if (_ns != null ? !_ns.equals(dbRefBase._ns) : dbRefBase._ns != null) return false;
if (obj instanceof DBRefBase) {
DBRefBase ref = (DBRefBase) obj;
if (_ns.equals(ref.getRef()) && _id.equals(ref.getId()))
return true;
}
return false;
@Override
public int hashCode() {
int result = _id != null ? _id.hashCode() : 0;
result = 31 * result + (_ns != null ? _ns.hashCode() : 0);
return result;
}
final Object _id;

View File

@ -18,71 +18,39 @@
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.mongodb.ReadPreference.TaggedReadPreference;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import java.util.logging.Logger;
public class DBTCPConnector implements DBConnector {
static Logger _logger = Logger.getLogger( Bytes.LOGGER.getName() + ".tcp" );
static Logger _createLogger = Logger.getLogger( _logger.getName() + ".connect" );
public DBTCPConnector( Mongo m , ServerAddress addr )
throws MongoException {
_mongo = m;
_portHolder = new DBPortPool.Holder( m._options );
_checkAddress( addr );
_createLogger.info( addr.toString() );
setMasterAddress(addr);
_allHosts = null;
_rsStatus = null;
/**
* @param mongo the Mongo instance
* @throws MongoException
*/
public DBTCPConnector( Mongo mongo ) {
_mongo = mongo;
_portHolder = new DBPortPool.Holder( mongo._options );
MongoAuthority.Type type = mongo.getAuthority().getType();
if (type == MongoAuthority.Type.Direct) {
setMasterAddress(mongo.getAuthority().getServerAddresses().get(0));
} else if (type == MongoAuthority.Type.Set) {
_connectionStatus = new DynamicConnectionStatus(mongo, mongo.getAuthority().getServerAddresses());
} else {
throw new IllegalArgumentException("Unsupported authority type: " + type);
}
public DBTCPConnector( Mongo m , ServerAddress ... all )
throws MongoException {
this( m , Arrays.asList( all ) );
}
public DBTCPConnector( Mongo m , List<ServerAddress> all )
throws MongoException {
_mongo = m;
_portHolder = new DBPortPool.Holder( m._options );
_checkAddress( all );
_allHosts = new ArrayList<ServerAddress>( all ); // make a copy so it can't be modified
_rsStatus = new ReplicaSetStatus( m, _allHosts );
_createLogger.info( all + " -> " + getAddress() );
}
public void start() {
if (_rsStatus != null)
_rsStatus.start();
if (_connectionStatus != null) {
_connectionStatus.start();
}
private static ServerAddress _checkAddress( ServerAddress addr ){
if ( addr == null )
throw new NullPointerException( "address can't be null" );
return addr;
}
private static ServerAddress _checkAddress( List<ServerAddress> addrs ){
if ( addrs == null )
throw new NullPointerException( "addresses can't be null" );
if ( addrs.size() == 0 )
throw new IllegalArgumentException( "need to specify at least 1 address" );
return addrs.get(0);
}
/**
@ -98,7 +66,7 @@ public class DBTCPConnector implements DBConnector {
*/
@Override
public void requestStart(){
_myPort.get().requestStart();
_myPort.requestStart();
}
/**
@ -110,12 +78,16 @@ public class DBTCPConnector implements DBConnector {
*/
@Override
public void requestDone(){
_myPort.get().requestDone();
_myPort.requestDone();
}
/**
* @throws MongoException
*/
@Override
public void requestEnsureConnection(){
_myPort.get().requestEnsureConnection();
checkMaster( false , true );
_myPort.requestEnsureConnection();
}
void _checkClosed(){
@ -124,31 +96,47 @@ public class DBTCPConnector implements DBConnector {
}
WriteResult _checkWriteError( DB db, DBPort port , WriteConcern concern )
throws MongoException, IOException {
throws IOException{
CommandResult e = port.runCommand( db , concern.getCommand() );
e.throwOnError();
return new WriteResult( e , concern );
}
/**
* @param db
* @param m
* @param concern
* @return
* @throws MongoException
*/
@Override
public WriteResult say( DB db , OutMessage m , WriteConcern concern )
throws MongoException {
public WriteResult say( DB db , OutMessage m , WriteConcern concern ){
return say( db , m , concern , null );
}
/**
* @param db
* @param m
* @param concern
* @param hostNeeded
* @return
* @throws MongoException
*/
@Override
public WriteResult say( DB db , OutMessage m , WriteConcern concern , ServerAddress hostNeeded )
throws MongoException {
public WriteResult say( DB db , OutMessage m , WriteConcern concern , ServerAddress hostNeeded ){
if (concern == null) {
throw new IllegalArgumentException("Write concern is null");
}
_checkClosed();
checkMaster( false , true );
MyPort mp = _myPort.get();
DBPort port = mp.get( true , ReadPreference.PRIMARY, hostNeeded );
DBPort port = _myPort.get(true, ReadPreference.primary(), hostNeeded);
try {
port.checkAuth( db );
port.checkAuth( db.getMongo() );
port.say( m );
if ( concern.callGetLastError() ){
return _checkWriteError( db , port , concern );
@ -158,11 +146,11 @@ public class DBTCPConnector implements DBConnector {
}
}
catch ( IOException ioe ){
mp.error( port , ioe );
_myPort.error(port, ioe);
_error( ioe, false );
if ( concern.raiseNetworkErrors() )
throw new MongoException.Network( "can't say something" , ioe );
throw new MongoException.Network("Write operation to server " + port.host() + " failed on database " + db , ioe );
CommandResult res = new CommandResult(port.serverAddress());
res.put( "ok" , false );
@ -173,69 +161,107 @@ public class DBTCPConnector implements DBConnector {
throw me;
}
catch ( RuntimeException re ){
mp.error( port , re );
_myPort.error(port, re);
throw re;
}
finally {
mp.done( port );
_myPort.done(port);
m.doneWithMessage();
}
}
/**
* @param db
* @param coll
* @param m
* @param hostNeeded
* @param decoder
* @return
* @throws MongoException
*/
@Override
public Response call( DB db , DBCollection coll , OutMessage m, ServerAddress hostNeeded, DBDecoder decoder )
throws MongoException {
public Response call( DB db , DBCollection coll , OutMessage m, ServerAddress hostNeeded, DBDecoder decoder ){
return call( db , coll , m , hostNeeded , 2, null, decoder );
}
public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries ) throws MongoException {
/**
* @param db
* @param coll
* @param m
* @param hostNeeded
* @param retries
* @return
* @throws MongoException
*/
@Override
public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries ){
return call( db, coll, m, hostNeeded, retries, null, null);
}
/**
* @param db
* @param coll
* @param m
* @param hostNeeded
* @param readPref
* @param decoder
* @return
* @throws MongoException
*/
@Override
public Response call( DB db, DBCollection coll, OutMessage m, ServerAddress hostNeeded, int retries, ReadPreference readPref, DBDecoder decoder ) throws MongoException{
public Response call( DB db, DBCollection coll, OutMessage m, ServerAddress hostNeeded, int retries,
ReadPreference readPref, DBDecoder decoder ){
try {
return innerCall(db, coll, m, hostNeeded, retries, readPref, decoder);
} finally {
m.doneWithMessage();
}
}
// This method is recursive. It calls itself to implement query retry logic.
private Response innerCall(final DB db, final DBCollection coll, final OutMessage m, final ServerAddress hostNeeded,
final int retries, ReadPreference readPref, final DBDecoder decoder) {
if (readPref == null)
readPref = ReadPreference.PRIMARY;
readPref = ReadPreference.primary();
if (readPref == ReadPreference.PRIMARY && m.hasOption( Bytes.QUERYOPTION_SLAVEOK ))
readPref = ReadPreference.SECONDARY;
if (readPref == ReadPreference.primary() && m.hasOption( Bytes.QUERYOPTION_SLAVEOK ))
readPref = ReadPreference.secondaryPreferred();
boolean secondaryOk = !(readPref == ReadPreference.PRIMARY);
boolean secondaryOk = !(readPref == ReadPreference.primary());
_checkClosed();
// Don't check master on secondary reads unless connected to a replica set
if (!secondaryOk || getReplicaSetStatus() == null)
checkMaster( false, !secondaryOk );
final MyPort mp = _myPort.get();
final DBPort port = mp.get( false , readPref, hostNeeded );
final DBPort port = _myPort.get(false, readPref, hostNeeded);
Response res = null;
boolean retry = false;
try {
port.checkAuth( db );
res = port.call( m , coll, readPref, decoder );
port.checkAuth( db.getMongo() );
res = port.call( m , coll, decoder );
if ( res._responseTo != m.getId() )
throw new MongoException( "ids don't match" );
}
catch ( IOException ioe ){
mp.error( port , ioe );
_myPort.error(port, ioe);
retry = retries > 0 && !coll._name.equals( "$cmd" )
&& !(ioe instanceof SocketTimeoutException) && _error( ioe, secondaryOk );
if ( !retry ){
throw new MongoException.Network( "can't call something : " + port.host() + "/" + db,
ioe );
throw new MongoException.Network("Read operation to server " + port.host() + " failed on database " + db , ioe );
}
}
catch ( RuntimeException re ){
mp.error( port , re );
_myPort.error(port, re);
throw re;
} finally {
mp.done( port );
_myPort.done(port);
}
if (retry)
return call( db , coll , m , hostNeeded , retries - 1 , readPref, decoder );
return innerCall( db , coll , m , hostNeeded , retries - 1 , readPref, decoder );
ServerError err = res.getError();
@ -244,10 +270,9 @@ public class DBTCPConnector implements DBConnector {
if ( retries <= 0 ){
throw new MongoException( "not talking to master and retries used up" );
}
return call( db , coll , m , hostNeeded , retries -1, readPref, decoder );
return innerCall( db , coll , m , hostNeeded , retries -1, readPref, decoder );
}
m.doneWithMessage();
return res;
}
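
innerCall now routes reads by ReadPreference instances instead of the old PRIMARY/SECONDARY constants, and maps the slaveOk query option to secondaryPreferred(). A hedged sketch of how a caller picks these preferences against the 2.11 API; collection and query stand for an existing DBCollection and DBObject:

    ReadPreference preferPrimary = ReadPreference.primary();              // default: route to the master
    ReadPreference preferSecondary = ReadPreference.secondaryPreferred(); // what QUERYOPTION_SLAVEOK maps to
    DBCursor cursor = collection.find(query).setReadPreference(preferSecondary);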
@ -261,17 +286,18 @@ public class DBTCPConnector implements DBConnector {
* @return
*/
public List<ServerAddress> getAllAddress() {
return _allHosts;
return _mongo._authority.getServerAddresses();
}
/**
* Gets the list of server addresses currently seen by the connector.
* This includes addresses auto-discovered from a replica set.
* @return
* @throws MongoException
*/
public List<ServerAddress> getServerAddressList() {
if (_rsStatus != null) {
return _rsStatus.getServerAddressList();
if (_connectionStatus != null) {
return _connectionStatus.getServerAddressList();
}
ServerAddress master = getAddress();
@ -285,7 +311,30 @@ public class DBTCPConnector implements DBConnector {
}
public ReplicaSetStatus getReplicaSetStatus() {
return _rsStatus;
if (_connectionStatus instanceof ReplicaSetStatus) {
return (ReplicaSetStatus) _connectionStatus;
} else if (_connectionStatus instanceof DynamicConnectionStatus) {
return ((DynamicConnectionStatus) _connectionStatus).asReplicaSetStatus();
} else {
return null;
}
}
// This call can block if it's not yet known.
// Be careful when modifying this method, as this method is using the fact that _isMongosDirectConnection
// is of type Boolean and is null when uninitialized.
boolean isMongosConnection() {
if (_connectionStatus instanceof MongosStatus) {
return true;
} else if (_connectionStatus instanceof DynamicConnectionStatus) {
return ((DynamicConnectionStatus) _connectionStatus).asMongosStatus() != null;
}
if (_isMongosDirectConnection == null) {
initDirectConnection();
}
return _isMongosDirectConnection != null ? _isMongosDirectConnection : false;
}
public String getConnectPoint(){
@ -301,9 +350,8 @@ public class DBTCPConnector implements DBConnector {
* @return true if the request should be retried, false otherwise
* @throws MongoException
*/
boolean _error( Throwable t, boolean secondaryOk )
throws MongoException {
if (_rsStatus == null) {
boolean _error( Throwable t, boolean secondaryOk ){
if (_connectionStatus == null) {
// single server, no need to retry
return false;
}
@ -311,127 +359,145 @@ public class DBTCPConnector implements DBConnector {
// the replset has at least 1 server up, try to see if should switch master
// if no server is up, we won't retry until the updater thread finds one
// this is to cut down the volume of requests/errors when all servers are down
if ( _rsStatus.hasServerUp() ){
if ( _connectionStatus.hasServerUp() ){
checkMaster( true , !secondaryOk );
}
return _rsStatus.hasServerUp();
return _connectionStatus.hasServerUp();
}
class MyPort {
DBPort get( boolean keep , ReadPreference readPref, ServerAddress hostNeeded ){
if ( hostNeeded != null ){
if (_requestPort != null && _requestPort.serverAddress().equals(hostNeeded)) {
return _requestPort;
DBPort pinnedRequestPort = getPinnedRequestPortForThread();
if ( hostNeeded != null ) {
if (pinnedRequestPort != null && pinnedRequestPort.serverAddress().equals(hostNeeded)) {
return pinnedRequestPort;
}
// asked for a specific host
return _portHolder.get( hostNeeded ).get();
}
if ( _requestPort != null ){
if ( pinnedRequestPort != null ){
// we are within a request, and have a port, should stick to it
if ( _requestPort.getPool() == _masterPortPool || !keep ) {
if ( pinnedRequestPort.getPool() == _masterPortPool || !keep ) {
// if keep is false, it's a read, so we use port even if master changed
return _requestPort;
return pinnedRequestPort;
}
// it's write and master has changed
// we fall back on new master and try to go on with request
// this may not be best behavior if spec of request is to stick with same server
_requestPort.getPool().done(_requestPort);
_requestPort = null;
}
if ( !(readPref == ReadPreference.PRIMARY) && _rsStatus != null ){
// if not a primary read set, try to use a secondary
// Do they want a Secondary, or a specific tag set?
if (readPref == ReadPreference.SECONDARY) {
ServerAddress slave = _rsStatus.getASecondary();
if ( slave != null ){
return _portHolder.get( slave ).get();
}
} else if (readPref instanceof ReadPreference.TaggedReadPreference) {
// Tag based read
ServerAddress secondary = _rsStatus.getASecondary( ( (TaggedReadPreference) readPref ).getTags() );
if (secondary != null)
return _portHolder.get( secondary ).get();
else
throw new MongoException( "Could not find any valid secondaries with the supplied tags ('" +
( (TaggedReadPreference) readPref ).getTags() + "'");
}
pinnedRequestPort.getPool().done(pinnedRequestPort);
setPinnedRequestPortForThread(null);
}
DBPort port;
if (getReplicaSetStatus() == null){
if (_masterPortPool == null) {
// this should only happen in rare case that no master was ever found
// may get here at startup if it's a read, slaveOk=true, and ALL servers are down
throw new MongoException("Rare case where master=null, probably all servers are down");
}
port = _masterPortPool.get();
}
else {
ReplicaSetStatus.ReplicaSet replicaSet = getReplicaSetStatus()._replicaSetHolder.get();
ConnectionStatus.Node node = readPref.getNode(replicaSet);
if (node == null)
throw new MongoException("No replica set members available in " + replicaSet + " for " + readPref.toDBObject().toString());
port = _portHolder.get(node.getServerAddress()).get();
}
// use master
DBPort p = _masterPortPool.get();
if ( _inRequest ) {
// if within request, remember port to stick to same server
_requestPort = p;
if (threadHasPinnedRequest()) {
setPinnedRequestPortForThread(port);
}
return p;
return port;
}
void done( DBPort p ){
void done( DBPort port ) {
DBPort requestPort = getPinnedRequestPortForThread();
// keep request port
if ( p != _requestPort ){
p.getPool().done(p);
if (port != requestPort) {
port.getPool().done(port);
}
}
/**
* call this method when there is an IOException or other low level error on port.
* @param p
* @param port
* @param e
*/
void error( DBPort p , Exception e ){
p.close();
_requestPort = null;
// _logger.log( Level.SEVERE , "MyPort.error called" , e );
void error( DBPort port , Exception e ){
port.close();
pinnedRequestStatusThreadLocal.remove();
// depending on type of error, may need to close other connections in pool
p.getPool().gotError(e);
boolean recoverable = port.getPool().gotError(e);
if (!recoverable && _connectionStatus != null && _masterPortPool._addr.equals(port.serverAddress())) {
ConnectionStatus.Node newMaster = _connectionStatus.ensureMaster();
if (newMaster != null) {
setMaster(newMaster);
}
}
}
void requestEnsureConnection(){
if ( ! _inRequest )
if ( !threadHasPinnedRequest() )
return;
if ( _requestPort != null )
if ( getPinnedRequestPortForThread() != null )
return;
_requestPort = _masterPortPool.get();
setPinnedRequestPortForThread(_masterPortPool.get());
}
void requestStart(){
_inRequest = true;
pinnedRequestStatusThreadLocal.set(new PinnedRequestStatus());
}
void requestDone(){
if ( _requestPort != null )
_requestPort.getPool().done( _requestPort );
_requestPort = null;
_inRequest = false;
DBPort requestPort = getPinnedRequestPortForThread();
if ( requestPort != null )
requestPort.getPool().done( requestPort );
pinnedRequestStatusThreadLocal.remove();
}
DBPort _requestPort;
// DBPortPool _requestPool;
boolean _inRequest;
PinnedRequestStatus getPinnedRequestStatusForThread() {
return pinnedRequestStatusThreadLocal.get();
}
void checkMaster( boolean force , boolean failIfNoMaster )
throws MongoException {
boolean threadHasPinnedRequest() {
return pinnedRequestStatusThreadLocal.get() != null;
}
if ( _rsStatus != null ){
DBPort getPinnedRequestPortForThread() {
return threadHasPinnedRequest() ? pinnedRequestStatusThreadLocal.get().requestPort : null;
}
void setPinnedRequestPortForThread(final DBPort port) {
pinnedRequestStatusThreadLocal.get().requestPort = port;
}
private final ThreadLocal<PinnedRequestStatus> pinnedRequestStatusThreadLocal = new ThreadLocal<PinnedRequestStatus>();
}
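
The thread-local pinning in MyPort above is what backs the driver's consistent-request API: between requestStart() and requestDone(), every operation issued on the calling thread reuses the one pinned DBPort. A hedged usage sketch, where mongo stands for an existing Mongo instance and the database and collection names are made up:

    DB db = mongo.getDB("test");
    db.requestStart();
    try {
        db.getCollection("users").insert(new BasicDBObject("_id", 1));
        db.getCollection("users").findOne(new BasicDBObject("_id", 1)); // same socket: read your own write
    } finally {
        db.requestDone();
    }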
static class PinnedRequestStatus {
DBPort requestPort;
}
void checkMaster( boolean force , boolean failIfNoMaster ){
if ( _connectionStatus != null ){
if ( _masterPortPool == null || force ){
ReplicaSetStatus.Node master = _rsStatus.ensureMaster();
ConnectionStatus.Node master = _connectionStatus.ensureMaster();
if ( master == null ){
if ( failIfNoMaster )
throw new MongoException( "can't find a master" );
@ -442,42 +508,43 @@ public class DBTCPConnector implements DBConnector {
}
} else {
// single server, may have to obtain max bson size
if (_maxBsonObjectSize.get() == 0)
fetchMaxBsonObjectSize();
if (_maxBsonObjectSize == 0)
initDirectConnection();
}
}
synchronized void setMaster(ReplicaSetStatus.Node master) {
synchronized void setMaster(ConnectionStatus.Node master) {
if (_closed.get()) {
return;
}
setMasterAddress(master.getServerAddress());
_maxBsonObjectSize.set(master.getMaxBsonObjectSize());
_maxBsonObjectSize = master.getMaxBsonObjectSize();
}
/**
* Fetches the maximum size for a BSON object from the current master server
* @return the size, or 0 if it could not be obtained
*/
int fetchMaxBsonObjectSize() {
void initDirectConnection() {
if (_masterPortPool == null)
return 0;
return;
DBPort port = _masterPortPool.get();
try {
CommandResult res = port.runCommand(_mongo.getDB("admin"), new BasicDBObject("isMaster", 1));
// max size was added in 1.8
if (res.containsField("maxBsonObjectSize")) {
_maxBsonObjectSize.set(((Integer) res.get("maxBsonObjectSize")).intValue());
_maxBsonObjectSize = (Integer) res.get("maxBsonObjectSize");
} else {
_maxBsonObjectSize.set(Bytes.MAX_OBJECT_SIZE);
_maxBsonObjectSize = Bytes.MAX_OBJECT_SIZE;
}
String msg = res.getString("msg");
_isMongosDirectConnection = msg != null && msg.equals("isdbgrid");
} catch (Exception e) {
_logger.log(Level.WARNING, "Exception determining maxBSONObjectSize ", e);
_logger.log(Level.WARNING, "Exception executing isMaster command on " + port.serverAddress(), e);
} finally {
port.getPool().done(port);
}
return _maxBsonObjectSize.get();
}
@ -488,15 +555,15 @@ public class DBTCPConnector implements DBConnector {
return false;
if ( _masterPortPool != null )
_logger.log(Level.WARNING, "Master switching from " + _masterPortPool.getServerAddress() + " to " + addr);
_logger.log(Level.WARNING, "Primary switching from " + _masterPortPool.getServerAddress() + " to " + addr);
_masterPortPool = newPool;
return true;
}
public String debugString(){
StringBuilder buf = new StringBuilder( "DBTCPConnector: " );
if ( _rsStatus != null ) {
buf.append( "replica set : " ).append( _allHosts );
if ( _connectionStatus != null ) {
buf.append( "set : " ).append( _mongo._authority.getServerAddresses() );
} else {
ServerAddress master = getAddress();
buf.append( master ).append( " " ).append( master != null ? master.getSocketAddress() : null );
@ -513,16 +580,12 @@ public class DBTCPConnector implements DBConnector {
_portHolder = null;
} catch (final Throwable t) { /* nada */ }
}
if ( _rsStatus != null ) {
if ( _connectionStatus != null ) {
try {
_rsStatus.close();
_rsStatus = null;
_connectionStatus.close();
_connectionStatus = null;
} catch (final Throwable t) { /* nada */ }
}
// below this will remove the myport for this thread only
// client using thread pool in web framework may need to call close() from all threads
_myPort.remove();
}
/**
@ -549,33 +612,43 @@ public class DBTCPConnector implements DBConnector {
return ! _closed.get();
}
@Override
public CommandResult authenticate(MongoCredential credentials) {
checkMaster(false, true);
final DBPort port = _myPort.get(false, ReadPreference.primaryPreferred(), null);
try {
CommandResult result = port.authenticate(_mongo, credentials);
_mongo.getAuthority().getCredentialsStore().add(credentials);
return result;
} finally {
_myPort.done(port);
}
}
/**
* Gets the maximum size for a BSON object supported by the current master server.
* Note that this value may change over time depending on which server is master.
* @return the maximum size, or 0 if not obtained from servers yet.
*/
public int getMaxBsonObjectSize() {
return _maxBsonObjectSize.get();
return _maxBsonObjectSize;
}
// expose for unit testing
MyPort getMyPort() {
return _myPort.get();
return _myPort;
}
private volatile DBPortPool _masterPortPool;
private final Mongo _mongo;
private DBPortPool.Holder _portHolder;
private final List<ServerAddress> _allHosts;
private ReplicaSetStatus _rsStatus;
private ConnectionStatus _connectionStatus;
private final AtomicBoolean _closed = new AtomicBoolean(false);
private final AtomicInteger _maxBsonObjectSize = new AtomicInteger(0);
private ThreadLocal<MyPort> _myPort = new ThreadLocal<MyPort>(){
protected MyPort initialValue(){
return new MyPort();
}
};
private volatile int _maxBsonObjectSize;
private volatile Boolean _isMongosDirectConnection;
MyPort _myPort = new MyPort();
}

View File

@ -19,19 +19,21 @@
package com.massivecraft.mcore.xlib.mongodb;
// Bson
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.BasicBSONCallback;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import java.util.LinkedList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* This class overrides BasicBSONCallback to implement some extra features specific to the Database.
* For example DBRef type.
* @author antoine
*/
@SuppressWarnings({"rawtypes"})
public class DefaultDBCallback extends BasicBSONCallback implements DBCallback {
static class DefaultFactory implements DBCallbackFactory {
@ -59,27 +61,41 @@ public class DefaultDBCallback extends BasicBSONCallback implements DBCallback {
@Override
public void objectStart(boolean array, String name){
_lastName = name;
_nameStack.addLast(name);
super.objectStart( array , name );
}
@Override
public Object objectDone(){
BSONObject o = (BSONObject)super.objectDone();
if ( ! ( o instanceof List ) &&
String lastName = null;
if ( _nameStack.size() > 0 ){
lastName = _nameStack.removeLast();
}
if ( ! ( o instanceof List ) && lastName != null &&
o.containsField( "$ref" ) &&
o.containsField( "$id" ) ){
return cur().put( _lastName , new DBRef( _db, o ) );
return cur().put(lastName, new DBRef( _db, o ) );
}
return o;
}
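
objectDone() above now keys the DBRef conversion off the name stack rather than the single _lastName field, so nested sub-documents carrying both $ref and $id still come back as DBRef values. For illustration, this is the stored shape that triggers the conversion (collection name and field name are made up):

    // A field stored as {"owner": {"$ref": "users", "$id": ObjectId("...")}}
    // decodes with "owner" holding a DBRef rather than a plain sub-document.
    DBObject refShape = new BasicDBObject("$ref", "users").append("$id", new ObjectId());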
/**
* @return
* @throws MongoException
*/
@Override
public BSONObject create(){
return _create( null );
}
/**
* @param array
* @param path
* @return
* @throws MongoException
*/
@Override
public BSONObject create( boolean array , List<String> path ){
if ( array )
@ -87,7 +103,6 @@ public class DefaultDBCallback extends BasicBSONCallback implements DBCallback {
return _create( path );
}
@SuppressWarnings("rawtypes")
private DBObject _create( List<String> path ){
Class c = null;
@ -131,11 +146,11 @@ public class DefaultDBCallback extends BasicBSONCallback implements DBCallback {
@Override
public void reset(){
_lastName = null;
_nameStack = new LinkedList<String>();
super.reset();
}
private String _lastName;
private LinkedList<String> _nameStack;
final DBCollection _collection;
final DB _db;
static final Logger LOGGER = Logger.getLogger( "com.mongo.DECODING" );

View File

@ -15,11 +15,11 @@
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.BasicBSONDecoder;
import java.io.IOException;
import java.io.InputStream;
import com.massivecraft.mcore.xlib.bson.BasicBSONDecoder;
/**
*
* @author antoine
@ -31,6 +31,11 @@ public class DefaultDBDecoder extends BasicBSONDecoder implements DBDecoder {
public DBDecoder create( ){
return new DefaultDBDecoder( );
}
@Override
public String toString() {
return "DefaultDBDecoder.DefaultFactory";
}
}
public static DBDecoderFactory FACTORY = new DefaultFactory();
@ -57,4 +62,8 @@ public class DefaultDBDecoder extends BasicBSONDecoder implements DBDecoder {
return (DBObject) cbk.get();
}
@Override
public String toString() {
return "DefaultDBDecoder";
}
}

View File

@ -12,13 +12,12 @@
*/
package com.massivecraft.mcore.xlib.mongodb;
import static com.massivecraft.mcore.xlib.bson.BSON.EOO;
import static com.massivecraft.mcore.xlib.bson.BSON.OBJECT;
import static com.massivecraft.mcore.xlib.bson.BSON.REF;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.BasicBSONEncoder;
import com.massivecraft.mcore.xlib.bson.io.OutputBuffer;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.bson.*;
import com.massivecraft.mcore.xlib.bson.io.*;
import com.massivecraft.mcore.xlib.bson.types.*;
import static com.massivecraft.mcore.xlib.bson.BSON.*;
public class DefaultDBEncoder extends BasicBSONEncoder implements DBEncoder {
@ -35,6 +34,12 @@ public class DefaultDBEncoder extends BasicBSONEncoder implements DBEncoder {
public DBEncoder create( ){
return new DefaultDBEncoder( );
}
@Override
public String toString() {
return "DefaultDBEncoder.DefaultFactory";
}
}
@SuppressWarnings("deprecation")
@ -80,4 +85,9 @@ public class DefaultDBEncoder extends BasicBSONEncoder implements DBEncoder {
public DefaultDBEncoder( ){
}
@Override
public String toString() {
return "DefaultDBEncoder";
}
}

View File

@ -0,0 +1,195 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Responsible for dynamically determining whether the list of server address represents a set of mongos server or
* a replica set. It starts threads that call the ismaster command on every server in the seed list, and as soon as it
* reaches one determines what type of server it is. It then creates the appropriate ConnectionStatus implementation
* and forwards all calls to it.
*/
class DynamicConnectionStatus extends ConnectionStatus {
private static final Logger logger = Logger.getLogger("com.mongodb.DynamicConnectionStatus");
DynamicConnectionStatus(Mongo mongo, List<ServerAddress> mongosAddresses) {
super(mongosAddresses, mongo);
}
@Override
void start() {
super.start();
executorService = Executors.newFixedThreadPool(_mongosAddresses.size());
initExecutorService();
}
@Override
void close() {
if (connectionStatus != null) {
connectionStatus.close();
}
if (executorService != null) {
executorService.shutdownNow();
}
super.close();
}
ReplicaSetStatus asReplicaSetStatus() {
ConnectionStatus connectionStatus = getConnectionStatus();
if (connectionStatus instanceof ReplicaSetStatus) {
return (ReplicaSetStatus) connectionStatus;
}
return null;
}
MongosStatus asMongosStatus() {
ConnectionStatus connectionStatus = getConnectionStatus();
if (connectionStatus instanceof MongosStatus) {
return (MongosStatus) connectionStatus;
}
return null;
}
@Override
List<ServerAddress> getServerAddressList() {
if (connectionStatus != null) {
return connectionStatus.getServerAddressList();
} else {
return new ArrayList<ServerAddress>(_mongosAddresses);
}
}
@Override
boolean hasServerUp() {
ConnectionStatus connectionStatus = getConnectionStatus();
if (connectionStatus != null) {
return connectionStatus.hasServerUp();
} else {
return false;
}
}
@Override
Node ensureMaster() {
ConnectionStatus connectionStatus = getConnectionStatus();
if (connectionStatus != null) {
return connectionStatus.ensureMaster();
} else {
return null;
}
}
void initExecutorService() {
try {
for (final ServerAddress cur : _mongosAddresses) {
executorService.submit(new Runnable() {
@Override
public void run() {
DynamicNode node = new DynamicNode(cur, _mongo, _mongoOptions);
try {
while (!Thread.interrupted()) {
try {
node.update();
if (node._ok) {
notifyOfOkNode(node);
return;
}
} catch (Exception e) {
logger.log(Level.WARNING, "couldn't reach " + node._addr, e);
}
int sleepTime = updaterIntervalNoMasterMS;
Thread.sleep(sleepTime);
}
} catch (InterruptedException e) {
// fall through
}
}
});
}
} catch (RejectedExecutionException e) {
// Ignore, as this can happen if a good node is found before all jobs are submitted and the service has
// been shutdown.
}
}
private void notifyOfOkNode(DynamicNode node) {
synchronized (this) {
if (connectionStatus != null) {
return;
}
if (node.isMongos) {
connectionStatus = new MongosStatus(_mongo, _mongosAddresses);
} else {
connectionStatus = new ReplicaSetStatus(_mongo, _mongosAddresses);
}
notifyAll();
}
connectionStatus.start();
executorService.shutdownNow();
}
static class DynamicNode extends UpdatableNode {
DynamicNode(final ServerAddress addr, Mongo mongo, MongoOptions mongoOptions) {
super(addr, mongo, mongoOptions);
}
@Override
protected Logger getLogger() {
return logger;
}
@Override
public CommandResult update() {
CommandResult res = super.update();
if (res != null) {
String msg = res.getString("msg");
if (msg != null && msg.equals("isdbgrid")) {
isMongos = true;
}
}
return res;
}
private boolean isMongos;
}
private synchronized ConnectionStatus getConnectionStatus() {
if (connectionStatus == null) {
try {
wait(_mongo.getMongoOptions().getConnectTimeout());
} catch (InterruptedException e) {
throw new MongoInterruptedException("Interrupted while waiting for next update to dynamic status", e);
}
}
return connectionStatus;
}
private volatile ConnectionStatus connectionStatus;
private ExecutorService executorService;
}
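The detection rule above reduces to one server-side check: a mongos answers the isMaster command with a msg field of "isdbgrid", while a mongod does not. A hedged standalone sketch of that check follows, using only driver classes that appear in this diff; the localhost address is an assumption, and the snippet is meant to live in a method that declares throws UnknownHostException (for the ServerAddress constructor).
// Hedged sketch, not driver code: reproduces the isdbgrid test that DynamicNode.update() relies on.
// Assumption: a reachable server on localhost:27017.
Mongo mongo = new Mongo(new ServerAddress("localhost", 27017));
try {
    CommandResult res = mongo.getDB("admin").command(new BasicDBObject("isMaster", 1));
    boolean isMongos = "isdbgrid".equals(res.getString("msg"));
    System.out.println(isMongos ? "mongos (sharded cluster router)" : "mongod (standalone or replica set member)");
} finally {
    mongo.close();
}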

View File

@ -0,0 +1,81 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
import java.util.concurrent.TimeUnit;
/**
* This class is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases.
*/
public class InUseConnectionBean {
InUseConnectionBean(final DBPort port, long currentNanoTime) {
DBPort.ActiveState activeState = port.getActiveState();
if (activeState == null) {
durationMS = 0;
namespace = null;
opCode = null;
query = null;
threadName = null;
numDocuments = 0;
}
else {
durationMS = TimeUnit.NANOSECONDS.toMillis(currentNanoTime - activeState.startTime);
namespace = activeState.outMessage.getNamespace();
opCode = activeState.outMessage.getOpCode();
query = activeState.outMessage.getQuery() != null ? activeState.outMessage.getQuery().toString() : null;
threadName = activeState.threadName;
numDocuments = activeState.outMessage.getNumDocuments();
}
localPort = port.getLocalPort();
}
public String getNamespace() {
return namespace;
}
public OutMessage.OpCode getOpCode() {
return opCode;
}
public String getQuery() {
return query;
}
public int getLocalPort() {
return localPort;
}
public long getDurationMS() {
return durationMS;
}
public String getThreadName() {
return threadName;
}
public int getNumDocuments() {
return numDocuments;
}
private final String namespace;
private final OutMessage.OpCode opCode;
private final String query;
private final int localPort;
private final long durationMS;
private final String threadName;
private final int numDocuments;
}

View File

@ -0,0 +1,29 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* This class exists only so that on Java 5 the driver can create instances of a standard MBean,
* therefore keeping compatibility with the JMX implementation in the Java 5 JMX class libraries.
*/
class Java5MongoConnectionPool extends DBPortPool implements Java5MongoConnectionPoolMBean {
Java5MongoConnectionPool(ServerAddress addr, MongoOptions options) {
super(addr, options);
}
}

View File

@ -0,0 +1,66 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* A standard MBean interface for a Mongo connection pool, for use on Java 5 virtual machines.
* <p>
* This interface is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases.
*/
public interface Java5MongoConnectionPoolMBean {
/**
* Gets the name of the pool.
*
* @return the name of the pool
*/
String getName();
/**
* Gets the host that this connection pool is connecting to.
*
* @return the host
*/
String getHost();
/**
* Gets the port that this connection pool is connecting to.
*
* @return the port
*/
int getPort();
/**
* Gets the total number of pool members, including idle and in-use members.
*
* @return total number of members
*/
int getTotal();
/**
* Gets the number of pool members that are currently in use.
*
* @return number of in-use members
*/
int getInUse();
/**
* Gets the maximum allowed size of the pool, including idle and in-use members.
*
* @return the maximum size
*/
int getMaxSize();
}
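For reference, the attributes declared by this MBean interface can be read back through the standard JMX API. A hedged sketch follows; the "com.mongodb" ObjectName domain and the key properties under which the driver registers its pools are assumptions not shown in this diff, and the attribute names are simply the getter names of this interface.
// Hedged sketch using only standard javax.management APIs; the "com.mongodb" JMX domain is an assumption.
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class PoolJmxDump {
    public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        for (ObjectName name : server.queryNames(new ObjectName("com.mongodb:*"), null)) {
            // Attribute names follow the getters of Java5MongoConnectionPoolMBean.
            System.out.println(name
                    + " inUse=" + server.getAttribute(name, "InUse")
                    + " total=" + server.getAttribute(name, "Total")
                    + " maxSize=" + server.getAttribute(name, "MaxSize"));
        }
    }
}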

View File

@ -24,6 +24,7 @@ import com.massivecraft.mcore.xlib.bson.types.ObjectId;
/**
*
*/
@SuppressWarnings({"rawtypes", "unused"})
public class LazyDBCallback extends LazyBSONCallback implements DBCallback {
public LazyDBCallback( DBCollection coll ){
@ -31,7 +32,6 @@ public class LazyDBCallback extends LazyBSONCallback implements DBCallback {
_db = _collection == null ? null : _collection.getDB();
}
@SuppressWarnings("rawtypes")
@Override
public Object createObject( byte[] data, int offset ){
LazyDBObject o = new LazyDBObject( data, offset, this );
@ -52,6 +52,5 @@ public class LazyDBCallback extends LazyBSONCallback implements DBCallback {
final DBCollection _collection;
final DB _db;
@SuppressWarnings("unused")
private static final Logger log = Logger.getLogger( LazyDBCallback.class.getName() );
}

View File

@ -25,6 +25,13 @@ import java.io.IOException;
* Encoder that only knows how to encode BSONObject instances of type LazyDBObject.
*/
public class LazyDBEncoder implements DBEncoder {
/**
* @param buf
* @param o
* @return
* @throws MongoException
*/
@Override
public int writeObject(final OutputBuffer buf, BSONObject o) {
if (!(o instanceof LazyDBObject)) {

View File

@ -21,13 +21,13 @@ import java.util.logging.Logger;
/**
*
*/
@SuppressWarnings({"rawtypes", "unused"})
public class LazyWriteableDBCallback extends LazyDBCallback {
public LazyWriteableDBCallback( DBCollection coll ){
super(coll);
}
@SuppressWarnings("rawtypes")
@Override
public Object createObject( byte[] data, int offset ){
LazyWriteableDBObject o = new LazyWriteableDBObject( data, offset, this );
@ -42,6 +42,5 @@ public class LazyWriteableDBCallback extends LazyDBCallback {
return o;
}
@SuppressWarnings("unused")
private static final Logger log = Logger.getLogger( LazyWriteableDBCallback.class.getName() );
}

View File

@ -24,6 +24,7 @@ import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.LazyBSONCallback;
import com.massivecraft.mcore.xlib.bson.io.BSONByteBuffer;
@SuppressWarnings({"unchecked", "rawtypes"})
public class LazyWriteableDBObject extends LazyDBObject {
public LazyWriteableDBObject(BSONByteBuffer buff, LazyBSONCallback cbk){
@ -64,7 +65,6 @@ public class LazyWriteableDBObject extends LazyDBObject {
/* (non-Javadoc)
* @see org.bson.LazyBSONObject#putAll(java.util.Map)
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public void putAll(Map m) {
writeable.putAll(m);

View File

@ -52,10 +52,9 @@ public class MapReduceCommand {
* @param query
* the query to use on input
* @return
* @throws MongoException
* @dochub mapreduce
*/
public MapReduceCommand(DBCollection inputCollection , String map , String reduce , String outputCollection, OutputType type, DBObject query) throws MongoException {
public MapReduceCommand(DBCollection inputCollection , String map , String reduce , String outputCollection, OutputType type, DBObject query) {
_input = inputCollection.getName();
_map = map;
_reduce = reduce;
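A hedged usage sketch of the constructor shown above: the collection name, JavaScript map/reduce bodies, and output collection name are illustrative assumptions; OutputType, MapReduceOutput, and DBCollection.mapReduce belong to the same driver and are not changed by this diff.
// Hypothetical example data: documents with a "category" field; "mongo" is an existing Mongo instance.
DBCollection collection = mongo.getDB("test").getCollection("items");
String map = "function() { emit(this.category, 1); }";
String reduce = "function(key, values) { return Array.sum(values); }";
MapReduceCommand cmd = new MapReduceCommand(collection, map, reduce,
        "category_counts", MapReduceCommand.OutputType.REPLACE, new BasicDBObject());
MapReduceOutput out = collection.mapReduce(cmd);
for (DBObject doc : out.results()) {
    System.out.println(doc);
}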

View File

@ -65,6 +65,7 @@ public class MapReduceOutput {
/**
* drops the collection that holds the results
* @throws MongoException
*/
public void drop(){
if ( _coll != null)

View File

@ -18,6 +18,8 @@
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.io.PoolOutputBuffer;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
@ -25,54 +27,56 @@ import java.util.Collection;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import com.massivecraft.mcore.xlib.bson.io.PoolOutputBuffer;
import java.util.logging.Logger;
/**
* A database connection with internal pooling.
* For most application, you should have 1 Mongo instance for the entire JVM.
*
* The following are equivalent, and all connect to the
* local database running on the default port:
*
* <blockquote><pre>
* Mongo mongo1 = new Mongo( "127.0.0.1" );
* Mongo mongo2 = new Mongo( "127.0.0.1", 27017 );
* Mongo mongo3 = new Mongo( new DBAddress( "127.0.0.1", 27017, "test" ) );
* Mongo mongo4 = new Mongo( new ServerAddress( "127.0.0.1") );
* </pre></blockquote>
*
* Mongo instances have connection pooling built in - see the requestStart
* and requestDone methods for more information.
* http://www.mongodb.org/display/DOCS/Java+Driver+Concurrency
*
* <h3>Connecting to a Replica Set</h3>
* A database connection with internal connection pooling. For most applications, you should have one Mongo instance
* for the entire JVM.
* <p>
* The following are equivalent, and all connect to the local database running on the default port:
* <pre>
* Mongo mongo1 = new Mongo();
* Mongo mongo2 = new Mongo("localhost");
* Mongo mongo3 = new Mongo("localhost", 27017);
* Mongo mongo4 = new Mongo(new ServerAddress("localhost"));
* </pre>
* <p>
* You can connect to a
* <a href="http://www.mongodb.org/display/DOCS/Replica+Sets">replica set</a>
* using the Java driver by passing a list of ServerAddress to the
* Mongo constructor.
* For example:
* </p>
* <blockquote><pre>
* List<ServerAddress> addrs = new ArrayList<ServerAddress>();
* addrs.add( new ServerAddress( "127.0.0.1" , 27017 ) );
* addrs.add( new ServerAddress( "127.0.0.1" , 27018 ) );
* addrs.add( new ServerAddress( "127.0.0.1" , 27019 ) );
*
* Mongo mongo = new Mongo( addrs );
* </pre></blockquote>
*
* <a href="http://www.mongodb.org/display/DOCS/Replica+Sets">replica set</a> using the Java driver by passing
* a ServerAddress list to the Mongo constructor. For example:
* <pre>
* Mongo mongo = new Mongo(Arrays.asList(
* new ServerAddress("localhost", 27017),
* new ServerAddress("localhost", 27018),
* new ServerAddress("localhost", 27019)));
* </pre>
* You can connect to a sharded cluster using the same constructor. Mongo will auto-detect whether the servers are
* a list of replica set members or a list of mongos servers.
* <p>
* By default, all read and write operations will be made on the master.
* But it's possible to read from the slave(s) by using slaveOk:
* </p>
* <blockquote><pre>
* mongo.slaveOk();
* </pre></blockquote>
* By default, all read and write operations will be made on the primary,
* but it's possible to read from secondaries by changing the read preference:
* <p>
* <pre>
* mongo.setReadPreference(ReadPreference.secondary());
* </pre>
* By default, write operations will not throw exceptions on failure, but that is easily changed too:
* <p>
* <pre>
* mongo.setWriteConcern(WriteConcern.SAFE);
* </pre>
*
* Note: This class has been superseded by {@code MongoClient}, and may be deprecated in a future release.
*
* @see MongoClient
* @see ReadPreference
* @see WriteConcern
*/
@SuppressWarnings({"rawtypes"})
public class Mongo {
static Logger logger = Logger.getLogger(Bytes.LOGGER.getName() + ".Mongo");
// Make sure you don't change the format of these two static variables. A preprocessing regexp
// is applied and updates the version based on configuration in build.properties.
@ -86,11 +90,14 @@ public class Mongo {
* @deprecated Replaced by <code>Mongo.getMinorVersion()</code>
*/
@Deprecated
public static final int MINOR_VERSION = 8;
public static final int MINOR_VERSION = 11;
private static final String FULL_VERSION = "2.8.0";
private static final String FULL_VERSION = "2.11.1";
static int cleanerIntervalMS;
private static final String ADMIN_DATABASE_NAME = "admin";
static {
cleanerIntervalMS = Integer.parseInt(System.getProperty("com.mongodb.cleanerIntervalMS", "1000"));
}
@ -115,6 +122,7 @@ public class Mongo {
* returns a database object
* @param addr the database address
* @return
* @throws MongoException
*/
public static DB connect( DBAddress addr ){
return new Mongo( addr ).getDB( addr.getDBName() );
@ -124,9 +132,13 @@ public class Mongo {
* Creates a Mongo instance based on a (single) mongodb node (localhost, default port)
* @throws UnknownHostException
* @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient()})
*
*/
@Deprecated
public Mongo()
throws UnknownHostException , MongoException {
throws UnknownHostException {
this( new ServerAddress() );
}
@ -135,9 +147,13 @@ public class Mongo {
* @param host server to connect to
* @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(String)}
*
*/
@Deprecated
public Mongo( String host )
throws UnknownHostException , MongoException {
throws UnknownHostException{
this( new ServerAddress( host ) );
}
@ -147,9 +163,13 @@ public class Mongo {
* @param options default query options
* @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(String, MongoClientOptions)}
*
*/
@Deprecated
public Mongo( String host , MongoOptions options )
throws UnknownHostException , MongoException {
throws UnknownHostException {
this( new ServerAddress( host ) , options );
}
@ -159,9 +179,13 @@ public class Mongo {
* @param port the port on which the database is running
* @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(String, int)}
*
*/
@Deprecated
public Mongo( String host , int port )
throws UnknownHostException , MongoException {
throws UnknownHostException {
this( new ServerAddress( host , port ) );
}
@ -170,10 +194,13 @@ public class Mongo {
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @param addr the database address
* @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(ServerAddress)}
*
*/
public Mongo( ServerAddress addr )
throws MongoException {
this( addr , new MongoOptions() );
@Deprecated
public Mongo( ServerAddress addr ) {
this(addr, new MongoOptions());
}
/**
@ -182,17 +209,13 @@ public class Mongo {
* @param addr the database address
* @param options default query options
* @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(ServerAddress, MongoClientOptions)}
*
*/
public Mongo( ServerAddress addr , MongoOptions options )
throws MongoException {
_addr = addr;
_addrs = null;
_options = options;
_applyMongoOptions();
_connector = new DBTCPConnector( this , _addr );
_connector.start();
_cleaner = new DBCleanerThread();
_cleaner.start();
@Deprecated
public Mongo( ServerAddress addr , MongoOptions options ) {
this(MongoAuthority.direct(addr), options);
}
/**
@ -206,9 +229,8 @@ public class Mongo {
* @throws MongoException
*/
@Deprecated
public Mongo( ServerAddress left , ServerAddress right )
throws MongoException {
this( left , right , new MongoOptions() );
public Mongo( ServerAddress left , ServerAddress right ) {
this(left, right, new MongoOptions());
}
/**
@ -223,94 +245,103 @@ public class Mongo {
* @throws MongoException
*/
@Deprecated
public Mongo( ServerAddress left , ServerAddress right , MongoOptions options )
throws MongoException {
_addr = null;
_addrs = Arrays.asList( left , right );
_options = options;
_applyMongoOptions();
_connector = new DBTCPConnector( this , _addrs );
_connector.start();
_cleaner = new DBCleanerThread();
_cleaner.start();
public Mongo( ServerAddress left , ServerAddress right , MongoOptions options ) {
this(MongoAuthority.dynamicSet(Arrays.asList(left, right)), options);
}
/**
* <p>Creates a Mongo based on a replica set, or pair.
* Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.</p>
* use the Mongo(ServerAddress) constructor.
* <p>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @param replicaSetSeeds Put as many servers as you can in the list and
* the system will figure out the rest.
* @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* sharded cluster.
* @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(java.util.List)}
*
*/
public Mongo( List<ServerAddress> replicaSetSeeds )
throws MongoException {
this( replicaSetSeeds , new MongoOptions() );
@Deprecated
public Mongo( List<ServerAddress> seeds ) {
this( seeds , new MongoOptions() );
}
/**
* <p>Creates a Mongo based on a replica set, or pair.
* It will find all members (the master will be used by default).</p>
* Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.
* <p>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @param replicaSetSeeds put as many servers as you can in the list.
* the system will figure the rest out
* @param options default query options
* @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* sharded cluster.
* @param options for configuring this Mongo instance
* @throws MongoException
*
* @deprecated Replaced by {@link MongoClient#MongoClient(java.util.List, MongoClientOptions)}
*
*/
public Mongo( List<ServerAddress> replicaSetSeeds , MongoOptions options )
throws MongoException {
_addr = null;
_addrs = replicaSetSeeds;
_options = options;
_applyMongoOptions();
_connector = new DBTCPConnector( this , _addrs );
_connector.start();
_cleaner = new DBCleanerThread();
_cleaner.start();
@Deprecated
public Mongo( List<ServerAddress> seeds , MongoOptions options ) {
this(MongoAuthority.dynamicSet(seeds), options);
}
/**
* Creates a Mongo described by a URI.
* If only one address is used it will only connect to that node, otherwise it will discover all nodes.
* If the URI contains database credentials, the database will be authenticated lazily on first use
* with those credentials.
* @param uri
* @see MongoURI
* <p>examples:
* <li>mongodb://127.0.0.1</li>
* <li>mongodb://fred:foobar@127.0.0.1/</li>
* <li>mongodb://localhost</li>
* <li>mongodb://fred:foobar@localhost/</li>
* </p>
* @throws MongoException
* @throws UnknownHostException
* @dochub connections
*
* @deprecated Replaced by {@link MongoClient#MongoClient(MongoClientURI)}
*
*/
@Deprecated
public Mongo( MongoURI uri ) throws UnknownHostException {
this(getMongoAuthorityFromURI(uri), uri.getOptions());
}
public Mongo( MongoURI uri )
throws MongoException , UnknownHostException {
_options = uri.getOptions();
/**
* Creates a Mongo based on an authority and options.
* <p>
* Note: This constructor is provisional and is subject to change before the final release
*
* @param authority the authority
* @param options the options
*/
Mongo(MongoAuthority authority, MongoOptions options) {
logger.info("Creating Mongo instance (driver version " + getVersion() + ") with authority " + authority + " and options " + options);
_authority = authority;
_options = options;
_applyMongoOptions();
if ( uri.getHosts().size() == 1 ){
_addr = new ServerAddress( uri.getHosts().get(0) );
_addrs = null;
_connector = new DBTCPConnector( this , _addr );
}
else {
List<ServerAddress> replicaSetSeeds = new ArrayList<ServerAddress>( uri.getHosts().size() );
for ( String host : uri.getHosts() )
replicaSetSeeds.add( new ServerAddress( host ) );
_addr = null;
_addrs = replicaSetSeeds;
_connector = new DBTCPConnector( this , replicaSetSeeds );
}
_connector = new DBTCPConnector( this );
_connector.start();
_cleaner = new DBCleanerThread();
if (_options.cursorFinalizerEnabled) {
_cleaner = new CursorCleanerThread();
_cleaner.start();
} else {
_cleaner = null;
}
}
/**
@ -345,15 +376,13 @@ public class Mongo {
* @return
* @throws MongoException
*/
@SuppressWarnings("rawtypes")
public List<String> getDatabaseNames()
throws MongoException {
public List<String> getDatabaseNames(){
BasicDBObject cmd = new BasicDBObject();
cmd.put("listDatabases", 1);
CommandResult res = getDB( "admin" ).command(cmd, getOptions());
CommandResult res = getDB(ADMIN_DATABASE_NAME).command(cmd, getOptions());
res.throwOnError();
List l = (List)res.get("databases");
@ -372,8 +401,7 @@ public class Mongo {
* @param dbName name of database to drop
* @throws MongoException
*/
public void dropDatabase(String dbName)
throws MongoException {
public void dropDatabase(String dbName){
getDB( dbName ).dropDatabase();
}
@ -442,6 +470,7 @@ public class Mongo {
* Gets the list of server addresses currently seen by the connector.
* This includes addresses auto-discovered from a replica set.
* @return
* @throws MongoException
*/
public List<ServerAddress> getServerAddressList() {
return _connector.getServerAddressList();
@ -457,6 +486,7 @@ public class Mongo {
_connector.close();
} catch (final Throwable t) { /* nada */ }
if (_cleaner != null) {
_cleaner.interrupt();
try {
@ -465,6 +495,7 @@ public class Mongo {
//end early
}
}
}
/**
* Sets the write concern for this database. Will be used as default for
@ -505,10 +536,10 @@ public class Mongo {
}
/**
* makes it possible to run read queries on slave nodes
* makes it possible to run read queries on secondary nodes
*
* @deprecated Replaced with ReadPreference.SECONDARY
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference.SECONDARY
* @deprecated Replaced with {@code ReadPreference.secondaryPreferred()}
* @see ReadPreference#secondaryPreferred()
*/
@Deprecated
public void slaveOk(){
@ -552,8 +583,13 @@ public class Mongo {
*/
@SuppressWarnings("deprecation")
void _applyMongoOptions() {
if (_options.slaveOk) slaveOk();
setWriteConcern( _options.getWriteConcern() );
if (_options.slaveOk) {
slaveOk();
}
if (_options.getReadPreference() != null) {
setReadPreference(_options.getReadPreference());
}
setWriteConcern(_options.getWriteConcern());
}
/**
@ -568,23 +604,42 @@ public class Mongo {
* Note that this value may change over time depending on which server is master.
* If the size is not known yet, a request may be sent to the master server
* @return the maximum size
* @throws MongoException
*/
public int getMaxBsonObjectSize() {
int maxsize = _connector.getMaxBsonObjectSize();
if (maxsize == 0)
maxsize = _connector.fetchMaxBsonObjectSize();
if (maxsize == 0) {
_connector.initDirectConnection();
}
maxsize = _connector.getMaxBsonObjectSize();
return maxsize > 0 ? maxsize : Bytes.MAX_OBJECT_SIZE;
}
final ServerAddress _addr;
final List<ServerAddress> _addrs;
boolean isMongosConnection() {
return _connector.isMongosConnection();
}
private static MongoAuthority getMongoAuthorityFromURI(final MongoURI uri) throws UnknownHostException {
if ( uri.getHosts().size() == 1 ){
return MongoAuthority.direct(new ServerAddress(uri.getHosts().get(0)), uri.getCredentials());
}
else {
List<ServerAddress> replicaSetSeeds = new ArrayList<ServerAddress>(uri.getHosts().size());
for ( String host : uri.getHosts() )
replicaSetSeeds.add( new ServerAddress( host ) );
return MongoAuthority.dynamicSet(replicaSetSeeds, uri.getCredentials());
}
}
final MongoOptions _options;
final DBTCPConnector _connector;
final ConcurrentMap<String,DB> _dbs = new ConcurrentHashMap<String,DB>();
private WriteConcern _concern = WriteConcern.NORMAL;
private ReadPreference _readPref = ReadPreference.PRIMARY;
private ReadPreference _readPref = ReadPreference.primary();
final Bytes.OptionHolder _netOptions = new Bytes.OptionHolder( null );
final DBCleanerThread _cleaner;
final CursorCleanerThread _cleaner;
final MongoAuthority _authority;
com.massivecraft.mcore.xlib.bson.util.SimplePool<PoolOutputBuffer> _bufferPool =
new com.massivecraft.mcore.xlib.bson.util.SimplePool<PoolOutputBuffer>( 1000 ){
@ -600,33 +655,36 @@ public class Mongo {
* This is done automatically by the server at intervals, but can be forced for better reliability.
* @param async if true, the fsync will be done asynchronously on the server.
* @return
* @throws MongoException
*/
public CommandResult fsync(boolean async) {
DBObject cmd = new BasicDBObject("fsync", 1);
if (async) {
cmd.put("async", 1);
}
return getDB("admin").command(cmd);
return getDB(ADMIN_DATABASE_NAME).command(cmd);
}
/**
* Forces the master server to fsync the RAM data to disk, then lock all writes.
* The database will be read-only after this command returns.
* @return
* @throws MongoException
*/
public CommandResult fsyncAndLock() {
DBObject cmd = new BasicDBObject("fsync", 1);
cmd.put("lock", 1);
return getDB("admin").command(cmd);
return getDB(ADMIN_DATABASE_NAME).command(cmd);
}
/**
* Unlocks the database, allowing the write operations to go through.
* This command may be asynchronous on the server, which means there may be a small delay before the database becomes writable.
* @return
* @throws MongoException
*/
public DBObject unlock() {
DB db = getDB("admin");
DB db = getDB(ADMIN_DATABASE_NAME);
DBCollection col = db.getCollection("$cmd.sys.unlock");
return col.findOne();
}
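The admin helpers around this hunk (fsyncAndLock, unlock, isLocked) are typically used together around a filesystem-level backup. A hedged sketch, assuming "mongo" is an instance connected to the master with permission to run these admin commands:
// Hedged sketch: lock writes, take the backup outside the driver, then always unlock.
CommandResult lockResult = mongo.fsyncAndLock();
lockResult.throwOnError(); // fail fast if the lock was not taken
try {
    // ... copy data files / take a filesystem snapshot here ...
} finally {
    mongo.unlock(); // may be asynchronous on the server, per the javadoc above
}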
@ -634,9 +692,10 @@ public class Mongo {
/**
* Returns true if the database is locked (read-only), false otherwise.
* @return
* @throws MongoException
*/
public boolean isLocked() {
DB db = getDB("admin");
DB db = getDB(ADMIN_DATABASE_NAME);
DBCollection col = db.getCollection("$cmd.sys.inprog");
BasicDBObject res = (BasicDBObject) col.findOne();
if (res.containsField("fsyncLock")) {
@ -663,7 +722,7 @@ public class Mongo {
* @throws UnknownHostException
*/
public Mongo connect( MongoURI uri )
throws MongoException , UnknownHostException {
throws UnknownHostException {
String key = _toKey( uri );
@ -701,9 +760,9 @@ public class Mongo {
}
class DBCleanerThread extends Thread {
class CursorCleanerThread extends Thread {
DBCleanerThread() {
CursorCleanerThread() {
setDaemon(true);
setName("MongoCleaner" + hashCode());
}
@ -728,15 +787,18 @@ public class Mongo {
@Override
public String toString() {
StringBuilder str = new StringBuilder("Mongo: ");
List<ServerAddress> list = getServerAddressList();
if (list == null || list.size() == 0)
str.append("null");
else {
for ( ServerAddress addr : list )
str.append( addr.toString() ).append( ',' );
str.deleteCharAt( str.length() - 1 );
return "Mongo{" +
"authority=" + _authority +
", options=" + _options +
'}';
}
return str.toString();
/**
* Gets the authority, which includes the connection type, the server address(es), and the credentials.
* @return the authority
*/
MongoAuthority getAuthority() {
return _authority;
}
}

View File

@ -0,0 +1,214 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.util.annotations.Immutable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
* This class represents the authority to which this client is connecting. It includes
* both the server address(es) and optional authentication credentials. The class name is informed by the
* <a href="http://tools.ietf.org/html/rfc3986#section-3.2">URI RFC</a>, which refers to the username/host/port
* part of a URI as the "authority".
*
* @since 2.11.0
*/
@Immutable
class MongoAuthority {
private final Type type;
private final List<ServerAddress> serverAddresses;
private final MongoCredentialsStore credentialsStore;
/**
* Enumeration of the connection types.
*/
enum Type {
Direct,
Set
}
/**
*
* @param serverAddress
* @return
*/
public static MongoAuthority direct(ServerAddress serverAddress) {
return direct(serverAddress, (MongoCredential) null);
}
/**
*
* @param serverAddress
* @param credentials
* @return
*/
public static MongoAuthority direct(ServerAddress serverAddress, MongoCredential credentials) {
return direct(serverAddress, new MongoCredentialsStore(credentials));
}
/**
*
* @param serverAddress
* @param credentialsStore
* @return
*/
public static MongoAuthority direct(ServerAddress serverAddress, MongoCredentialsStore credentialsStore) {
return new MongoAuthority(serverAddress, credentialsStore);
}
/**
*
* @param serverAddresses
* @return
*/
public static MongoAuthority dynamicSet(List<ServerAddress> serverAddresses) {
return dynamicSet(serverAddresses, (MongoCredential) null);
}
/**
*
* @param serverAddresses
* @param credentials
* @return
*/
public static MongoAuthority dynamicSet(List<ServerAddress> serverAddresses, MongoCredential credentials) {
return dynamicSet(serverAddresses, new MongoCredentialsStore(credentials));
}
/**
*
* @param serverAddresses
* @param credentialsStore
* @return
*/
public static MongoAuthority dynamicSet(List<ServerAddress> serverAddresses, MongoCredentialsStore credentialsStore) {
return new MongoAuthority(serverAddresses, Type.Set, credentialsStore);
}
/**
* Constructs an instance with a single server address and a store of authentication credentials.
* This will be a direct connection, even if it's part of a replica set.
*
* @param serverAddress the server address of a mongo server
*/
private MongoAuthority(final ServerAddress serverAddress, MongoCredentialsStore credentialsStore) {
if (serverAddress == null) {
throw new IllegalArgumentException("serverAddress can not be null");
}
if (credentialsStore == null) {
throw new IllegalArgumentException("credentialsStore can not be null");
}
this.serverAddresses = Arrays.asList(serverAddress);
this.credentialsStore = credentialsStore;
this.type = Type.Direct;
}
/**
* Constructs an instance with a list of server addresses, which may either be a list of mongos servers
* or a list of members of a replica set, and a store of authentication credentials.
*
* @param serverAddresses the server addresses
* @param credentialsStore the credentials store
*/
private MongoAuthority(final List<ServerAddress> serverAddresses, Type type, MongoCredentialsStore credentialsStore) {
if (serverAddresses == null) {
throw new IllegalArgumentException("serverAddresses can not be null");
}
if (credentialsStore == null) {
throw new IllegalArgumentException("credentialsStore can not be null");
}
if (type == null) {
throw new IllegalArgumentException("type can not be null");
}
if (type == Type.Direct) {
throw new IllegalArgumentException("type can not be Direct with a list of server addresses");
}
this.type = type;
this.serverAddresses = new ArrayList<ServerAddress>(serverAddresses);
this.credentialsStore = credentialsStore;
}
/**
* Returns the list of server addresses.
*
* @return the server address list
*/
public List<ServerAddress> getServerAddresses() {
return serverAddresses == null ? null : Collections.unmodifiableList(serverAddresses);
}
/**
* Gets the credentials store. If this instance was constructed with a single credential, this store will
* contain it.
*
* @return the credentials store
*/
public MongoCredentialsStore getCredentialsStore() {
return credentialsStore;
}
/**
* Gets the authority type
*
* @return the authority type
*/
public Type getType() {
return type;
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final MongoAuthority that = (MongoAuthority) o;
if (!credentialsStore.equals(that.credentialsStore)) return false;
if (!serverAddresses.equals(that.serverAddresses)) return false;
if (type != that.type) return false;
return true;
}
@Override
public int hashCode() {
int result = credentialsStore.hashCode();
result = 31 * result + serverAddresses.hashCode();
result = 31 * result + type.hashCode();
return result;
}
@Override
public String toString() {
return "MongoAuthority{" +
"type=" + type +
", serverAddresses=" + serverAddresses +
", credentials=" + credentialsStore +
'}';
}
}
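MongoAuthority is package-private, so it is only reachable from driver-internal code such as the Mongo constructors elsewhere in this diff. A hedged illustration of the two factory shapes defined above, meant for same-package code in a method that declares throws UnknownHostException (for the ServerAddress constructors); the addresses are assumptions.
// Internal-only sketch: a direct single-server authority versus a dynamic set of seeds.
MongoAuthority direct = MongoAuthority.direct(new ServerAddress("localhost", 27017));
MongoAuthority set = MongoAuthority.dynamicSet(java.util.Arrays.asList(
        new ServerAddress("localhost", 27017),
        new ServerAddress("localhost", 27018)));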

View File

@ -0,0 +1,286 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import java.net.UnknownHostException;
import java.util.List;
/**
* A MongoDB client with internal connection pooling. For most applications, you should have one MongoClient instance
* for the entire JVM.
* <p>
* The following are equivalent, and all connect to the local database running on the default port:
* <pre>
* MongoClient mongoClient1 = new MongoClient();
* MongoClient mongoClient2 = new MongoClient("localhost");
* MongoClient mongoClient3 = new MongoClient("localhost", 27017);
* MongoClient mongoClient4 = new MongoClient(new ServerAddress("localhost"));
* MongoClient mongoClient5 = new MongoClient(new ServerAddress("localhost"), new MongoClientOptions.Builder().build());
* </pre>
* <p>
* You can connect to a
* <a href="http://www.mongodb.org/display/DOCS/Replica+Sets">replica set</a> using the Java driver by passing
* a ServerAddress list to the MongoClient constructor. For example:
* <pre>
* MongoClient mongoClient = new MongoClient(Arrays.asList(
* new ServerAddress("localhost", 27017),
* new ServerAddress("localhost", 27018),
* new ServerAddress("localhost", 27019)));
* </pre>
* You can connect to a sharded cluster using the same constructor. MongoClient will auto-detect whether the servers are
* a list of replica set members or a list of mongos servers.
* <p>
* By default, all read and write operations will be made on the primary, but it's possible to read from secondaries
* by changing the read preference:
* <pre>
* mongoClient.setReadPreference(ReadPreference.secondaryPreferred());
* </pre>
* By default, all write operations will wait for acknowledgment by the server, as the default write concern is
* {@code WriteConcern.ACKNOWLEDGED}.
* <p>
* Note: This class supersedes the {@code Mongo} class. While it extends {@code Mongo}, it differs from it in that
* the default write concern is to wait for acknowledgment from the server of all write operations. In addition, its
* constructors accept instances of {@code MongoClientOptions} and {@code MongoClientURI}, which both also
* set the same default write concern.
* <p>
* In general, users of this class will pick up all of the default options specified in {@code MongoClientOptions}. In
* particular, note that the default value of the connectionsPerHost option has been increased to 100 from the old
* default value of 10 used by the superseded {@code Mongo} class.
*
* @see ReadPreference#primary()
* @see com.massivecraft.mcore.xlib.mongodb.WriteConcern#ACKNOWLEDGED
* @see MongoClientOptions
* @see MongoClientURI
* @since 2.10.0
*/
public class MongoClient extends Mongo {
private final MongoClientOptions options;
/**
* Creates an instance based on a (single) mongodb node (localhost, default port).
*
* @throws UnknownHostException
* @throws MongoException
*/
public MongoClient() throws UnknownHostException {
this(new ServerAddress());
}
/**
* Creates a Mongo instance based on a (single) mongodb node.
*
* @param host server to connect to in format host[:port]
* @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException
*/
public MongoClient(String host) throws UnknownHostException {
this(new ServerAddress(host));
}
/**
* Creates a Mongo instance based on a (single) mongodb node (default port).
*
* @param host server to connect to in format host[:port]
* @param options default query options
* @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException
*/
public MongoClient(String host, MongoClientOptions options) throws UnknownHostException {
this(new ServerAddress(host), options);
}
/**
* Creates a Mongo instance based on a (single) mongodb node.
*
* @param host the database's host address
* @param port the port on which the database is running
* @throws UnknownHostException if the database host cannot be resolved
* @throws MongoException
*/
public MongoClient(String host, int port) throws UnknownHostException {
this(new ServerAddress(host, port));
}
/**
* Creates a Mongo instance based on a (single) mongodb node
*
* @param addr the database address
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
*/
public MongoClient(ServerAddress addr) {
this(addr, new MongoClientOptions.Builder().build());
}
/**
* Creates a Mongo instance based on a (single) mongodb node and a list of credentials
*
* @param addr the database address
* @param credentialsList the list of credentials used to authenticate all connections
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @since 2.11.0
*/
public MongoClient(ServerAddress addr, List<MongoCredential> credentialsList) {
this(addr, credentialsList, new MongoClientOptions.Builder().build());
}
/**
* Creates a Mongo instance based on a (single) mongo node using a given ServerAddress and default options.
*
* @param addr the database address
* @param options default options
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
*/
public MongoClient(ServerAddress addr, MongoClientOptions options) {
this(addr, null, options);
}
/**
* Creates a Mongo instance based on a (single) mongo node using a given ServerAddress and default options.
*
* @param addr the database address
* @param credentialsList the list of credentials used to authenticate all connections
* @param options default options
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @since 2.11.0
*/
@SuppressWarnings("deprecation")
public MongoClient(ServerAddress addr, List<MongoCredential> credentialsList, MongoClientOptions options) {
super(MongoAuthority.direct(addr, new MongoCredentialsStore(credentialsList)), new MongoOptions(options));
this.options = options;
}
/**
* Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.
* <p/>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* sharded cluster.
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
*/
public MongoClient(List<ServerAddress> seeds) {
this(seeds, null, new MongoClientOptions.Builder().build());
}
/**
* Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.
* <p/>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* sharded cluster.
* @param credentialsList the list of credentials used to authenticate all connections
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @since 2.11.0
*/
public MongoClient(List<ServerAddress> seeds, List<MongoCredential> credentialsList) {
this(seeds, credentialsList, new MongoClientOptions.Builder().build());
}
/**
* Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.
* <p/>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* sharded cluster.
* @param options default options
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
*/
public MongoClient(List<ServerAddress> seeds, MongoClientOptions options) {
this(seeds, null, options);
}
/**
* Creates a Mongo based on a list of replica set members or a list of mongos.
* It will find all members (the master will be used by default). If you pass in a single server in the list,
* the driver will still function as if it is a replica set. If you have a standalone server,
* use the Mongo(ServerAddress) constructor.
* <p/>
* If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to,
* and automatically fail over to the next server if the closest is down.
*
* @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can
* either be a list of mongod servers in the same replica set or a list of mongos servers in the same
* sharded cluster.
* @param credentialsList the list of credentials used to authenticate all connections
* @param options default options
* @throws MongoException
* @see com.massivecraft.mcore.xlib.mongodb.ServerAddress
* @since 2.11.0
*/
@SuppressWarnings("deprecation")
public MongoClient(List<ServerAddress> seeds, List<MongoCredential> credentialsList, MongoClientOptions options) {
super(MongoAuthority.dynamicSet(seeds, new MongoCredentialsStore(credentialsList)), new MongoOptions(options));
this.options = options;
}
/**
* Creates a Mongo described by a URI.
* If only one address is used it will only connect to that node, otherwise it will discover all nodes.
* @param uri the URI
* @throws MongoException
* @throws UnknownHostException
* @see MongoURI
* @dochub connections
*/
@SuppressWarnings("deprecation")
public MongoClient(MongoClientURI uri) throws UnknownHostException {
super(new MongoURI(uri));
this.options = uri.getOptions();
}
/**
* Gets the list of credentials that this client authenticates all connections with
*
* @return the list of credentials
* @since 2.11.0
*/
public List<MongoCredential> getCredentialsList() {
return getAuthority().getCredentialsStore().asList();
}
public MongoClientOptions getMongoClientOptions() {
return options;
}
}
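A hedged sketch combining the credential-aware constructors above. The database name, user, and password are assumptions, and MongoCredential.createMongoCRCredential is assumed to be the challenge-response factory shipped with this driver generation (it is not shown in this diff); the snippet belongs in a method that declares throws UnknownHostException and imports java.util.Arrays.
// Hypothetical credentials; MongoCredential.createMongoCRCredential(user, db, password) is an assumption from driver 2.11.
MongoCredential credential = MongoCredential.createMongoCRCredential("appUser", "appdb", "secret".toCharArray());
MongoClient client = new MongoClient(
        Arrays.asList(new ServerAddress("localhost", 27017), new ServerAddress("localhost", 27018)),
        Arrays.asList(credential),
        new MongoClientOptions.Builder().build());
try {
    System.out.println(client.getDatabaseNames());
} finally {
    client.close();
}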

View File

@ -0,0 +1,600 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.util.annotations.Immutable;
import javax.net.SocketFactory;
/**
* Various settings to control the behavior of a <code>MongoClient</code>.
* <p/>
* Note: This class is a replacement for {@code MongoOptions}, to be used with {@code MongoClient}. The main difference
* in behavior is that the default write concern is {@code WriteConcern.ACKNOWLEDGED}.
*
* @see MongoClient
* @since 2.10.0
*/
@Immutable
public class MongoClientOptions {
/**
* A builder for MongoClientOptions so that MongoClientOptions can be immutable, and to support easier
* construction through chaining.
*
* @since 2.10.0
*/
public static class Builder {
private String description;
private int connectionsPerHost = 100;
private int threadsAllowedToBlockForConnectionMultiplier = 5;
private int maxWaitTime = 1000 * 60 * 2;
private int connectTimeout = 1000 * 10;
private int socketTimeout = 0;
private boolean socketKeepAlive = false;
private boolean autoConnectRetry = false;
private long maxAutoConnectRetryTime = 0;
private ReadPreference readPreference = ReadPreference.primary();
private DBDecoderFactory dbDecoderFactory = DefaultDBDecoder.FACTORY;
private DBEncoderFactory dbEncoderFactory = DefaultDBEncoder.FACTORY;
private WriteConcern writeConcern = WriteConcern.ACKNOWLEDGED;
private SocketFactory socketFactory = SocketFactory.getDefault();
private boolean cursorFinalizerEnabled = true;
private boolean alwaysUseMBeans = false;
/**
* Sets the description.
*
* @param description the description of this MongoClient
* @return {@code this}
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getDescription()
*/
public Builder description(final String description) {
this.description = description;
return this;
}
/**
* Sets the maximum number of connections per host.
*
* @param connectionsPerHost maximum number of connections
* @return {@code this}
* @throws IllegalArgumentException if <code>connectionsPerHost < 1</code>
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getConnectionsPerHost()
*/
public Builder connectionsPerHost(final int connectionsPerHost) {
if (connectionsPerHost < 1) {
throw new IllegalArgumentException("Minimum value is 1");
}
this.connectionsPerHost = connectionsPerHost;
return this;
}
/**
* Sets the multiplier for number of threads allowed to block waiting for a connection.
*
* @param threadsAllowedToBlockForConnectionMultiplier
* the multiplier
* @return {@code this}
* @throws IllegalArgumentException if <code>threadsAllowedToBlockForConnectionMultiplier < 1</code>
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getThreadsAllowedToBlockForConnectionMultiplier()
*/
public Builder threadsAllowedToBlockForConnectionMultiplier(final int threadsAllowedToBlockForConnectionMultiplier) {
if (threadsAllowedToBlockForConnectionMultiplier < 1) {
throw new IllegalArgumentException("Minimum value is 1");
}
this.threadsAllowedToBlockForConnectionMultiplier = threadsAllowedToBlockForConnectionMultiplier;
return this;
}
/**
* Sets the maximum time that a thread will block waiting for a connection.
*
* @param maxWaitTime the maximum wait time
* @return {@code this}
* @throws IllegalArgumentException if <code>maxWaitTime < 0</code>
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getMaxWaitTime()
*/
public Builder maxWaitTime(final int maxWaitTime) {
if (maxWaitTime < 0) {
throw new IllegalArgumentException("Minimum value is 0");
}
this.maxWaitTime = maxWaitTime;
return this;
}
/**
* Sets the connection timeout.
*
* @param connectTimeout the connection timeout
* @return {@code this}
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getConnectTimeout()
*/
public Builder connectTimeout(final int connectTimeout) {
if (connectTimeout < 0) {
throw new IllegalArgumentException("Minimum value is 0");
}
this.connectTimeout = connectTimeout;
return this;
}
/**
* Sets the socket timeout.
*
* @param socketTimeout the socket timeout
* @return {@code this}
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#getSocketTimeout()
*/
public Builder socketTimeout(final int socketTimeout) {
if (socketTimeout < 0) {
throw new IllegalArgumentException("Minimum value is 0");
}
this.socketTimeout = socketTimeout;
return this;
}
/**
* Sets whether socket keep alive is enabled.
*
* @param socketKeepAlive keep alive
* @return {@code this}
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientOptions#isSocketKeepAlive()
*/
public Builder socketKeepAlive(final boolean socketKeepAlive) {
this.socketKeepAlive = socketKeepAlive;
return this;
}
/**
* Sets whether auto connect retry is enabled.
*
* @param autoConnectRetry auto connect retry
* @return {@code this}
* @see MongoClientOptions#isAutoConnectRetry()
*/
public Builder autoConnectRetry(final boolean autoConnectRetry) {
this.autoConnectRetry = autoConnectRetry;
return this;
}
/**
* Sets the maximum auto connect retry time.
*
* @param maxAutoConnectRetryTime the maximum auto connect retry time
* @return {@code this}
* @see MongoClientOptions#getMaxAutoConnectRetryTime()
*/
public Builder maxAutoConnectRetryTime(final long maxAutoConnectRetryTime) {
if (maxAutoConnectRetryTime < 0) {
throw new IllegalArgumentException("Minimum value is 0");
}
this.maxAutoConnectRetryTime = maxAutoConnectRetryTime;
return this;
}
/**
* Sets the read preference.
*
* @param readPreference read preference
* @return {@code this}
* @see MongoClientOptions#getReadPreference()
*/
public Builder readPreference(final ReadPreference readPreference) {
if (readPreference == null) {
throw new IllegalArgumentException("null is not a legal value");
}
this.readPreference = readPreference;
return this;
}
/**
* Sets the decoder factory.
*
* @param dbDecoderFactory the decoder factory
* @return {@code this}
* @see MongoClientOptions#getDbDecoderFactory()
*/
public Builder dbDecoderFactory(final DBDecoderFactory dbDecoderFactory) {
if (dbDecoderFactory == null) {
throw new IllegalArgumentException("null is not a legal value");
}
this.dbDecoderFactory = dbDecoderFactory;
return this;
}
/**
* Sets the encoder factory.
*
* @param dbEncoderFactory the encoder factory
* @return {@code this}
* @see MongoClientOptions#getDbEncoderFactory()
*/
public Builder dbEncoderFactory(final DBEncoderFactory dbEncoderFactory) {
if (dbEncoderFactory == null) {
throw new IllegalArgumentException("null is not a legal value");
}
this.dbEncoderFactory = dbEncoderFactory;
return this;
}
/**
* Sets the write concern.
*
* @param writeConcern the write concern
* @return {@code this}
* @see MongoClientOptions#getWriteConcern()
*/
public Builder writeConcern(final WriteConcern writeConcern) {
if (writeConcern == null) {
throw new IllegalArgumentException("null is not a legal value");
}
this.writeConcern = writeConcern;
return this;
}
/**
* Sets the socket factory.
*
* @param socketFactory the socket factory
* @return {@code this}
* @see MongoClientOptions#getSocketFactory()
*/
public Builder socketFactory(final SocketFactory socketFactory) {
if (socketFactory == null) {
throw new IllegalArgumentException("null is not a legal value");
}
this.socketFactory = socketFactory;
return this;
}
/**
* Sets whether cursor finalizers are enabled.
*
* @param cursorFinalizerEnabled whether cursor finalizers are enabled.
* @return {@code this}
* @see MongoClientOptions#isCursorFinalizerEnabled()
*/
public Builder cursorFinalizerEnabled(final boolean cursorFinalizerEnabled) {
this.cursorFinalizerEnabled = cursorFinalizerEnabled;
return this;
}
/**
* Sets whether JMX beans registered by the driver should always be MBeans, regardless of whether the VM is
* Java 6 or greater. If false, the driver will use MXBeans if the VM is Java 6 or greater, and use MBeans if
* the VM is Java 5.
*
* @param alwaysUseMBeans true if driver should always use MBeans, regardless of VM version
* @return this
* @see MongoClientOptions#isAlwaysUseMBeans()
*/
public Builder alwaysUseMBeans(final boolean alwaysUseMBeans) {
this.alwaysUseMBeans = alwaysUseMBeans;
return this;
}
/**
* Sets defaults to be what they are in {@code MongoOptions}.
*
* @return {@code this}
* @see MongoOptions
*/
public Builder legacyDefaults() {
connectionsPerHost = 10;
writeConcern = WriteConcern.NORMAL;
return this;
}
/**
* Build an instance of MongoClientOptions.
*
* @return the options from this builder
*/
public MongoClientOptions build() {
return new MongoClientOptions(this);
}
}
/**
* Create a new Builder instance. This is a convenience method, equivalent to {@code new MongoClientOptions.Builder()}.
*
* @return a new instance of a Builder
*/
public static Builder builder() {
return new Builder();
}
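A hedged sketch of the builder in use, chaining only setters defined in this file; the concrete values and the localhost address are illustrative assumptions, and the MongoClient constructor can throw UnknownHostException.
// Illustrative values only; every setter below is declared in MongoClientOptions.Builder above.
MongoClientOptions options = MongoClientOptions.builder()
        .description("mcore")                 // hypothetical description
        .connectionsPerHost(50)
        .connectTimeout(10 * 1000)
        .socketTimeout(60 * 1000)
        .readPreference(ReadPreference.secondaryPreferred())
        .writeConcern(WriteConcern.ACKNOWLEDGED)
        .build();
MongoClient client = new MongoClient(new ServerAddress("localhost"), options);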
/**
* Gets the description for this MongoClient, which is used in various places like logging and JMX.
* <p/>
* Default is null.
*
* @return the description
*/
public String getDescription() {
return description;
}
/**
* The maximum number of connections allowed per host for this MongoClient instance.
* Those connections will be kept in a pool when idle.
* Once the pool is exhausted, any operation requiring a connection will block waiting for an available connection.
* <p/>
* Default is 100.
*
* @return the maximum size of the connection pool per host
* @see MongoClientOptions#getThreadsAllowedToBlockForConnectionMultiplier()
*/
public int getConnectionsPerHost() {
return connectionsPerHost;
}
/**
* This multiplier, multiplied with the connectionsPerHost setting, gives the maximum number of threads that
* may be waiting for a connection to become available from the pool. All further threads will get an exception right
* away. For example if connectionsPerHost is 10 and threadsAllowedToBlockForConnectionMultiplier is 5, then up to 50
* threads can wait for a connection.
* <p/>
* Default is 5.
*
* @return the multiplier
*/
public int getThreadsAllowedToBlockForConnectionMultiplier() {
return threadsAllowedToBlockForConnectionMultiplier;
}
/**
* The maximum wait time in milliseconds that a thread may wait for a connection to become available.
* <p/>
* Default is 120,000. A value of 0 means that it will not wait. A negative value means to wait indefinitely.
*
* @return the maximum wait time.
*/
public int getMaxWaitTime() {
return maxWaitTime;
}
/**
* The connection timeout in milliseconds. A value of 0 means no timeout.
* It is used solely when establishing a new connection {@link java.net.Socket#connect(java.net.SocketAddress, int) }
* <p/>
* Default is 10,000.
*
* @return the socket connect timeout
*/
public int getConnectTimeout() {
return connectTimeout;
}
/**
* The socket timeout in milliseconds.
* It is used for I/O socket read and write operations {@link java.net.Socket#setSoTimeout(int)}
* <p/>
* Default is 0 and means no timeout.
*
* @return the socket timeout
*/
public int getSocketTimeout() {
return socketTimeout;
}
/**
* This flag controls the socket keep alive feature that keeps a connection alive through firewalls {@link java.net.Socket#setKeepAlive(boolean)}
* <p/>
* Default is false.
*
* @return whether keep-alive is enabled on each socket
*/
public boolean isSocketKeepAlive() {
return socketKeepAlive;
}
/**
* If true, the driver will keep trying to connect to the same server in case the socket connection cannot be established.
* There is a maximum amount of time to keep retrying, which is 15s by default.
* This can be useful to avoid some exceptions being thrown when a server is down temporarily by blocking the operations.
* It also can be useful to smooth the transition to a new master (so that a new master is elected within the retry time).
* Note that when using this flag:
* - for a replica set, the driver will keep trying to connect to the old master for that time, instead of failing over to the new one right away
* - this does not prevent exceptions from being thrown in read/write operations on the socket, which must be handled by the application
* <p/>
* Even if this flag is false, the driver already has mechanisms to automatically recreate broken connections and retry the read operations.
* Default is false.
*
* @return whether socket connect is retried
*/
public boolean isAutoConnectRetry() {
return autoConnectRetry;
}
/**
* The maximum amount of time in milliseconds to spend retrying to open a connection to the same server.
* Default is 0, which means to use the default 15s if autoConnectRetry is on.
*
* @return the maximum socket connect retry time.
*/
public long getMaxAutoConnectRetryTime() {
return maxAutoConnectRetryTime;
}
/**
* The read preference to use for queries, map-reduce, aggregation, and count.
* <p/>
* Default is {@code ReadPreference.primary()}.
*
* @return the read preference
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference#primary()
*/
public ReadPreference getReadPreference() {
return readPreference;
}
/**
* Override the decoder factory. Default is for the standard Mongo Java driver configuration.
*
* @return the decoder factory
*/
public DBDecoderFactory getDbDecoderFactory() {
return dbDecoderFactory;
}
/**
* Override the encoder factory. Default is for the standard Mongo Java driver configuration.
*
* @return the encoder factory
*/
public DBEncoderFactory getDbEncoderFactory() {
return dbEncoderFactory;
}
/**
* The write concern to use.
* <p/>
* Default is {@code WriteConcern.ACKNOWLEDGED}.
*
* @return the write concern
* @see WriteConcern#ACKNOWLEDGED
*/
public WriteConcern getWriteConcern() {
return writeConcern;
}
/**
* The socket factory for creating sockets to the mongo server.
* <p/>
* Default is SocketFactory.getDefault()
*
* @return the socket factory
*/
public SocketFactory getSocketFactory() {
return socketFactory;
}
/**
* Gets whether there is a finalize method created that cleans up instances of DBCursor that the client
* does not close. If you are careful to always call the close method of DBCursor, then this can safely be set to false.
* <p/>
* Default is true.
*
* @return whether finalizers are enabled on cursors
* @see DBCursor
* @see com.massivecraft.mcore.xlib.mongodb.DBCursor#close()
*/
public boolean isCursorFinalizerEnabled() {
return cursorFinalizerEnabled;
}
/**
* Gets whether JMX beans registered by the driver should always be MBeans, regardless of whether the VM is
* Java 6 or greater. If false, the driver will use MXBeans if the VM is Java 6 or greater, and use MBeans if
* the VM is Java 5.
* <p>
* Default is false.
* </p>
*
* @return true if the driver should always use MBeans, regardless of VM version
*/
public boolean isAlwaysUseMBeans() {
return alwaysUseMBeans;
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final MongoClientOptions that = (MongoClientOptions) o;
if (alwaysUseMBeans != that.alwaysUseMBeans) return false;
if (autoConnectRetry != that.autoConnectRetry) return false;
if (connectTimeout != that.connectTimeout) return false;
if (connectionsPerHost != that.connectionsPerHost) return false;
if (cursorFinalizerEnabled != that.cursorFinalizerEnabled) return false;
if (maxAutoConnectRetryTime != that.maxAutoConnectRetryTime) return false;
if (maxWaitTime != that.maxWaitTime) return false;
if (socketKeepAlive != that.socketKeepAlive) return false;
if (socketTimeout != that.socketTimeout) return false;
if (threadsAllowedToBlockForConnectionMultiplier != that.threadsAllowedToBlockForConnectionMultiplier)
return false;
if (!dbDecoderFactory.equals(that.dbDecoderFactory)) return false;
if (!dbEncoderFactory.equals(that.dbEncoderFactory)) return false;
if (description != null ? !description.equals(that.description) : that.description != null) return false;
if (!readPreference.equals(that.readPreference)) return false;
// Compare SocketFactory Class, since some equivalent SocketFactory instances are not equal to each other
if (!socketFactory.getClass().equals(that.socketFactory.getClass())) return false;
if (!writeConcern.equals(that.writeConcern)) return false;
return true;
}
@Override
public int hashCode() {
int result = description != null ? description.hashCode() : 0;
result = 31 * result + connectionsPerHost;
result = 31 * result + threadsAllowedToBlockForConnectionMultiplier;
result = 31 * result + maxWaitTime;
result = 31 * result + connectTimeout;
result = 31 * result + socketTimeout;
result = 31 * result + (socketKeepAlive ? 1 : 0);
result = 31 * result + (autoConnectRetry ? 1 : 0);
result = 31 * result + (int) (maxAutoConnectRetryTime ^ (maxAutoConnectRetryTime >>> 32));
result = 31 * result + readPreference.hashCode();
result = 31 * result + dbDecoderFactory.hashCode();
result = 31 * result + dbEncoderFactory.hashCode();
result = 31 * result + writeConcern.hashCode();
result = 31 * result + socketFactory.hashCode();
result = 31 * result + (cursorFinalizerEnabled ? 1 : 0);
result = 31 * result + (alwaysUseMBeans ? 1 : 0);
return result;
}
private MongoClientOptions(final Builder builder) {
description = builder.description;
connectionsPerHost = builder.connectionsPerHost;
threadsAllowedToBlockForConnectionMultiplier = builder.threadsAllowedToBlockForConnectionMultiplier;
maxWaitTime = builder.maxWaitTime;
connectTimeout = builder.connectTimeout;
socketTimeout = builder.socketTimeout;
autoConnectRetry = builder.autoConnectRetry;
socketKeepAlive = builder.socketKeepAlive;
maxAutoConnectRetryTime = builder.maxAutoConnectRetryTime;
readPreference = builder.readPreference;
dbDecoderFactory = builder.dbDecoderFactory;
dbEncoderFactory = builder.dbEncoderFactory;
writeConcern = builder.writeConcern;
socketFactory = builder.socketFactory;
cursorFinalizerEnabled = builder.cursorFinalizerEnabled;
alwaysUseMBeans = builder.alwaysUseMBeans;
}
private final String description;
private final int connectionsPerHost;
private final int threadsAllowedToBlockForConnectionMultiplier;
private final int maxWaitTime;
private final int connectTimeout;
private final int socketTimeout;
private final boolean socketKeepAlive;
private final boolean autoConnectRetry;
private final long maxAutoConnectRetryTime;
private final ReadPreference readPreference;
private final DBDecoderFactory dbDecoderFactory;
private final DBEncoderFactory dbEncoderFactory;
private final WriteConcern writeConcern;
private final SocketFactory socketFactory;
private final boolean cursorFinalizerEnabled;
private final boolean alwaysUseMBeans;
}

View File

@ -0,0 +1,612 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import javax.net.ssl.SSLSocketFactory;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Logger;
/**
* Represents a <a href="http://www.mongodb.org/display/DOCS/Connections">URI</a>
* which can be used to create a MongoClient instance. The URI describes the hosts to
* be used and options.
* <p>The format of the URI is:
* <pre>
* mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]
* </pre>
* <ul>
* <li>{@code mongodb://} is a required prefix to identify that this is a string in the standard connection format.</li>
* <li>{@code username:password@} are optional. If given, the driver will attempt to login to a database after
* connecting to a database server.</li>
* <li>{@code host1} is the only required part of the URI. It identifies a server address to connect to.</li>
* <li>{@code :portX} is optional and defaults to :27017 if not provided.</li>
* <li>{@code /database} is the name of the database to login to and thus is only relevant if the
* {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.</li>
* <li>{@code ?options} are connection options. Note that if {@code database} is absent there is still a {@code /}
* required between the last host and the {@code ?} introducing the options. Options are name=value pairs and the pairs
* are separated by "&amp;". For backwards compatibility, ";" is accepted as a separator in addition to "&amp;",
* but should be considered as deprecated.</li>
* </ul>
* <p>
* The Java driver supports the following options (case insensitive):
* <p>
* Replica set configuration:
* </p>
* <ul>
* <li>{@code replicaSet=name}: Implies that the hosts given are a seed list, and the driver will attempt to find
* all members of the set.</li>
* </ul>
* <p>Connection Configuration:</p>
* <ul>
* <li>{@code ssl=true|false}: Whether to connect using SSL.</li>
* <li>{@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.</li>
* <li>{@code socketTimeoutMS=ms}: How long a send or receive on a socket can take before timing out.</li>
* </ul>
* <p>Connection pool configuration:</p>
* <ul>
* <li>{@code maxPoolSize=n}: The maximum number of connections in the connection pool.</li>
* <li>{@code waitQueueMultiple=n} : this multiplier, multiplied with the maxPoolSize setting, gives the maximum number of
* threads that may be waiting for a connection to become available from the pool. All further threads will get an
* exception right away.</li>
* <li>{@code waitQueueTimeoutMS=ms}: The maximum wait time in milliseconds that a thread may wait for a connection to
* become available.</li>
* </ul>
* <p>Write concern configuration:</p>
* <ul>
* <li>{@code safe=true|false}
* <ul>
* <li>{@code true}: the driver sends a getLastError command after every update to ensure that the update succeeded
* (see also {@code w} and {@code wtimeoutMS}).</li>
* <li>{@code false}: the driver does not send a getLastError command after every update.</li>
* </ul>
* </li>
* <li>{@code w=wValue}
* <ul>
* <li>The driver adds { w : wValue } to the getLastError command. Implies {@code safe=true}.</li>
* <li>wValue is typically a number, but can be any string in order to allow for specifications like
* {@code "majority"}</li>
* </ul>
* </li>
* <li>{@code wtimeoutMS=ms}
* <ul>
* <li>The driver adds { wtimeout : ms } to the getlasterror command. Implies {@code safe=true}.</li>
* <li>Used in combination with {@code w}</li>
* </ul>
* </li>
* </ul>
* <p>Read preference configuration:</p>
* <ul>
* <li>{@code slaveOk=true|false}: Whether a driver connected to a replica set will send reads to slaves/secondaries.</li>
* <li>{@code readPreference=enum}: The read preference for this connection. If set, it overrides any slaveOk value.
* <ul>
* <li>Enumerated values:
* <ul>
* <li>{@code primary}</li>
* <li>{@code primaryPreferred}</li>
* <li>{@code secondary}</li>
* <li>{@code secondaryPreferred}</li>
* <li>{@code nearest}</li>
* </ul>
* </li>
* </ul>
* </li>
* <li>{@code readPreferenceTags=string}. A representation of a tag set as a comma-separated list of colon-separated
* key-value pairs, e.g. {@code "dc:ny,rack:1"}. Spaces are stripped from beginning and end of all keys and values.
* To specify a list of tag sets, use multiple readPreferenceTags,
* e.g. {@code readPreferenceTags=dc:ny,rack:1;readPreferenceTags=dc:ny;readPreferenceTags=}
* <ul>
* <li>Note the empty value for the last one, which means match any secondary as a last resort.</li>
* <li>Order matters when using multiple readPreferenceTags.</li>
* </ul>
* </li>
* </ul>
* <p>Authentication configuration:</p>
* <ul>
* <li>{@code authMechanism=MONGODB-CR|GSSAPI}: The authentication mechanism to use if a credential was supplied.
* The default is MONGODB-CR, which is the native MongoDB Challenge Response mechanism.
* </li>
* <li>{@code authSource=string}: The source of the authentication credentials. This is typically the database in
* which the credentials have been created. The value defaults to the database specified in the path portion of the URI.
* If the database is specified in neither place, the default value is "admin". For GSSAPI, it's not necessary to specify
* a source.
* </li>
* </ul>
* <p>
* Note: This class is a replacement for {@code MongoURI}, to be used with {@code MongoClient}. The main difference
* in behavior is that the default write concern is {@code WriteConcern.ACKNOWLEDGED}.
* </p>
*
* @see MongoClientOptions for the default values for all options
* @since 2.10.0
*/
public class MongoClientURI {
private static final String PREFIX = "mongodb://";
private static final String UTF_8 = "UTF-8";
/**
* Creates a MongoClientURI from the given string.
*
* @param uri the URI
* @dochub connections
*/
public MongoClientURI(final String uri) {
this(uri, new MongoClientOptions.Builder());
}
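/* Illustrative usage sketch (not part of the driver source), tying together the URI format
 * described in the class javadoc and the parsing code below:
 *
 *   MongoClientURI uri = new MongoClientURI(
 *           "mongodb://fred:secret@host1:27017,host2/mydb?maxPoolSize=50&w=majority&readPreference=secondaryPreferred");
 *   uri.getHosts();       // [host1:27017, host2]
 *   uri.getDatabase();    // "mydb"
 *   uri.getCredentials(); // MONGODB-CR credentials for "fred", source "mydb"
 *   uri.getOptions();     // connectionsPerHost=50, w="majority", readPreference=secondaryPreferred
 */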
/**
* Creates a MongoURI from the given URI string, and MongoClientOptions.Builder. The builder can be configured
* with default options, which may be overridden by options specified in the URI string.
*
* @param uri the URI
* @param builder a Builder
* @see com.massivecraft.mcore.xlib.mongodb.MongoClientURI#getOptions()
* @since 2.11.0
*/
public MongoClientURI(String uri, MongoClientOptions.Builder builder) {
try {
this.uri = uri;
if (!uri.startsWith(PREFIX))
throw new IllegalArgumentException("uri needs to start with " + PREFIX);
uri = uri.substring(PREFIX.length());
String serverPart;
String nsPart;
String optionsPart;
String userName = null;
char[] password = null;
{
int idx = uri.lastIndexOf("/");
if (idx < 0) {
if (uri.contains("?")) {
throw new IllegalArgumentException("URI contains options without trailing slash");
}
serverPart = uri;
nsPart = null;
optionsPart = "";
} else {
serverPart = uri.substring(0, idx);
nsPart = uri.substring(idx + 1);
idx = nsPart.indexOf("?");
if (idx >= 0) {
optionsPart = nsPart.substring(idx + 1);
nsPart = nsPart.substring(0, idx);
} else {
optionsPart = "";
}
}
}
{ // userName,password,hosts
List<String> all = new LinkedList<String>();
int idx = serverPart.indexOf("@");
if (idx > 0) {
String authPart = serverPart.substring(0, idx);
serverPart = serverPart.substring(idx + 1);
idx = authPart.indexOf(":");
if (idx == -1) {
userName = URLDecoder.decode(authPart, UTF_8);
} else {
userName = URLDecoder.decode(authPart.substring(0, idx), UTF_8);
password = URLDecoder.decode(authPart.substring(idx + 1), UTF_8).toCharArray();
}
}
Collections.addAll(all, serverPart.split(","));
hosts = Collections.unmodifiableList(all);
}
if (nsPart != null && !nsPart.isEmpty()) { // database,_collection
int idx = nsPart.indexOf(".");
if (idx < 0) {
database = nsPart;
collection = null;
} else {
database = nsPart.substring(0, idx);
collection = nsPart.substring(idx + 1);
}
} else {
database = null;
collection = null;
}
Map<String, List<String>> optionsMap = parseOptions(optionsPart);
options = createOptions(optionsMap, builder);
credentials = createCredentials(optionsMap, userName, password, database);
warnOnUnsupportedOptions(optionsMap);
} catch (UnsupportedEncodingException e) {
throw new MongoInternalException("This should not happen", e);
}
}
static Set<String> generalOptionsKeys = new HashSet<String>();
static Set<String> authKeys = new HashSet<String>();
static Set<String> readPreferenceKeys = new HashSet<String>();
static Set<String> writeConcernKeys = new HashSet<String>();
static Set<String> allKeys = new HashSet<String>();
static {
generalOptionsKeys.add("maxpoolsize");
generalOptionsKeys.add("waitqueuemultiple");
generalOptionsKeys.add("waitqueuetimeoutms");
generalOptionsKeys.add("connecttimeoutms");
generalOptionsKeys.add("sockettimeoutms");
generalOptionsKeys.add("sockettimeoutms");
generalOptionsKeys.add("autoconnectretry");
generalOptionsKeys.add("ssl");
readPreferenceKeys.add("slaveok");
readPreferenceKeys.add("readpreference");
readPreferenceKeys.add("readpreferencetags");
writeConcernKeys.add("safe");
writeConcernKeys.add("w");
writeConcernKeys.add("wtimeout");
writeConcernKeys.add("fsync");
writeConcernKeys.add("j");
authKeys.add("authmechanism");
authKeys.add("authsource");
allKeys.addAll(generalOptionsKeys);
allKeys.addAll(authKeys);
allKeys.addAll(readPreferenceKeys);
allKeys.addAll(writeConcernKeys);
}
private void warnOnUnsupportedOptions(Map<String, List<String>> optionsMap) {
for (String key : optionsMap.keySet()) {
if (!allKeys.contains(key)) {
LOGGER.warning("Unknown or Unsupported Option '" + key + "'");
}
}
}
private MongoClientOptions createOptions(Map<String, List<String>> optionsMap, MongoClientOptions.Builder builder) {
for (String key : generalOptionsKeys) {
String value = getLastValue(optionsMap, key);
if (value == null) {
continue;
}
if (key.equals("maxpoolsize")) {
builder.connectionsPerHost(Integer.parseInt(value));
} else if (key.equals("waitqueuemultiple")) {
builder.threadsAllowedToBlockForConnectionMultiplier(Integer.parseInt(value));
} else if (key.equals("waitqueuetimeoutms")) {
builder.maxWaitTime(Integer.parseInt(value));
} else if (key.equals("connecttimeoutms")) {
builder.connectTimeout(Integer.parseInt(value));
} else if (key.equals("sockettimeoutms")) {
builder.socketTimeout(Integer.parseInt(value));
} else if (key.equals("autoconnectretry")) {
builder.autoConnectRetry(_parseBoolean(value));
} else if (key.equals("ssl")) {
if (_parseBoolean(value)) {
builder.socketFactory(SSLSocketFactory.getDefault());
}
}
}
WriteConcern writeConcern = createWriteConcern(optionsMap);
ReadPreference readPreference = createReadPreference(optionsMap);
if (writeConcern != null) {
builder.writeConcern(writeConcern);
}
if (readPreference != null) {
builder.readPreference(readPreference);
}
return builder.build();
}
private WriteConcern createWriteConcern(final Map<String, List<String>> optionsMap) {
Boolean safe = null;
String w = null;
int wTimeout = 0;
boolean fsync = false;
boolean journal = false;
for (String key : writeConcernKeys) {
String value = getLastValue(optionsMap, key);
if (value == null) {
continue;
}
if (key.equals("safe")) {
safe = _parseBoolean(value);
} else if (key.equals("w")) {
w = value;
} else if (key.equals("wtimeout")) {
wTimeout = Integer.parseInt(value);
} else if (key.equals("fsync")) {
fsync = _parseBoolean(value);
} else if (key.equals("j")) {
journal = _parseBoolean(value);
}
}
return buildWriteConcern(safe, w, wTimeout, fsync, journal);
}
private ReadPreference createReadPreference(final Map<String, List<String>> optionsMap) {
Boolean slaveOk = null;
String readPreferenceType = null;
DBObject firstTagSet = null;
List<DBObject> remainingTagSets = new ArrayList<DBObject>();
for (String key : readPreferenceKeys) {
String value = getLastValue(optionsMap, key);
if (value == null) {
continue;
}
if (key.equals("slaveok")) {
slaveOk = _parseBoolean(value);
} else if (key.equals("readpreference")) {
readPreferenceType = value;
} else if (key.equals("readpreferencetags")) {
for (String cur : optionsMap.get(key)) {
DBObject tagSet = getTagSet(cur.trim());
if (firstTagSet == null) {
firstTagSet = tagSet;
} else {
remainingTagSets.add(tagSet);
}
}
}
}
return buildReadPreference(readPreferenceType, firstTagSet, remainingTagSets, slaveOk);
}
private MongoCredential createCredentials(Map<String, List<String>> optionsMap, final String userName,
final char[] password, String database) {
if (userName == null) {
return null;
}
if (database == null) {
database = "admin";
}
String mechanism = MongoCredential.MONGODB_CR_MECHANISM;
String authSource = database;
for (String key : authKeys) {
String value = getLastValue(optionsMap, key);
if (value == null) {
continue;
}
if (key.equals("authmechanism")) {
mechanism = value;
} else if (key.equals("authsource")) {
authSource = value;
}
}
if (mechanism.equals(MongoCredential.GSSAPI_MECHANISM)) {
return MongoCredential.createGSSAPICredential(userName);
}
else if (mechanism.equals(MongoCredential.MONGODB_CR_MECHANISM)) {
return MongoCredential.createMongoCRCredential(userName, authSource, password);
}
else {
throw new IllegalArgumentException("Unsupported authMechanism: " + mechanism);
}
}
private String getLastValue(final Map<String, List<String>> optionsMap, final String key) {
List<String> valueList = optionsMap.get(key);
if (valueList == null) {
return null;
}
return valueList.get(valueList.size() - 1);
}
private Map<String, List<String>> parseOptions(String optionsPart) {
Map<String, List<String>> optionsMap = new HashMap<String, List<String>>();
for (String _part : optionsPart.split("&|;")) {
int idx = _part.indexOf("=");
if (idx >= 0) {
String key = _part.substring(0, idx).toLowerCase();
String value = _part.substring(idx + 1);
List<String> valueList = optionsMap.get(key);
if (valueList == null) {
valueList = new ArrayList<String>(1);
}
valueList.add(value);
optionsMap.put(key, valueList);
}
}
return optionsMap;
}
private ReadPreference buildReadPreference(final String readPreferenceType, final DBObject firstTagSet,
final List<DBObject> remainingTagSets, final Boolean slaveOk) {
if (readPreferenceType != null) {
if (firstTagSet == null) {
return ReadPreference.valueOf(readPreferenceType);
} else {
return ReadPreference.valueOf(readPreferenceType, firstTagSet,
remainingTagSets.toArray(new DBObject[remainingTagSets.size()]));
}
} else if (slaveOk != null) {
if (slaveOk.equals(Boolean.TRUE)) {
return ReadPreference.secondaryPreferred();
}
}
return null;
}
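/* Illustrative mapping (a sketch, not part of the driver source): "readPreference=nearest" resolves via
 * ReadPreference.valueOf("nearest"); with no readPreference but "slaveOk=true", the method falls back to
 * ReadPreference.secondaryPreferred(); otherwise it returns null and the builder default applies. */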
private WriteConcern buildWriteConcern(final Boolean safe, final String w,
final int wTimeout, final boolean fsync, final boolean journal) {
if (w != null || wTimeout != 0 || fsync || journal) {
if (w == null) {
return new WriteConcern(1, wTimeout, fsync, journal);
} else {
try {
return new WriteConcern(Integer.parseInt(w), wTimeout, fsync, journal);
} catch (NumberFormatException e) {
return new WriteConcern(w, wTimeout, fsync, journal);
}
}
} else if (safe != null) {
if (safe) {
return WriteConcern.ACKNOWLEDGED;
} else {
return WriteConcern.UNACKNOWLEDGED;
}
}
return null;
}
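/* Illustrative mapping (a sketch, not part of the driver source): "w=majority&wtimeout=5000" yields
 * new WriteConcern("majority", 5000, false, false), "w=2" yields new WriteConcern(2, 0, false, false),
 * and "safe=true" alone yields WriteConcern.ACKNOWLEDGED. */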
private DBObject getTagSet(String tagSetString) {
DBObject tagSet = new BasicDBObject();
if (tagSetString.length() > 0) {
for (String tag : tagSetString.split(",")) {
String[] tagKeyValuePair = tag.split(":");
if (tagKeyValuePair.length != 2) {
throw new IllegalArgumentException("Bad read preference tags: " + tagSetString);
}
tagSet.put(tagKeyValuePair[0].trim(), tagKeyValuePair[1].trim());
}
}
return tagSet;
}
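/* Illustrative example (a sketch, not part of the driver source): the tag set string "dc:ny,rack:1"
 * is parsed into a DBObject equivalent to { "dc" : "ny", "rack" : "1" }; a malformed pair such as
 * "dc" (no colon) raises IllegalArgumentException. */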
boolean _parseBoolean(String _in) {
String in = _in.trim();
return in != null && in.length() > 0 && (in.equals("1") || in.toLowerCase().equals("true") || in.toLowerCase()
.equals("yes"));
}
// ---------------------------------
/**
* Gets the username
*
* @return the username
*/
public String getUsername() {
return credentials != null ? credentials.getUserName() : null;
}
/**
* Gets the password
*
* @return the password
*/
public char[] getPassword() {
return credentials != null ? credentials.getPassword() : null;
}
/**
* Gets the list of hosts
*
* @return the host list
*/
public List<String> getHosts() {
return hosts;
}
/**
* Gets the database name
*
* @return the database name
*/
public String getDatabase() {
return database;
}
/**
* Gets the collection name
*
* @return the collection name
*/
public String getCollection() {
return collection;
}
/**
* Get the unparsed URI.
*
* @return the URI
*/
public String getURI() {
return uri;
}
/**
* Gets the credentials.
*
* @return the credentials
*/
public MongoCredential getCredentials() {
return credentials;
}
/**
* Gets the options
*
* @return the MongoClientOptions based on this URI.
*/
public MongoClientOptions getOptions() {
return options;
}
// ---------------------------------
private final MongoClientOptions options;
private final MongoCredential credentials;
private final List<String> hosts;
private final String database;
private final String collection;
private final String uri;
static final Logger LOGGER = Logger.getLogger("com.mongodb.MongoURI");
@Override
public String toString() {
return uri;
}
}

View File

@ -0,0 +1,28 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* This class exists only so that, on Java 6 and above, the driver can create instances of an MXBean.
*/
class MongoConnectionPool extends DBPortPool implements MongoConnectionPoolMXBean {
MongoConnectionPool(ServerAddress addr, MongoOptions options) {
super(addr, options);
}
}

View File

@ -0,0 +1,62 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.mongodb.util.ConnectionPoolStatisticsBean;
/**
* A standard MXBean interface for a Mongo connection pool, for use on Java 6 and above virtual machines.
* <p>
* This interface is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases.
*/
public interface MongoConnectionPoolMXBean {
/**
* Gets the name of the pool.
*
* @return the name of the pool
*/
String getName();
/**
* Gets the maximum allowed size of the pool, including idle and in-use members.
*
* @return the maximum size
*/
int getMaxSize();
/**
* Gets the host that this connection pool is connecting to.
*
* @return the host
*/
String getHost();
/**
* Gets the port that this connection pool is connecting to.
*
* @return the port
*/
int getPort();
/**
* Gets the statistics for this connection pool.
*
* @return the connection pool statistics
*/
ConnectionPoolStatisticsBean getStatistics();
}

View File

@ -0,0 +1,175 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.util.annotations.Immutable;
import java.util.Arrays;
/**
* Represents credentials to authenticate to a mongo server, as well as the source of the credentials and
* the authentication mechanism to use.
*
* @since 2.11.0
*/
@Immutable
public final class MongoCredential {
/**
* The GSSAPI mechanism. See the <a href="http://tools.ietf.org/html/rfc4752">RFC</a>.
*/
public static final String GSSAPI_MECHANISM = "GSSAPI";
/**
* The MongoDB Challenge Response mechanism.
*/
public static final String MONGODB_CR_MECHANISM = "MONGODB-CR";
private final String mechanism;
private final String userName;
private final String source;
private final char[] password;
/**
* Creates a MongoCredential instance for the MongoDB Challenge Response protocol.
*
* @param userName the user name
* @param database the database where the user is defined
* @param password the user's password
* @return the credential
*/
public static MongoCredential createMongoCRCredential(String userName, String database, char[] password) {
return new MongoCredential(MONGODB_CR_MECHANISM, userName, database, password);
}
/**
* Creates a MongoCredential instance for the GSSAPI SASL mechanism.
*
* @param userName the user name
* @return the credential
*/
public static MongoCredential createGSSAPICredential(String userName) {
return new MongoCredential(GSSAPI_MECHANISM, userName, "$external", null);
}
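/* Illustrative usage sketch (not part of the driver source):
 *
 *   MongoCredential cr = MongoCredential.createMongoCRCredential("fred", "mydb", "secret".toCharArray());
 *   cr.getSource();    // "mydb"
 *   cr.getMechanism(); // MONGODB-CR
 *
 *   MongoCredential kerberos = MongoCredential.createGSSAPICredential("fred@EXAMPLE.COM");
 *   kerberos.getSource();   // "$external"; the password is null for GSSAPI
 */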
/**
*
* Constructs a new instance using the given mechanism, userName, source, and password
*
* @param mechanism the authentication mechanism
* @param userName the user name
* @param source the source of the user name, typically a database name
* @param password the password
*/
MongoCredential(final String mechanism, final String userName, final String source, final char[] password) {
if (mechanism == null) {
throw new IllegalArgumentException("mechanism can not be null");
}
if (userName == null) {
throw new IllegalArgumentException("username can not be null");
}
if (mechanism.equals(MONGODB_CR_MECHANISM) && password == null) {
throw new IllegalArgumentException("Password can not be null for " + MONGODB_CR_MECHANISM + " mechanism");
}
if (mechanism.equals(GSSAPI_MECHANISM) && password != null) {
throw new IllegalArgumentException("Password must be null for the " + GSSAPI_MECHANISM + " mechanism");
}
this.mechanism = mechanism;
this.userName = userName;
this.source = source;
this.password = password != null ? password.clone() : null;
}
/**
* Gets the mechanism
*
* @return the mechanism.
*/
public String getMechanism() {
return mechanism;
}
/**
* Gets the user name
*
* @return the user name. Can never be null.
*/
public String getUserName() {
return userName;
}
/**
* Gets the source of the user name, typically the name of the database where the user is defined.
*
* @return the source. Can never be null.
*/
public String getSource() {
return source;
}
/**
* Gets the password.
*
* @return the password. Can be null for some mechanisms.
*/
public char[] getPassword() {
if (password == null) {
return null;
}
return password.clone();
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final MongoCredential that = (MongoCredential) o;
if (!mechanism.equals(that.mechanism)) return false;
if (!Arrays.equals(password, that.password)) return false;
if (!source.equals(that.source)) return false;
if (!userName.equals(that.userName)) return false;
return true;
}
@Override
public int hashCode() {
int result = mechanism.hashCode();
result = 31 * result + userName.hashCode();
result = 31 * result + source.hashCode();
result = 31 * result + (password != null ? Arrays.hashCode(password) : 0);
return result;
}
@Override
public String toString() {
return "MongoCredential{" +
"mechanism='" + mechanism + '\'' +
", userName='" + userName + '\'' +
", source='" + source + '\'' +
", password=<hidden>" +
'}';
}
}

View File

@ -0,0 +1,147 @@
/*
* Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.util.annotations.ThreadSafe;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* An effectively immutable store of credentials to mongo servers. It enforces the invariant that there can be at most
* one credentials for each database. It allows at most one credentials with a null database.
*
* There is still a package-protected method to add a new credentials to the store, but it's only there
* to support DB.authenticate, which allows you to add new credentials at any point during the life of a MongoClient.
*
* @since 2.11.0
*/
@ThreadSafe
class MongoCredentialsStore {
private final Map<String, MongoCredential> credentialsMap = new HashMap<String, MongoCredential>();
private volatile Set<String> allDatabasesWithCredentials = new HashSet<String>();
/**
* Creates an empty store
*/
public MongoCredentialsStore() {
}
/**
* Creates a store with a single credentials.
*
* @param credentials A single credentials, which may be null.
*/
public MongoCredentialsStore(MongoCredential credentials) {
if (credentials == null) {
return;
}
add(credentials);
}
/**
* Creates a store with the list of credentials.
*
* @param credentialsList The list of credentials
*/
public MongoCredentialsStore(Iterable<MongoCredential> credentialsList) {
if (credentialsList == null) {
return;
}
for (MongoCredential cur : credentialsList) {
add(cur);
}
}
/**
* Adds a new credentials.
*
* @param credentials the new credentials
* @throws IllegalArgumentException if there already exist different credentials for the same database
*/
synchronized void add(MongoCredential credentials) {
MongoCredential existingCredentials = credentialsMap.get(credentials.getSource());
if (existingCredentials != null) {
if (existingCredentials.equals(credentials)) {
return;
}
throw new IllegalArgumentException("Can't add more than one credentials for the same database");
}
credentialsMap.put(credentials.getSource(), credentials);
allDatabasesWithCredentials = new HashSet<String>(allDatabasesWithCredentials);
allDatabasesWithCredentials.add(credentials.getSource());
}
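/* Illustrative behaviour (a sketch, not part of the driver source): re-adding an equal credential is a
 * no-op, while adding a different credential for a source already in the map ("mydb", say) throws
 * IllegalArgumentException, preserving the one-credential-per-database invariant described above. */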
/**
* Gets the set of databases for which there are credentials stored.
*
* @return an unmodifiable set of database names. Can contain the null string.
*/
public Set<String> getDatabases() {
return Collections.unmodifiableSet(allDatabasesWithCredentials);
}
/**
* Gets the stored credentials for the given database.
*
* @param database the database. This can be null, to get the credentials with the null database.
* @return the credentials for the given database. Can be null if none are stored.
*/
public synchronized MongoCredential get(String database) {
return credentialsMap.get(database);
}
/**
* Gets the MongoCredentials in this map as a List
* @return the list of credentials
*/
public synchronized List<MongoCredential> asList() {
return new ArrayList<MongoCredential>(credentialsMap.values());
}
@Override
public synchronized boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final MongoCredentialsStore that = (MongoCredentialsStore) o;
if (!credentialsMap.equals(that.credentialsMap)) return false;
return true;
}
@Override
public synchronized int hashCode() {
return credentialsMap.hashCode();
}
@Override
public String toString() {
return "{" +
"credentials=" + credentialsMap +
'}';
}
}

View File

@ -96,12 +96,21 @@ public class MongoException extends RuntimeException {
private static final long serialVersionUID = -4415279469780082174L;
Network( String msg , java.io.IOException ioe ){
/**
*
* @param msg the message
* @param ioe the cause
*/
public Network( String msg , java.io.IOException ioe ){
super( -2 , msg , ioe );
_ioe = ioe;
}
Network( java.io.IOException ioe ){
/**
*
* @param ioe the cause
*/
public Network( java.io.IOException ioe ){
super( ioe.toString() , ioe );
_ioe = ioe;
}
@ -110,14 +119,14 @@ public class MongoException extends RuntimeException {
}
/**
* Subclass of MongoException representing a duplicate key exception
* Subclass of WriteConcernException representing a duplicate key error
*/
public static class DuplicateKey extends MongoException {
public static class DuplicateKey extends WriteConcernException {
private static final long serialVersionUID = -4415279469780082174L;
DuplicateKey( int code , String msg ){
super( code , msg );
public DuplicateKey(final CommandResult commandResult) {
super(commandResult);
}
}
@ -133,10 +142,10 @@ public class MongoException extends RuntimeException {
/**
*
* @param cursorId
* @param serverAddress
* @param cursorId cursor
* @param serverAddress server address
*/
CursorNotFound(long cursorId, ServerAddress serverAddress){
public CursorNotFound(long cursorId, ServerAddress serverAddress){
super( -5 , "cursor " + cursorId + " not found on server " + serverAddress );
this.cursorId = cursorId;
this.serverAddress = serverAddress;

View File

@ -0,0 +1,36 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* A non-checked exception indicating that the driver has been interrupted by a call to Thread.interrupt.
*
* @see Thread#interrupt()
* @see InterruptedException
*/
public class MongoInterruptedException extends MongoException {
private static final long serialVersionUID = -4110417867718417860L;
public MongoInterruptedException(final InterruptedException e) {
super("A driver operation has been interrupted", e);
}
public MongoInterruptedException(final String message, final InterruptedException e) {
super(message, e);
}
}

View File

@ -21,25 +21,53 @@ package com.massivecraft.mcore.xlib.mongodb;
import javax.net.SocketFactory;
/**
* Various settings for the driver.
* Not thread safe.
* Various settings for a Mongo instance. Not thread safe, and superseded by MongoClientOptions. This class may
* be deprecated in a future release.
*
* @see MongoClientOptions
* @see MongoClient
*/
public class MongoOptions {
@Deprecated
public MongoOptions(){
reset();
}
/**
* @deprecated Replaced by {@link MongoClientOptions}
*/
@Deprecated
public MongoOptions(final MongoClientOptions options) {
connectionsPerHost = options.getConnectionsPerHost();
threadsAllowedToBlockForConnectionMultiplier = options.getThreadsAllowedToBlockForConnectionMultiplier();
maxWaitTime = options.getMaxWaitTime();
connectTimeout = options.getConnectTimeout();
socketTimeout = options.getSocketTimeout();
socketKeepAlive = options.isSocketKeepAlive();
autoConnectRetry = options.isAutoConnectRetry();
maxAutoConnectRetryTime = options.getMaxAutoConnectRetryTime();
readPreference = options.getReadPreference();
dbDecoderFactory = options.getDbDecoderFactory();
dbEncoderFactory = options.getDbEncoderFactory();
socketFactory = options.getSocketFactory();
description = options.getDescription();
cursorFinalizerEnabled = options.isCursorFinalizerEnabled();
writeConcern = options.getWriteConcern();
slaveOk = false; // default to false, as readPreference field will be responsible
}
public void reset(){
connectionsPerHost = Bytes.CONNECTIONS_PER_HOST;
threadsAllowedToBlockForConnectionMultiplier = 5;
maxWaitTime = 1000 * 60 * 2;
connectTimeout = 0;
connectTimeout = 1000 * 10;
socketTimeout = 0;
socketKeepAlive = false;
autoConnectRetry = false;
maxAutoConnectRetryTime = 0;
slaveOk = false;
readPreference = null;
safe = false;
w = 0;
wtimeout = 0;
@ -49,6 +77,8 @@ public class MongoOptions {
dbEncoderFactory = DefaultDBEncoder.FACTORY;
socketFactory = SocketFactory.getDefault();
description = null;
cursorFinalizerEnabled = true;
alwaysUseMBeans = false;
}
public MongoOptions copy() {
@ -62,6 +92,7 @@ public class MongoOptions {
m.autoConnectRetry = autoConnectRetry;
m.maxAutoConnectRetryTime = maxAutoConnectRetryTime;
m.slaveOk = slaveOk;
m.readPreference = readPreference;
m.safe = safe;
m.w = w;
m.wtimeout = wtimeout;
@ -71,22 +102,89 @@ public class MongoOptions {
m.dbEncoderFactory = dbEncoderFactory;
m.socketFactory = socketFactory;
m.description = description;
m.cursorFinalizerEnabled = cursorFinalizerEnabled;
m.alwaysUseMBeans = alwaysUseMBeans;
return m;
}
/**
* Helper method to return the appropriate WriteConcern instance based
* on the current related options settings.
* Helper method to return the appropriate WriteConcern instance based on the current related options settings.
**/
public WriteConcern getWriteConcern(){
// Ensure we only set writeconcern once; if non-default w, etc skip safe (implied)
if ( w != 0 || wtimeout != 0 || fsync )
return new WriteConcern( w , wtimeout , fsync );
else if (safe)
public WriteConcern getWriteConcern() {
if (writeConcern != null) {
return writeConcern;
} else if ( w != 0 || wtimeout != 0 || fsync || j) {
return new WriteConcern( w , wtimeout , fsync, j );
} else if (safe) {
return WriteConcern.SAFE;
else
} else {
return WriteConcern.NORMAL;
}
}
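/* Illustrative behaviour (a sketch, not part of the driver source): with the reset() defaults this
 * returns WriteConcern.NORMAL; setting w = 1 and j = true instead yields new WriteConcern(1, 0, false, true);
 * an explicitly assigned writeConcern field overrides all of the individual flags above. */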
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final MongoOptions options = (MongoOptions) o;
if (autoConnectRetry != options.autoConnectRetry) return false;
if (connectTimeout != options.connectTimeout) return false;
if (connectionsPerHost != options.connectionsPerHost) return false;
if (cursorFinalizerEnabled != options.cursorFinalizerEnabled) return false;
if (fsync != options.fsync) return false;
if (j != options.j) return false;
if (maxAutoConnectRetryTime != options.maxAutoConnectRetryTime) return false;
if (maxWaitTime != options.maxWaitTime) return false;
if (safe != options.safe) return false;
if (slaveOk != options.slaveOk) return false;
if (socketKeepAlive != options.socketKeepAlive) return false;
if (socketTimeout != options.socketTimeout) return false;
if (threadsAllowedToBlockForConnectionMultiplier != options.threadsAllowedToBlockForConnectionMultiplier)
return false;
if (w != options.w) return false;
if (wtimeout != options.wtimeout) return false;
if (dbDecoderFactory != null ? !dbDecoderFactory.equals(options.dbDecoderFactory) : options.dbDecoderFactory != null)
return false;
if (dbEncoderFactory != null ? !dbEncoderFactory.equals(options.dbEncoderFactory) : options.dbEncoderFactory != null)
return false;
if (description != null ? !description.equals(options.description) : options.description != null) return false;
if (readPreference != null ? !readPreference.equals(options.readPreference) : options.readPreference != null)
return false;
if (socketFactory != null ? !socketFactory.equals(options.socketFactory) : options.socketFactory != null)
return false;
if (writeConcern != null ? !writeConcern.equals(options.writeConcern) : options.writeConcern != null)
return false;
return true;
}
@Override
public int hashCode() {
int result = description != null ? description.hashCode() : 0;
result = 31 * result + connectionsPerHost;
result = 31 * result + threadsAllowedToBlockForConnectionMultiplier;
result = 31 * result + maxWaitTime;
result = 31 * result + connectTimeout;
result = 31 * result + socketTimeout;
result = 31 * result + (socketKeepAlive ? 1 : 0);
result = 31 * result + (autoConnectRetry ? 1 : 0);
result = 31 * result + (int) (maxAutoConnectRetryTime ^ (maxAutoConnectRetryTime >>> 32));
result = 31 * result + (slaveOk ? 1 : 0);
result = 31 * result + (readPreference != null ? readPreference.hashCode() : 0);
result = 31 * result + (dbDecoderFactory != null ? dbDecoderFactory.hashCode() : 0);
result = 31 * result + (dbEncoderFactory != null ? dbEncoderFactory.hashCode() : 0);
result = 31 * result + (safe ? 1 : 0);
result = 31 * result + w;
result = 31 * result + wtimeout;
result = 31 * result + (fsync ? 1 : 0);
result = 31 * result + (j ? 1 : 0);
result = 31 * result + (socketFactory != null ? socketFactory.hashCode() : 0);
result = 31 * result + (cursorFinalizerEnabled ? 1 : 0);
result = 31 * result + (writeConcern != null ? writeConcern.hashCode() : 0);
return result;
}
/**
* <p>The description for <code>Mongo</code> instances created with these options. This is used in various places like logging.</p>
@ -118,9 +216,9 @@ public class MongoOptions {
public int maxWaitTime;
/**
* The connection timeout in milliseconds.
* The connection timeout in milliseconds. A value of 0 means no timeout.
* It is used solely when establishing a new connection {@link java.net.Socket#connect(java.net.SocketAddress, int) }
* Default is 0 and means no timeout.
* Default is 10,000.
*/
public int connectTimeout;
@ -164,12 +262,17 @@ public class MongoOptions {
* Note that reading from secondaries can increase performance and reliability, but it may result in temporary inconsistent results.
* Default is false.
*
* @deprecated Replaced in MongoDB 2.0/Java Driver 2.7 with ReadPreference.SECONDARY
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference.SECONDARY
* @deprecated Replaced with {@code ReadPreference.secondaryPreferred()}
* @see ReadPreference#secondaryPreferred()
*/
@Deprecated
public boolean slaveOk;
/**
* Specifies the read preference.
*/
public ReadPreference readPreference;
/**
* Override the DBCallback factory. Default is for the standard Mongo Java driver configuration.
*/
@ -219,29 +322,39 @@ public class MongoOptions {
*/
public SocketFactory socketFactory;
public String toString(){
StringBuilder buf = new StringBuilder();
buf.append( "description=" ).append( description ).append( ", " );
buf.append( "connectionsPerHost=" ).append( connectionsPerHost ).append( ", " );
buf.append( "threadsAllowedToBlockForConnectionMultiplier=" ).append( threadsAllowedToBlockForConnectionMultiplier ).append( ", " );
buf.append( "maxWaitTime=" ).append( maxWaitTime ).append( ", " );
buf.append( "connectTimeout=" ).append( connectTimeout ).append( ", " );
buf.append( "socketTimeout=" ).append( socketTimeout ).append( ", " );
buf.append( "socketKeepAlive=" ).append( socketKeepAlive ).append( ", " );
buf.append( "autoConnectRetry=" ).append( autoConnectRetry ).append( ", " );
buf.append( "maxAutoConnectRetryTime=" ).append( maxAutoConnectRetryTime ).append( ", " );
buf.append( "slaveOk=" ).append( slaveOk ).append( ", " );
buf.append( "safe=" ).append( safe ).append( ", " );
buf.append( "w=" ).append( w ).append( ", " );
buf.append( "wtimeout=" ).append( wtimeout ).append( ", " );
buf.append( "fsync=" ).append( fsync ).append( ", " );
buf.append( "j=" ).append( j );
return buf.toString();
}
/**
* Sets whether there is a finalize method created that cleans up instances of DBCursor that the client
* does not close. If you are careful to always call the close method of DBCursor, then this can safely be set to false.
* @see com.mongodb.DBCursor#close().
* Default is true.
*/
public boolean cursorFinalizerEnabled;
/**
* @return The description for <code>Mongo</code> instances created with these options
* Sets the write concern. If this is not set, the write concern defaults to the combination of settings of
* the other write concern-related fields. If set, this will override all of the other write concern-related
* fields.
*
* @see #w
* @see #safe
* @see #wtimeout
* @see #fsync
* @see #j
*/
public WriteConcern writeConcern;
/**
* Sets whether JMX beans registered by the driver should always be MBeans, regardless of whether the VM is
* Java 6 or greater. If false, the driver will use MXBeans if the VM is Java 6 or greater, and use MBeans if
* the VM is Java 5.
* <p>
* Default is false.
* </p>
*/
public boolean alwaysUseMBeans;
/**
* @return The description for <code>MongoClient</code> instances created with these options
*/
public synchronized String getDescription() {
return description;
@ -282,7 +395,7 @@ public class MongoOptions {
/**
*
* @param this multiplied with connectionsPerHost, sets the maximum number of threads that
* @param threads multiplied with connectionsPerHost, sets the maximum number of threads that
* may be waiting for a connection
*/
public synchronized void setThreadsAllowedToBlockForConnectionMultiplier(int threads) {
@ -497,6 +610,14 @@ public class MongoOptions {
j = safe;
}
/**
*
* @param writeConcern sets the write concern
*/
public void setWriteConcern(final WriteConcern writeConcern) {
this.writeConcern = writeConcern;
}
/**
*
* @return the socket factory for creating sockets to mongod
@ -512,4 +633,82 @@ public class MongoOptions {
public synchronized void setSocketFactory(SocketFactory factory) {
socketFactory = factory;
}
/**
*
* @return the read preference
*/
public ReadPreference getReadPreference() {
return readPreference;
}
/**
*
* @param readPreference the read preference
*/
public void setReadPreference(ReadPreference readPreference) {
this.readPreference = readPreference;
}
/**
*
* @return whether DBCursor finalizer is enabled
*/
public boolean isCursorFinalizerEnabled() {
return cursorFinalizerEnabled;
}
/**
*
* @param cursorFinalizerEnabled whether cursor finalizer is enabled
*/
public void setCursorFinalizerEnabled(final boolean cursorFinalizerEnabled) {
this.cursorFinalizerEnabled = cursorFinalizerEnabled;
}
/**
*
* @return true if the driver should always use MBeans, regardless of VM
*/
public boolean isAlwaysUseMBeans() {
return alwaysUseMBeans;
}
/**
*
* @param alwaysUseMBeans sets whether the driver should always use MBeans, regardless of VM
*/
public void setAlwaysUseMBeans(final boolean alwaysUseMBeans) {
this.alwaysUseMBeans = alwaysUseMBeans;
}
@Override
public String toString() {
return "MongoOptions{" +
"description='" + description + '\'' +
", connectionsPerHost=" + connectionsPerHost +
", threadsAllowedToBlockForConnectionMultiplier=" + threadsAllowedToBlockForConnectionMultiplier +
", maxWaitTime=" + maxWaitTime +
", connectTimeout=" + connectTimeout +
", socketTimeout=" + socketTimeout +
", socketKeepAlive=" + socketKeepAlive +
", autoConnectRetry=" + autoConnectRetry +
", maxAutoConnectRetryTime=" + maxAutoConnectRetryTime +
", slaveOk=" + slaveOk +
", readPreference=" + readPreference +
", dbDecoderFactory=" + dbDecoderFactory +
", dbEncoderFactory=" + dbEncoderFactory +
", safe=" + safe +
", w=" + w +
", wtimeout=" + wtimeout +
", fsync=" + fsync +
", j=" + j +
", socketFactory=" + socketFactory +
", cursorFinalizerEnabled=" + cursorFinalizerEnabled +
", writeConcern=" + writeConcern +
", alwaysUseMBeans=" + alwaysUseMBeans +
'}';
}
}

View File

@ -17,154 +17,135 @@
package com.massivecraft.mcore.xlib.mongodb;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.logging.Logger;
/**
* Represents a <a href="http://www.mongodb.org/display/DOCS/Connections">URI</a>
* which can be used to create a Mongo instance. The URI describes the hosts to
* be used and options.
*
* The Java driver supports the following options (case insensitive):<br />
*
* <p>
* This class has been superseded by {@code MongoClientURI}, and may be deprecated in a future release.
* <p>The format of the URI is:
* <pre>
* mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]
* </pre>
* <ul>
* <li>maxpoolsize</li>
* <li>waitqueuemultiple</li>
* <li>waitqueuetimeoutms</li>
* <li>connecttimeoutms</li>
* <li>sockettimeoutms</li>
* <li>autoconnectretry</li>
* <li>slaveok</li>
* <li>safe</li>
* <li>w</li>
* <li>wtimeout</li>
* <li>fsync</li>
* <li>{@code mongodb://} is a required prefix to identify that this is a string in the standard connection format.</li>
* <li>{@code username:password@} are optional. If given, the driver will attempt to login to a database after
* connecting to a database server.</li>
* <li>{@code host1} is the only required part of the URI. It identifies a server address to connect to.</li>
* <li>{@code :portX} is optional and defaults to :27017 if not provided.</li>
* <li>{@code /database} is the name of the database to login to and thus is only relevant if the
* {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.</li>
* <li>{@code ?options} are connection options. Note that if {@code database} is absent there is still a {@code /}
* required between the last host and the {@code ?} introducing the options. Options are name=value pairs and the pairs
* are separated by "&amp;". For backwards compatibility, ";" is accepted as a separator in addition to "&amp;",
* but should be considered as deprecated.</li>
* </ul>
* <p>
* The Java driver supports the following options (case insensitive):
* <p>
* Replica set configuration:
* </p>
* <ul>
* <li>{@code replicaSet=name}: Implies that the hosts given are a seed list, and the driver will attempt to find
* all members of the set.</li>
* </ul>
* <p>Connection Configuration:</p>
* <ul>
* <li>{@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.</li>
* <li>{@code socketTimeoutMS=ms}: How long a send or receive on a socket can take before timing out.</li>
* </ul>
* <p>Connection pool configuration:</p>
* <ul>
* <li>{@code maxPoolSize=n}: The maximum number of connections in the connection pool.</li>
* <li>{@code waitQueueMultiple=n} : this multiplier, multiplied with the maxPoolSize setting, gives the maximum number of
* threads that may be waiting for a connection to become available from the pool. All further threads will get an
* exception right away.</li>
* <li>{@code waitQueueTimeoutMS=ms}: The maximum wait time in milliseconds that a thread may wait for a connection to
* become available.</li>
* </ul>
* <p>Write concern configuration:</p>
* <ul>
* <li>{@code safe=true|false}
* <ul>
* <li>{@code true}: the driver sends a getLastError command after every update to ensure that the update succeeded
* (see also {@code w} and {@code wtimeoutMS}).</li>
* <li>{@code false}: the driver does not send a getLastError command after every update.</li>
* </ul>
* </li>
* <li>{@code w=wValue}
* <ul>
* <li>The driver adds { w : wValue } to the getLastError command. Implies {@code safe=true}.</li>
* <li>wValue is typically a number, but can be any string in order to allow for specifications like
* {@code "majority"}</li>
* </ul>
* </li>
* <li>{@code wtimeoutMS=ms}
* <ul>
* <li>The driver adds { wtimeout : ms } to the getlasterror command. Implies {@code safe=true}.</li>
* <li>Used in combination with {@code w}</li>
* </ul>
* </li>
* </ul>
* <p>Read preference configuration:</p>
* <ul>
* <li>{@code slaveOk=true|false}: Whether a driver connected to a replica set will send reads to slaves/secondaries.</li>
* <li>{@code readPreference=enum}: The read preference for this connection. If set, it overrides any slaveOk value.
* <ul>
* <li>Enumerated values:
* <ul>
* <li>{@code primary}</li>
* <li>{@code primaryPreferred}</li>
* <li>{@code secondary}</li>
* <li>{@code secondaryPreferred}</li>
* <li>{@code nearest}</li>
* </ul>
* </li>
* </ul>
* </li>
* <li>{@code readPreferenceTags=string}. A representation of a tag set as a comma-separated list of colon-separated
 * key-value pairs, e.g. {@code "dc:ny,rack:1"}. Spaces are stripped from beginning and end of all keys and values.
 * To specify a list of tag sets, use multiple readPreferenceTags,
* e.g. {@code readPreferenceTags=dc:ny,rack:1;readPreferenceTags=dc:ny;readPreferenceTags=}
* <ul>
* <li>Note the empty value for the last one, which means match any secondary as a last resort.</li>
* <li>Order matters when using multiple readPreferenceTags.</li>
* </ul>
* </li>
* </ul>
* @see MongoClientURI
* @see MongoOptions for the default values for all options
*/
public class MongoURI {
/**
* The prefix for mongodb URIs.
*/
public static final String MONGODB_PREFIX = "mongodb://";
private final MongoClientURI mongoClientURI;
private final MongoOptions mongoOptions;
/**
* Creates a MongoURI described by a String.
* examples
* mongodb://127.0.0.1
* mongodb://fred:foobar@127.0.0.1/
* Creates a MongoURI from a string.
* @param uri the URI
* @dochub connections
*
* @deprecated Replaced by {@link MongoClientURI#MongoClientURI(String)}
*
*/
public MongoURI( String uri ){
_uri = uri;
if ( ! uri.startsWith( MONGODB_PREFIX ) )
throw new IllegalArgumentException( "uri needs to start with " + MONGODB_PREFIX );
uri = uri.substring(MONGODB_PREFIX.length());
String serverPart;
String nsPart;
String optionsPart;
{
int idx = uri.lastIndexOf( "/" );
if ( idx < 0 ){
serverPart = uri;
nsPart = null;
optionsPart = null;
}
else {
serverPart = uri.substring( 0 , idx );
nsPart = uri.substring( idx + 1 );
idx = nsPart.indexOf( "?" );
if ( idx >= 0 ){
optionsPart = nsPart.substring( idx + 1 );
nsPart = nsPart.substring( 0 , idx );
}
else {
optionsPart = null;
@Deprecated
public MongoURI( String uri ) {
this.mongoClientURI = new MongoClientURI(uri, new MongoClientOptions.Builder().legacyDefaults());
mongoOptions = new MongoOptions(mongoClientURI.getOptions());
}
}
}
{ // _username,_password,_hosts
List<String> all = new LinkedList<String>();
int idx = serverPart.indexOf( "@" );
if ( idx > 0 ){
String authPart = serverPart.substring( 0 , idx );
serverPart = serverPart.substring( idx + 1 );
idx = authPart.indexOf( ":" );
_username = authPart.substring( 0, idx );
_password = authPart.substring( idx + 1 ).toCharArray();
}
else {
_username = null;
_password = null;
}
for ( String s : serverPart.split( "," ) )
all.add( s );
_hosts = Collections.unmodifiableList( all );
}
if ( nsPart != null ){ // _database,_collection
int idx = nsPart.indexOf( "." );
if ( idx < 0 ){
_database = nsPart;
_collection = null;
}
else {
_database = nsPart.substring( 0 , idx );
_collection = nsPart.substring( idx + 1 );
}
}
else {
_database = null;
_collection = null;
}
if ( optionsPart != null && optionsPart.length() > 0 ) parseOptions( optionsPart );
}
@SuppressWarnings("deprecation")
private void parseOptions( String optionsPart ){
for ( String _part : optionsPart.split( "&|;" ) ){
int idx = _part.indexOf( "=" );
if ( idx >= 0 ){
String key = _part.substring( 0, idx ).toLowerCase();
String value = _part.substring( idx + 1 );
if ( key.equals( "maxpoolsize" ) ) _options.connectionsPerHost = Integer.parseInt( value );
else if ( key.equals( "minpoolsize" ) )
LOGGER.warning( "Currently No support in Java driver for Min Pool Size." );
else if ( key.equals( "waitqueuemultiple" ) )
_options.threadsAllowedToBlockForConnectionMultiplier = Integer.parseInt( value );
else if ( key.equals( "waitqueuetimeoutms" ) ) _options.maxWaitTime = Integer.parseInt( value );
else if ( key.equals( "connecttimeoutms" ) ) _options.connectTimeout = Integer.parseInt( value );
else if ( key.equals( "sockettimeoutms" ) ) _options.socketTimeout = Integer.parseInt( value );
else if ( key.equals( "autoconnectretry" ) ) _options.autoConnectRetry = _parseBoolean( value );
else if ( key.equals( "slaveok" ) ) _options.slaveOk = _parseBoolean( value );
else if ( key.equals( "safe" ) ) _options.safe = _parseBoolean( value );
else if ( key.equals( "w" ) ) _options.w = Integer.parseInt( value );
else if ( key.equals( "wtimeout" ) ) _options.wtimeout = Integer.parseInt( value );
else if ( key.equals( "fsync" ) ) _options.fsync = _parseBoolean( value );
else LOGGER.warning( "Unknown or Unsupported Option '" + value + "'" );
}
}
}
boolean _parseBoolean( String _in ){
String in = _in.trim();
if ( in != null && in.length() > 0 && ( in.equals( "1" ) || in.toLowerCase().equals( "true" ) || in.toLowerCase()
.equals( "yes" ) ) )
return true;
else return false;
@Deprecated
public MongoURI(final MongoClientURI mongoClientURI) {
this.mongoClientURI = mongoClientURI;
mongoOptions = new MongoOptions(mongoClientURI.getOptions());
}
// ---------------------------------
@ -174,7 +155,7 @@ public class MongoURI {
* @return
*/
public String getUsername(){
return _username;
return mongoClientURI.getUsername();
}
/**
@ -182,7 +163,7 @@ public class MongoURI {
* @return
*/
public char[] getPassword(){
return _password;
return mongoClientURI.getPassword();
}
/**
@ -190,7 +171,7 @@ public class MongoURI {
* @return
*/
public List<String> getHosts(){
return _hosts;
return mongoClientURI.getHosts();
}
/**
@ -198,7 +179,7 @@ public class MongoURI {
* @return
*/
public String getDatabase(){
return _database;
return mongoClientURI.getDatabase();
}
/**
@ -206,85 +187,84 @@ public class MongoURI {
* @return
*/
public String getCollection(){
return _collection;
return mongoClientURI.getCollection();
}
/**
* Gets the options
* @return
* Gets the credentials
*
* @since 2.11.0
*/
public MongoCredential getCredentials() {
return mongoClientURI.getCredentials();
}
/**
* Gets the options. This method will return the same instance of {@code MongoOptions} for every call, so it's
* possible to mutate the returned instance to change the defaults.
* @return the mongo options
*/
public MongoOptions getOptions(){
return _options;
return mongoOptions;
}
/**
* creates a Mongo instance based on the URI
* @return
* @return a new Mongo instance. There is no caching, so each call will create a new instance, each of which
* must be closed manually.
* @throws MongoException
* @throws UnknownHostException
*/
@SuppressWarnings("deprecation")
public Mongo connect()
throws MongoException , UnknownHostException {
throws UnknownHostException {
// TODO caching?
return new Mongo( this );
// Note: we can't change this to new MongoClient(this) as that would silently change the default write concern.
return new Mongo(this);
}
/**
* returns the DB object from a newly created Mongo instance based on this URI
* @return
* @return the database specified in the URI. This will implicitly create a new Mongo instance,
* which must be closed manually.
* @throws MongoException
* @throws UnknownHostException
*/
public DB connectDB()
throws MongoException , UnknownHostException {
// TODO auth
return connect().getDB( _database );
public DB connectDB() throws UnknownHostException {
return connect().getDB(getDatabase());
}
/**
* returns the URI's DB object from a given Mongo instance
* @param m
* @return
* @param mongo the Mongo instance to get the database from.
* @return the database specified in this URI
*/
public DB connectDB( Mongo m ){
// TODO auth
return m.getDB( _database );
public DB connectDB( Mongo mongo ){
return mongo.getDB( getDatabase() );
}
/**
* returns the URI's Collection from a given DB object
* @param db
* @param db the database to get the collection from
* @return
*/
public DBCollection connectCollection( DB db ){
return db.getCollection( _collection );
return db.getCollection( getCollection() );
}
/**
* returns the URI's Collection from a given Mongo instance
* @param m
* @return
* @param mongo the mongo instance to get the collection from
* @return the collection specified in this URI
*/
public DBCollection connectCollection( Mongo m ){
return connectDB( m ).getCollection( _collection );
public DBCollection connectCollection( Mongo mongo ){
return connectDB( mongo ).getCollection( getCollection() );
}
// ---------------------------------
final String _username;
final char[] _password;
final List<String> _hosts;
final String _database;
final String _collection;
final MongoOptions _options = new MongoOptions();
final String _uri;
static final Logger LOGGER = Logger.getLogger( "com.mongodb.MongoURI" );
@Override
public String toString() {
return _uri;
return mongoClientURI.toString();
}
}
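A minimal sketch of how the URI parsing above behaves, using made-up host names and credentials (nothing here connects to a server); the constructor simply delegates to MongoClientURI, so the accessors reflect the parsed connection string.

import com.massivecraft.mcore.xlib.mongodb.MongoURI;

public class MongoUriParseExample {
    public static void main(String[] args) {
        // Hypothetical connection string with credentials, a seed list and two options.
        String uri = "mongodb://fred:foobar@host1:27017,host2:27018/mydb"
                + "?maxPoolSize=20&readPreference=secondaryPreferred";

        // Deprecated in 2.11.x, but still functional: it wraps a MongoClientURI internally.
        @SuppressWarnings("deprecation")
        MongoURI parsed = new MongoURI(uri);

        System.out.println(parsed.getHosts());    // [host1:27017, host2:27018]
        System.out.println(parsed.getDatabase()); // mydb
        System.out.println(parsed.getUsername()); // fred
    }
}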

View File

@ -0,0 +1,134 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* A connection to a set of mongos servers.
*/
class MongosStatus extends ConnectionStatus {
private static final Logger logger = Logger.getLogger("com.mongodb.MongosStatus");
MongosStatus(Mongo mongo, List<ServerAddress> mongosAddresses) {
super(mongosAddresses, mongo);
_updater = new MongosUpdater();
}
@Override
boolean hasServerUp() {
return preferred != null;
}
@Override
Node ensureMaster() {
checkClosed();
return getPreferred();
}
@Override
List<ServerAddress> getServerAddressList() {
return new ArrayList<ServerAddress>(_mongosAddresses);
}
class MongosUpdater extends BackgroundUpdater {
MongosUpdater() {
super("MongosStatus:MongosUpdater");
}
@Override
public void run() {
List<MongosNode> mongosNodes = getMongosNodes();
try {
while (!Thread.interrupted()) {
try {
MongosNode bestThisPass = null;
for (MongosNode cur : mongosNodes) {
cur.update();
if (cur._ok) {
if (bestThisPass == null || (cur._pingTimeMS < bestThisPass._pingTimeMS)) {
bestThisPass = cur;
}
}
}
setPreferred(bestThisPass);
} catch (Exception e) {
logger.log(Level.WARNING, "couldn't do update pass", e);
}
int sleepTime = preferred == null ? updaterIntervalNoMasterMS : updaterIntervalMS;
Thread.sleep(sleepTime);
}
} catch (InterruptedException e) {
logger.log(Level.INFO, "Exiting background thread");
// Allow thread to exit
}
}
private List<MongosNode> getMongosNodes() {
List<MongosNode> mongosNodes = new ArrayList<MongosNode>(_mongosAddresses.size());
for (ServerAddress serverAddress : _mongosAddresses) {
mongosNodes.add(new MongosNode(serverAddress, _mongo, _mongoOptions));
}
return mongosNodes;
}
}
static class MongosNode extends UpdatableNode {
MongosNode(final ServerAddress addr, Mongo mongo, MongoOptions mongoOptions) {
super(addr, mongo, mongoOptions);
}
@Override
protected Logger getLogger() {
return logger;
}
}
// Sends a notification every time preferred is set.
private synchronized void setPreferred(final MongosNode bestThisPass) {
if (bestThisPass == null) {
preferred = null;
} else {
preferred = new Node(bestThisPass._pingTimeMS, bestThisPass._addr, bestThisPass._maxBsonObjectSize, bestThisPass._ok);
}
notifyAll();
}
// Gets the current preferred node. If there is no preferred node, wait to get a notification before returning null.
private synchronized Node getPreferred() {
if (preferred == null) {
try {
synchronized (this) {
wait(_mongo.getMongoOptions().getConnectTimeout());
}
} catch (InterruptedException e) {
throw new MongoInterruptedException("Interrupted while waiting for next update to mongos status", e);
}
}
return preferred;
}
// The current preferred mongos Node to use as the master. This is not necessarily the node that is currently in use.
// Rather, it's the node that is preferred if there is a problem with the currently in use node.
private volatile Node preferred;
}

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.mongodb.util.Util;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
class NativeAuthenticationHelper {
static DBObject getAuthCommand(String userName, char[] password, String nonce) {
return getAuthCommand(userName, createHash(userName, password), nonce);
}
static DBObject getAuthCommand(String userName, byte[] authHash, String nonce) {
String key = nonce + userName + new String(authHash);
BasicDBObject cmd = new BasicDBObject();
cmd.put("authenticate", 1);
cmd.put("user", userName);
cmd.put("nonce", nonce);
cmd.put("key", Util.hexMD5(key.getBytes()));
return cmd;
}
static BasicDBObject getNonceCommand() {
return new BasicDBObject("getnonce", 1);
}
static byte[] createHash(String userName, char[] password) {
ByteArrayOutputStream bout = new ByteArrayOutputStream(userName.length() + 20 + password.length);
try {
bout.write(userName.getBytes());
bout.write(":mongo:".getBytes());
for (final char ch : password) {
if (ch >= 128)
throw new IllegalArgumentException("can't handle non-ascii passwords yet");
bout.write((byte) ch);
}
} catch (IOException ioe) {
throw new RuntimeException("impossible", ioe);
}
return Util.hexMD5(bout.toByteArray()).getBytes();
}
private NativeAuthenticationHelper() {
}
}
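For reference, the key produced for the authenticate command above is hexMD5(nonce + user + hexMD5(user + ":mongo:" + password)). A minimal JDK-only sketch of that calculation, with made-up credentials and nonce:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public class MongoCrKeyExample {
    // Hex-encodes an MD5 digest, mirroring what Util.hexMD5 does in the driver.
    static String hexMd5(String s) throws Exception {
        StringBuilder hex = new StringBuilder();
        for (byte b : MessageDigest.getInstance("MD5").digest(s.getBytes(StandardCharsets.UTF_8))) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }

    public static void main(String[] args) throws Exception {
        String user = "fred", password = "foobar", nonce = "6d28f7a9b7c1e2d3"; // illustrative values
        String pwHash = hexMd5(user + ":mongo:" + password); // what createHash() computes
        String key = hexMd5(nonce + user + pwHash);          // what getAuthCommand() puts under "key"
        System.out.println(key);
    }
}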

View File

@ -1,5 +1,3 @@
// OutMessage.java
/**
* Copyright (C) 2008 10gen Inc.
*
@ -18,115 +16,243 @@
package com.massivecraft.mcore.xlib.mongodb;
import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.atomic.AtomicInteger;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.BasicBSONEncoder;
import com.massivecraft.mcore.xlib.bson.io.PoolOutputBuffer;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Collection;
import java.util.concurrent.atomic.AtomicInteger;
class OutMessage extends BasicBSONEncoder {
static AtomicInteger ID = new AtomicInteger(1);
enum OpCode {
OP_UPDATE(2001),
OP_INSERT(2002),
OP_QUERY(2004),
OP_GETMORE(2005),
OP_DELETE(2006),
OP_KILL_CURSORS(2007);
static OutMessage query( Mongo m , int options , String ns , int numToSkip , int batchSize , DBObject query , DBObject fields ){
return query( m, options, ns, numToSkip, batchSize, query, fields, ReadPreference.PRIMARY );
OpCode(int value) {
this.value = value;
}
static OutMessage query( Mongo m , int options , String ns , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref ){
return query( m, options, ns, numToSkip, batchSize, query, fields, readPref, DefaultDBEncoder.FACTORY.create());
private final int value;
public int getValue() {
return value;
}
}
static OutMessage query( Mongo m , int options , String ns , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref, DBEncoder enc ){
OutMessage out = new OutMessage( m , 2004, enc );
static AtomicInteger REQUEST_ID = new AtomicInteger(1);
out._appendQuery( options , ns , numToSkip , batchSize , query , fields, readPref);
public static OutMessage insert(final DBCollection collection, final DBEncoder encoder, WriteConcern concern) {
OutMessage om = new OutMessage(collection, OpCode.OP_INSERT, encoder);
om.writeInsertPrologue(concern);
return out;
return om;
}
OutMessage( Mongo m ){
this( m , DefaultDBEncoder.FACTORY.create() );
public static OutMessage update(final DBCollection collection, final DBEncoder encoder,
final boolean upsert, final boolean multi, final DBObject query, final DBObject o) {
OutMessage om = new OutMessage(collection, OpCode.OP_UPDATE, encoder, query);
om.writeUpdate(upsert, multi, query, o);
return om;
}
OutMessage( Mongo m , int op ){
this( m );
reset( op );
public static OutMessage remove(final DBCollection collection, final DBEncoder encoder, final DBObject query) {
OutMessage om = new OutMessage(collection, OpCode.OP_DELETE, encoder, query);
om.writeRemove();
return om;
}
OutMessage( Mongo m , DBEncoder encoder ) {
_encoder = encoder;
static OutMessage query( DBCollection collection , int options , int numToSkip , int batchSize , DBObject query , DBObject fields ){
return query( collection, options, numToSkip, batchSize, query, fields, ReadPreference.primary() );
}
static OutMessage query( DBCollection collection , int options , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref ){
return query( collection, options, numToSkip, batchSize, query, fields, readPref, DefaultDBEncoder.FACTORY.create());
}
static OutMessage query( DBCollection collection , int options , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref, DBEncoder enc ){
OutMessage om = new OutMessage(collection, enc, query, options, readPref);
om.writeQuery(fields, numToSkip, batchSize);
return om;
}
static OutMessage getMore(DBCollection collection, long cursorId, int batchSize) {
OutMessage om = new OutMessage(collection, OpCode.OP_GETMORE);
om.writeGetMore(cursorId, batchSize);
return om;
}
static OutMessage killCursors(Mongo mongo, int numCursors) {
OutMessage om = new OutMessage(mongo , OpCode.OP_KILL_CURSORS);
om.writeKillCursorsPrologue(numCursors);
return om;
}
private OutMessage( Mongo m , OpCode opCode ){
this(null, m, opCode, null);
}
private OutMessage(final DBCollection collection, final OpCode opCode) {
this(collection, opCode, null);
}
private OutMessage(final DBCollection collection, final OpCode opCode, final DBEncoder enc) {
this(collection, collection.getDB().getMongo(), opCode, enc);
}
private OutMessage(final DBCollection collection, final Mongo m, final OpCode opCode, final DBEncoder enc) {
this(collection, m, opCode, enc, null, -1, null);
}
private OutMessage(final DBCollection collection, final OpCode opCode, final DBEncoder enc, final DBObject query) {
this(collection, collection.getDB().getMongo(), opCode, enc, query, 0, null);
}
private OutMessage(final DBCollection collection, final DBEncoder enc, final DBObject query, final int options, final ReadPreference readPref) {
this(collection, collection.getDB().getMongo(), OpCode.OP_QUERY, enc, query, options, readPref);
}
private OutMessage(final DBCollection collection, final Mongo m, OpCode opCode, final DBEncoder enc, final DBObject query, final int options, final ReadPreference readPref) {
_collection = collection;
_mongo = m;
_buffer = _mongo == null ? new PoolOutputBuffer() : _mongo._bufferPool.get();
_encoder = enc;
_buffer = _mongo._bufferPool.get();
_buffer.reset();
set(_buffer);
set( _buffer );
_id = REQUEST_ID.getAndIncrement();
_opCode = opCode;
writeMessagePrologue(opCode);
if (query == null) {
_query = null;
_queryOptions = 0;
} else {
_query = query;
int allOptions = options;
if (readPref != null && readPref.isSlaveOk()) {
allOptions |= Bytes.QUERYOPTION_SLAVEOK;
}
OutMessage( Mongo m , int op , DBEncoder enc ) {
this( m , enc );
reset( op );
_queryOptions = allOptions;
}
private void _appendQuery( int options , String ns , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref){
_queryOptions = options;
_readPref = readPref;
//If the readPrefs are non-null and non-primary, set slaveOk query option
if (_readPref != null && !(_readPref instanceof ReadPreference.PrimaryReadPreference))
_queryOptions |= Bytes.QUERYOPTION_SLAVEOK;
writeInt( _queryOptions );
writeCString( ns );
writeInt( numToSkip );
writeInt( batchSize );
putObject( query );
if ( fields != null )
putObject( fields );
}
private void reset( int op ){
done();
_buffer.reset();
set( _buffer );
private void writeInsertPrologue(final WriteConcern concern) {
int flags = 0;
if (concern.getContinueOnErrorForInsert()) {
flags |= 1;
}
writeInt(flags);
writeCString(_collection.getFullName());
}
_id = ID.getAndIncrement();
private void writeUpdate(final boolean upsert, final boolean multi, final DBObject query, final DBObject o) {
writeInt(0); // reserved
writeCString(_collection.getFullName());
int flags = 0;
if ( upsert ) flags |= 1;
if ( multi ) flags |= 2;
writeInt(flags);
putObject(query);
putObject(o);
}
private void writeRemove() {
writeInt(0); // reserved
writeCString(_collection.getFullName());
Collection<String> keys = _query.keySet();
if ( keys.size() == 1 && keys.iterator().next().equals( "_id" ) && _query.get( keys.iterator().next() ) instanceof ObjectId)
writeInt( 1 );
else
writeInt( 0 );
putObject(_query);
}
private void writeGetMore(final long cursorId, final int batchSize) {
writeInt(0);
writeCString(_collection.getFullName());
writeInt(batchSize);
writeLong(cursorId);
}
private void writeKillCursorsPrologue(final int numCursors) {
writeInt(0); // reserved
writeInt(numCursors);
}
private void writeQuery(final DBObject fields, final int numToSkip, final int batchSize) {
writeInt(_queryOptions);
writeCString(_collection.getFullName());
writeInt(numToSkip);
writeInt(batchSize);
putObject(_query);
if (fields != null)
putObject(fields);
}
private void writeMessagePrologue(final OpCode opCode) {
writeInt( 0 ); // length: will set this later
writeInt( _id );
writeInt( 0 ); // response to
writeInt( op );
writeInt( opCode.getValue() );
}
void prepare(){
if (_buffer == null) {
throw new IllegalStateException("Already closed");
}
_buffer.writeInt( 0 , _buffer.size() );
}
void pipe( OutputStream out ) throws IOException {
if (_buffer == null) {
throw new IllegalStateException("Already closed");
}
void pipe( OutputStream out )
throws IOException {
_buffer.pipe( out );
}
int size(){
int size() {
if (_buffer == null) {
throw new IllegalStateException("Already closed");
}
return _buffer.size();
}
byte[] toByteArray(){
return _buffer.toByteArray();
void doneWithMessage() {
if (_buffer == null) {
throw new IllegalStateException("Only call this once per instance");
}
void doneWithMessage(){
if ( _buffer != null && _mongo != null ) {
_buffer.reset();
_mongo._bufferPool.done( _buffer );
}
_mongo._bufferPool.done(_buffer);
_buffer = null;
_mongo = null;
done();
}
boolean hasOption( int option ){
@ -137,30 +263,44 @@ class OutMessage extends BasicBSONEncoder {
return _id;
}
OpCode getOpCode() {
return _opCode;
}
DBObject getQuery() {
return _query;
}
String getNamespace() {
return _collection != null ? _collection.getFullName() : null;
}
int getNumDocuments() {
return _numDocuments;
}
@Override
public int putObject(BSONObject o) {
if (_buffer == null) {
throw new IllegalStateException("Already closed");
}
// check max size
int sz = _encoder.writeObject(_buf, o);
if (_mongo != null) {
int maxsize = _mongo.getConnector().getMaxBsonObjectSize();
maxsize = Math.max(maxsize, Bytes.MAX_OBJECT_SIZE);
if (sz > maxsize) {
throw new MongoInternalException("DBObject of size " + sz + " is over Max BSON size " + _mongo.getMaxBsonObjectSize());
int objectSize = _encoder.writeObject(_buf, o);
if (objectSize > Math.max(_mongo.getConnector().getMaxBsonObjectSize(), Bytes.MAX_OBJECT_SIZE)) {
throw new MongoInternalException("DBObject of size " + objectSize + " is over Max BSON size " + _mongo.getMaxBsonObjectSize());
}
}
return sz;
_numDocuments++;
return objectSize;
}
public ReadPreference getReadPreference(){
return _readPref;
}
private Mongo _mongo;
private final Mongo _mongo;
private final DBCollection _collection;
private PoolOutputBuffer _buffer;
private int _id;
private int _queryOptions = 0;
private ReadPreference _readPref = ReadPreference.PRIMARY;
private DBEncoder _encoder;
private final int _id;
private final OpCode _opCode;
private final int _queryOptions;
private final DBObject _query;
private final DBEncoder _encoder;
private volatile int _numDocuments; // only one thread will modify this field, so volatile is sufficient synchronization
}
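The prologue written by writeMessagePrologue() is the standard 16-byte MongoDB wire header: message length (patched later in prepare()), request id, responseTo and op code, each a little-endian int32. A driver-independent sketch of that layout:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class WireHeaderExample {
    // Builds the 16-byte header once the total message size is known.
    static byte[] header(int totalMessageLength, int requestId, int opCode) {
        ByteBuffer buf = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);
        buf.putInt(totalMessageLength); // messageLength, includes these 16 bytes
        buf.putInt(requestId);          // requestID
        buf.putInt(0);                  // responseTo (0 for requests)
        buf.putInt(opCode);             // e.g. 2004 for OP_QUERY
        return buf.array();
    }

    public static void main(String[] args) {
        byte[] h = header(42, 1, 2004); // illustrative values only
        System.out.println(h.length);   // 16
    }
}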

View File

@ -23,6 +23,7 @@
package com.massivecraft.mcore.xlib.mongodb;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.regex.Pattern;
@ -31,6 +32,7 @@ import java.util.regex.Pattern;
* @author Julson Lim
*
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public class QueryBuilder {
/**
@ -61,7 +63,7 @@ public class QueryBuilder {
* Adds a new key to the query if not present yet.
* Sets this key as the current key.
* @param key MongoDB document key
* @return Returns the current QueryBuilder
* @return this
*/
public QueryBuilder put(String key) {
_currentKey = key;
@ -72,10 +74,10 @@ public class QueryBuilder {
}
/**
* Equivalent to <code>QueryBuilder.put(key)</code>. Intended for compound query chains to be more readable
* Example: QueryBuilder.start("a").greaterThan(1).and("b").lessThan(3)
* Equivalent to <code>QueryBuilder.put(key)</code>. Intended for compound query chains to be more readable, e.g.
* {@code QueryBuilder.start("a").greaterThan(1).and("b").lessThan(3) }
* @param key MongoDB document key
* @return Returns the current QueryBuilder with an appended key operand
* @return this
*/
public QueryBuilder and(String key) {
return put(key);
@ -211,6 +213,18 @@ public class QueryBuilder {
return this;
}
/**
* Equivalent to the $elemMatch operand
* @param match the object to match
* @return Returns the current QueryBuilder with an appended elemMatch operator
*/
public QueryBuilder elemMatch(final DBObject match) {
addOperand(QueryOperators.ELEM_MATCH, match);
return this;
}
/**
 * Equivalent to the $within operand, used for geospatial operations
* @param x x coordinate
@ -219,8 +233,8 @@ public class QueryBuilder {
* @return
*/
public QueryBuilder withinCenter( double x , double y , double radius ){
addOperand( "$within" ,
new BasicDBObject( "$center" , new Object[]{ new Double[]{ x , y } , radius } ) );
addOperand( QueryOperators.WITHIN ,
new BasicDBObject(QueryOperators.CENTER, new Object[]{ new Double[]{ x , y } , radius } ) );
return this;
}
@ -231,7 +245,7 @@ public class QueryBuilder {
* @return
*/
public QueryBuilder near( double x , double y ){
addOperand( "$near" ,
addOperand(QueryOperators.NEAR,
new Double[]{ x , y } );
return this;
}
@ -244,7 +258,7 @@ public class QueryBuilder {
* @return
*/
public QueryBuilder near( double x , double y , double maxDistance ){
addOperand( "$near" ,
addOperand( QueryOperators.NEAR ,
new Double[]{ x , y , maxDistance } );
return this;
}
@ -256,7 +270,7 @@ public class QueryBuilder {
* @return
*/
public QueryBuilder nearSphere( double longitude , double latitude ){
addOperand( "$nearSphere" ,
addOperand(QueryOperators.NEAR_SPHERE,
new Double[]{ longitude , latitude } );
return this;
}
@ -269,7 +283,7 @@ public class QueryBuilder {
* @return
*/
public QueryBuilder nearSphere( double longitude , double latitude , double maxDistance ){
addOperand( "$nearSphere" ,
addOperand( QueryOperators.NEAR_SPHERE ,
new Double[]{ longitude , latitude , maxDistance } );
return this;
}
@ -283,8 +297,8 @@ public class QueryBuilder {
* @return
*/
public QueryBuilder withinCenterSphere( double longitude , double latitude , double maxDistance ){
addOperand( "$within" ,
new BasicDBObject( "$centerSphere" , new Object[]{ new Double[]{longitude , latitude} , maxDistance } ) );
addOperand( QueryOperators.WITHIN ,
new BasicDBObject(QueryOperators.CENTER_SPHERE, new Object[]{ new Double[]{longitude , latitude} , maxDistance } ) );
return this;
}
@ -298,8 +312,8 @@ public class QueryBuilder {
* @return
*/
public QueryBuilder withinBox(double x, double y, double x2, double y2) {
addOperand( "$within" ,
new BasicDBObject( "$box" , new Object[] { new Double[] { x, y }, new Double[] { x2, y2 } } ) );
addOperand( QueryOperators.WITHIN ,
new BasicDBObject(QueryOperators.BOX, new Object[] { new Double[] { x, y }, new Double[] { x2, y2 } } ) );
return this;
}
@ -307,47 +321,54 @@ public class QueryBuilder {
* Equivalent to a $within operand, based on a bounding polygon represented by an array of points
*
* @param points an array of Double[] defining the vertices of the search area
* @return
* @return this
*/
public QueryBuilder withinPolygon(List<Double[]> points) {
if(points == null || points.isEmpty() || points.size() < 3)
throw new IllegalArgumentException("Polygon insufficient number of vertices defined");
addOperand( "$within" ,
new BasicDBObject( "$polygon" , points ) );
addOperand( QueryOperators.WITHIN ,
new BasicDBObject(QueryOperators.POLYGON, points ) );
return this;
}
/**
* Equivalent to a $or operand
* @param ors
* @return
* Equivalent to $not meta operator. Must be followed by an operand, not a value, e.g.
* {@code QueryBuilder.start("val").not().mod(Arrays.asList(10, 1)) }
*
* @return Returns the current QueryBuilder with an appended "not" meta operator
*/
public QueryBuilder not() {
_hasNot = true;
return this;
}
/**
* Equivalent to an $or operand
* @param ors the list of conditions to or together
* @return Returns the current QueryBuilder with appended "or" operator
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public QueryBuilder or( DBObject ... ors ){
List l = (List)_query.get( "$or" );
List l = (List)_query.get( QueryOperators.OR );
if ( l == null ){
l = new ArrayList();
_query.put( "$or" , l );
_query.put( QueryOperators.OR , l );
}
for ( DBObject o : ors )
l.add( o );
Collections.addAll(l, ors);
return this;
}
/**
* Equivalent to an $and operand
* @param ands
* @return
* @param ands the list of conditions to and together
* @return Returns the current QueryBuilder with appended "and" operator
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public QueryBuilder and( DBObject ... ands ){
List l = (List)_query.get( "$and" );
List l = (List)_query.get( QueryOperators.AND );
if ( l == null ){
l = new ArrayList();
_query.put( "$and" , l );
_query.put( QueryOperators.AND , l );
}
for ( DBObject o : ands )
l.add( o );
Collections.addAll(l, ands);
return this;
}
@ -367,6 +388,10 @@ public class QueryBuilder {
private void addOperand(String op, Object value) {
if(op == null) {
if (_hasNot) {
value = new BasicDBObject(QueryOperators.NOT, value);
_hasNot = false;
}
_query.put(_currentKey, value);
return;
}
@ -375,13 +400,21 @@ public class QueryBuilder {
BasicDBObject operand;
if(!(storedValue instanceof DBObject)) {
operand = new BasicDBObject();
if (_hasNot) {
DBObject notOperand = new BasicDBObject(QueryOperators.NOT, operand);
_query.put(_currentKey, notOperand);
_hasNot = false;
} else {
_query.put(_currentKey, operand);
}
} else {
operand = (BasicDBObject)_query.get(_currentKey);
if (operand.get(QueryOperators.NOT) != null) {
operand = (BasicDBObject) operand.get(QueryOperators.NOT);
}
}
operand.put(op, value);
}
@SuppressWarnings("serial")
static class QueryBuilderException extends RuntimeException {
QueryBuilderException(String message) {
@ -392,5 +425,6 @@ public class QueryBuilder {
private DBObject _query;
private String _currentKey;
private boolean _hasNot;
}
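A short usage sketch of the builder, covering the chained style plus the $not and $elemMatch support added here; field names and values are made up:

import java.util.Arrays;

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.QueryBuilder;

public class QueryBuilderExample {
    public static void main(String[] args) {
        // { "age" : { "$gt" : 18 , "$lt" : 65 } , "score" : { "$not" : { "$mod" : [ 10 , 1 ] } } }
        DBObject query = QueryBuilder.start("age").greaterThan(18).lessThan(65)
                .and("score").not().mod(Arrays.asList(10, 1))
                .get();

        // $elemMatch against an array of sub-documents.
        DBObject byElement = QueryBuilder.start("results")
                .elemMatch(new BasicDBObject("product", "xyz").append("score", 8))
                .get();

        System.out.println(query);
        System.out.println(byElement);
    }
}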

View File

@ -0,0 +1,192 @@
package com.massivecraft.mcore.xlib.mongodb;
/**
 * Utility for constructing the query operation command with query, orderby, hint, explain, and snapshot clauses.
*/
class QueryOpBuilder {
static final String READ_PREFERENCE_META_OPERATOR = "$readPreference";
private DBObject query;
private DBObject orderBy;
private DBObject hintObj;
private String hintStr;
private boolean explain;
private boolean snapshot;
private DBObject readPref;
private DBObject specialFields;
public QueryOpBuilder(){
}
/**
* Adds the query clause to the operation
* @param query
* @return
*/
public QueryOpBuilder addQuery(DBObject query){
this.query = query;
return this;
}
/**
* Adds the orderby clause to the operation
* @param orderBy
* @return
*/
public QueryOpBuilder addOrderBy(DBObject orderBy){
this.orderBy = orderBy;
return this;
}
/**
* Adds the hint clause to the operation
* @param hint
* @return
*/
public QueryOpBuilder addHint(String hint){
this.hintStr = hint;
return this;
}
/**
* Adds hint clause to the operation
* @param hint
* @return
*/
public QueryOpBuilder addHint(DBObject hint){
this.hintObj = hint;
return this;
}
/**
* Adds special fields to the operation
* @param specialFields
* @return
*/
public QueryOpBuilder addSpecialFields(DBObject specialFields){
this.specialFields = specialFields;
return this;
}
/**
* Adds the explain flag to the operation
* @param explain
* @return
*/
public QueryOpBuilder addExplain(boolean explain){
this.explain = explain;
return this;
}
/**
* Adds the snapshot flag to the operation
* @param snapshot
* @return
*/
public QueryOpBuilder addSnapshot(boolean snapshot){
this.snapshot = snapshot;
return this;
}
/**
* Adds ReadPreference to the operation
* @param readPref
* @return
*/
public QueryOpBuilder addReadPreference(DBObject readPref){
this.readPref = readPref;
return this;
}
/**
* Constructs the query operation DBObject
* @return DBObject representing the query command to be sent to server
*/
public DBObject get() {
DBObject lclQuery = query;
//must always have a query
if (lclQuery == null) {
lclQuery = new BasicDBObject();
}
if (hasSpecialQueryFields()) {
DBObject queryop = (specialFields == null ? new BasicDBObject() : specialFields);
addToQueryObject(queryop, "$query", lclQuery, true);
addToQueryObject(queryop, "$orderby", orderBy, false);
if (hintStr != null)
addToQueryObject(queryop, "$hint", hintStr);
if (hintObj != null)
addToQueryObject(queryop, "$hint", hintObj);
if (explain)
queryop.put("$explain", true);
if (snapshot)
queryop.put("$snapshot", true);
if (readPref != null)
queryop.put(READ_PREFERENCE_META_OPERATOR, readPref);
return queryop;
}
return lclQuery;
}
private boolean hasSpecialQueryFields(){
if ( readPref != null )
return true;
if ( specialFields != null )
return true;
if ( orderBy != null && orderBy.keySet().size() > 0 )
return true;
if ( hintStr != null || hintObj != null || snapshot || explain)
return true;
return false;
}
/**
* Adds DBObject to the operation
* @param dbobj DBObject to add field to
* @param field name of the field
* @param obj object to add to the operation. Ignore if <code>null</code>.
* @param sendEmpty if <code>true</code> adds obj even if it's empty. Ignore if <code>false</code> and obj is empty.
* @return
*/
private void addToQueryObject(DBObject dbobj, String field, DBObject obj, boolean sendEmpty) {
if (obj == null)
return;
if (!sendEmpty && obj.keySet().size() == 0)
return;
addToQueryObject(dbobj, field, obj);
}
/**
* Adds an Object to the operation
* @param dbobj DBObject to add field to
* @param field name of the field
* @param obj Object to be added. Ignore if <code>null</code>
* @return
*/
private void addToQueryObject(DBObject dbobj, String field, Object obj) {
if (obj == null)
return;
dbobj.put(field, obj);
}
}
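QueryOpBuilder is package-private, so callers never see it directly, but the shape it produces once any special field is present can be mirrored by hand. A rough sketch of the wrapped form, with made-up filter and sort values:

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;

public class QueryOpShapeExample {
    public static void main(String[] args) {
        // The plain filter the application asked for.
        DBObject filter = new BasicDBObject("status", "A");

        // Roughly what get() returns once an orderby and explain are requested:
        // the filter moves under $query and the modifiers sit alongside it.
        DBObject queryOp = new BasicDBObject("$query", filter)
                .append("$orderby", new BasicDBObject("age", -1))
                .append("$explain", true);

        System.out.println(queryOp);
    }
}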

View File

@ -19,14 +19,18 @@ package com.massivecraft.mcore.xlib.mongodb;
/**
* MongoDB keywords for various query operations
* @author Julson Lim
*
* @author Julson Lim
*/
public class QueryOperators {
public static final String OR = "$or";
public static final String AND = "$and";
public static final String GT = "$gt";
public static final String GTE = "$gte";
public static final String LT = "$lt";
public static final String LTE = "$lte";
public static final String NE = "$ne";
public static final String IN = "$in";
public static final String NIN = "$nin";
@ -34,6 +38,36 @@ public class QueryOperators {
public static final String ALL = "$all";
public static final String SIZE = "$size";
public static final String EXISTS = "$exists";
public static final String ELEM_MATCH = "$elemMatch";
// (to be implemented in QueryBuilder)
public static final String WHERE = "$where";
public static final String NOR = "$nor";
public static final String TYPE = "$type";
public static final String NOT = "$not";
// geo operators
public static final String WITHIN = "$within";
public static final String NEAR = "$near";
public static final String NEAR_SPHERE = "$nearSphere";
public static final String BOX = "$box";
public static final String CENTER = "$center";
public static final String POLYGON = "$polygon";
public static final String CENTER_SPHERE = "$centerSphere";
// (to be implemented in QueryBuilder)
public static final String MAX_DISTANCE = "$maxDistance";
public static final String UNIQUE_DOCS = "$uniqueDocs";
// meta query operators (to be implemented in QueryBuilder)
public static final String RETURN_KEY = "$returnKey";
public static final String MAX_SCAN = "$maxScan";
public static final String ORDER_BY = "$orderby";
public static final String EXPLAIN = "$explain";
public static final String SNAPSHOT = "$snapshot";
public static final String MIN = "$min";
public static final String MAX = "$max";
public static final String SHOW_DISK_LOC = "$showDiskLoc";
public static final String HINT = "$hint";
public static final String COMMENT = "$comment";
}
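These constants mostly guard against typos in hand-built queries; a trivial sketch using hypothetical field names:

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.QueryOperators;

public class QueryOperatorsExample {
    public static void main(String[] args) {
        // { "qty" : { "$gte" : 10 , "$lt" : 50 } } without spelling the operators by hand.
        DBObject range = new BasicDBObject("qty",
                new BasicDBObject(QueryOperators.GTE, 10).append(QueryOperators.LT, 50));
        System.out.println(range);
    }
}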

View File

@ -55,6 +55,7 @@ import com.massivecraft.mcore.xlib.bson.types.ObjectId;
/**
* This object wraps the binary object format ("BSON") used for the transport of serialized objects to / from the Mongo database.
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public class RawDBObject implements DBObject {
RawDBObject( ByteBuffer buf ){
@ -75,7 +76,6 @@ public class RawDBObject implements DBObject {
return e.getObject();
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public Map toMap() {
Map m = new HashMap();
Iterator i = this.keySet().iterator();
@ -94,7 +94,6 @@ public class RawDBObject implements DBObject {
throw new RuntimeException( "read only" );
}
@SuppressWarnings("rawtypes")
public void putAll( Map m ){
throw new RuntimeException( "read only" );
}

View File

@ -13,58 +13,331 @@
package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.mongodb.ReplicaSetStatus.ReplicaSetNode;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class ReadPreference {
public static class PrimaryReadPreference extends ReadPreference {
private PrimaryReadPreference() {}
/**
* An abstract class that represents preferred replica set members to which a query or command can be sent.
*
* @mongodb.driver.manual applications/replication/#replica-set-read-preference Read Preference
*/
public abstract class ReadPreference {
ReadPreference() {
}
/**
* @return <code>true</code> if this preference allows reads or commands from secondary nodes
*/
public abstract boolean isSlaveOk();
/**
* @return <code>DBObject</code> representation of this preference
*/
public abstract DBObject toDBObject();
/**
* The name of this read preference.
*
* @return the name
*/
public abstract String getName();
abstract ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set);
/**
* Preference to read from primary only.
* Cannot be combined with tags.
*
* @author breinero
*/
private static class PrimaryReadPreference extends ReadPreference {
private PrimaryReadPreference() {
}
@Override
public String toString(){
return "ReadPreference.PRIMARY" ;
}
}
public static class SecondaryReadPreference extends ReadPreference {
private SecondaryReadPreference() {}
@Override
public String toString(){
return "ReadPreference.SECONDARY";
public boolean isSlaveOk() {
return false;
}
@Override
public String toString() {
return getName();
}
@Override
public boolean equals(final Object o) {
return o != null && getClass() == o.getClass();
}
@Override
public int hashCode() {
return getName().hashCode();
}
@Override
ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
return set.getMaster();
}
@Override
public DBObject toDBObject() {
return new BasicDBObject("mode", getName());
}
@Override
public String getName() {
return "primary";
}
}
/**
* Read from a secondary if available and matches tags.
*
* @deprecated As of release 2.9, replaced by
* <code>ReadPreference.secondaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets)</code>
*/
@Deprecated
public static class TaggedReadPreference extends ReadPreference {
public TaggedReadPreference( DBObject tags ) {
public TaggedReadPreference(Map<String, String> tags) {
if (tags == null || tags.size() == 0) {
throw new IllegalArgumentException("tags can not be null or empty");
}
_tags = new BasicDBObject(tags);
List<DBObject> maps = splitMapIntoMultipleMaps(_tags);
_pref = new TaggableReadPreference.SecondaryReadPreference(maps.get(0), getRemainingMaps(maps));
}
public TaggedReadPreference(DBObject tags) {
if (tags == null || tags.keySet().size() == 0) {
throw new IllegalArgumentException("tags can not be null or empty");
}
_tags = tags;
List<DBObject> maps = splitMapIntoMultipleMaps(_tags);
_pref = new TaggableReadPreference.SecondaryReadPreference(maps.get(0), getRemainingMaps(maps));
}
public TaggedReadPreference( Map<String, String> tags ) {
_tags = new BasicDBObject( tags );
}
public DBObject getTags() {
DBObject tags = new BasicDBObject();
for (String key : _tags.keySet())
tags.put(key, _tags.get(key));
public DBObject getTags(){
return _tags;
return tags;
}
@Override
public String toString(){
return getTags().toString();
public boolean isSlaveOk() {
return _pref.isSlaveOk();
}
@Override
ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
return _pref.getNode(set);
}
@Override
public DBObject toDBObject() {
return _pref.toDBObject();
}
@Override
public String getName() {
return _pref.getName();
}
private static List<DBObject> splitMapIntoMultipleMaps(DBObject tags) {
List<DBObject> tagList = new ArrayList<DBObject>(tags.keySet().size());
for (String key : tags.keySet()) {
tagList.add(new BasicDBObject(key, tags.get(key).toString()));
}
return tagList;
}
private DBObject[] getRemainingMaps(final List<DBObject> maps) {
if (maps.size() <= 1) {
return new DBObject[0];
}
return maps.subList(1, maps.size() - 1).toArray(new DBObject[maps.size() - 1]);
}
private final DBObject _tags;
private final ReadPreference _pref;
}
public static ReadPreference PRIMARY = new PrimaryReadPreference();
/**
* @return ReadPreference which reads from primary only
*/
public static ReadPreference primary() {
return _PRIMARY;
}
public static ReadPreference SECONDARY = new SecondaryReadPreference();
/**
* @return ReadPreference which reads primary if available.
*/
public static ReadPreference primaryPreferred() {
return _PRIMARY_PREFERRED;
}
/*
/**
* @return ReadPreference which reads primary if available, otherwise a secondary respective of tags.
*/
public static TaggableReadPreference primaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets) {
return new TaggableReadPreference.PrimaryPreferredReadPreference(firstTagSet, remainingTagSets);
}
/**
* @return ReadPreference which reads secondary.
*/
public static ReadPreference secondary() {
return _SECONDARY;
}
/**
* @return ReadPreference which reads secondary respective of tags.
*/
public static TaggableReadPreference secondary(DBObject firstTagSet, DBObject... remainingTagSets) {
return new TaggableReadPreference.SecondaryReadPreference(firstTagSet, remainingTagSets);
}
/**
* @return ReadPreference which reads secondary if available, otherwise from primary.
*/
public static ReadPreference secondaryPreferred() {
return _SECONDARY_PREFERRED;
}
/**
* @return ReadPreference which reads secondary if available respective of tags, otherwise from primary irrespective of tags.
*/
public static TaggableReadPreference secondaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets) {
return new TaggableReadPreference.SecondaryPreferredReadPreference(firstTagSet, remainingTagSets);
}
/**
* @return ReadPreference which reads nearest node.
*/
public static ReadPreference nearest() {
return _NEAREST;
}
public static ReadPreference valueOf(String name) {
if (name == null) {
throw new IllegalArgumentException();
}
name = name.toLowerCase();
if (name.equals(_PRIMARY.getName().toLowerCase())) {
return _PRIMARY;
}
if (name.equals(_SECONDARY.getName().toLowerCase())) {
return _SECONDARY;
}
if (name.equals(_SECONDARY_PREFERRED.getName().toLowerCase())) {
return _SECONDARY_PREFERRED;
}
if (name.equals(_PRIMARY_PREFERRED.getName().toLowerCase())) {
return _PRIMARY_PREFERRED;
}
if (name.equals(_NEAREST.getName().toLowerCase())) {
return _NEAREST;
}
throw new IllegalArgumentException("No match for read preference of " + name);
}
public static TaggableReadPreference valueOf(String name, DBObject firstTagSet, final DBObject... remainingTagSets) {
if (name == null) {
throw new IllegalArgumentException();
}
name = name.toLowerCase();
if (name.equals(_SECONDARY.getName().toLowerCase())) {
return new TaggableReadPreference.SecondaryReadPreference(firstTagSet, remainingTagSets);
}
if (name.equals(_SECONDARY_PREFERRED.getName().toLowerCase())) {
return new TaggableReadPreference.SecondaryPreferredReadPreference(firstTagSet, remainingTagSets);
}
if (name.equals(_PRIMARY_PREFERRED.getName().toLowerCase())) {
return new TaggableReadPreference.PrimaryPreferredReadPreference(firstTagSet, remainingTagSets);
}
if (name.equals(_NEAREST.getName().toLowerCase())) {
return new TaggableReadPreference.NearestReadPreference(firstTagSet, remainingTagSets);
}
throw new IllegalArgumentException("No match for read preference of " + name);
}
/**
* @return ReadPreference which reads nearest node respective of tags.
*/
public static TaggableReadPreference nearest(DBObject firstTagSet, DBObject... remainingTagSets) {
return new TaggableReadPreference.NearestReadPreference(firstTagSet, remainingTagSets);
}
/**
* A primary read preference. Equivalent to calling {@code ReadPreference.primary()}.
*
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference#primary()
* @deprecated As of release 2.9.0, replaced by {@code ReadPreference.primary()}
*/
@Deprecated
public static final ReadPreference PRIMARY;
/**
* A secondary-preferred read preference. Equivalent to calling
* {@code ReadPreference.secondaryPreferred}. This reference should really have been called
* {@code ReadPreference.SECONDARY_PREFERRED}, but the naming of it preceded the idea of distinguishing
* between secondary and secondary-preferred, so for backwards compatibility, leaving the name as is with
* the behavior as it was when it was created.
*
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference#secondary()
* @see com.massivecraft.mcore.xlib.mongodb.ReadPreference#secondaryPreferred()
* @deprecated As of release 2.9.0, replaced by {@code ReadPreference.secondaryPreferred()}
*/
@Deprecated
public static final ReadPreference SECONDARY;
/**
* @deprecated As of release 2.9.0, replaced by
* {@code ReadPreference.secondaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets)}
*/
@Deprecated
public static ReadPreference withTags(Map<String, String> tags) {
return new TaggedReadPreference( tags );
}
/**
* @deprecated As of release 2.9.0, replaced by
* {@code ReadPreference.secondaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets)}
*/
@Deprecated
public static ReadPreference withTags( final DBObject tags ) {
return new TaggedReadPreference( tags );
}
*/
private static final ReadPreference _PRIMARY;
private static final ReadPreference _SECONDARY;
private static final ReadPreference _SECONDARY_PREFERRED;
private static final ReadPreference _PRIMARY_PREFERRED;
private static final ReadPreference _NEAREST;
static {
_PRIMARY = new PrimaryReadPreference();
_SECONDARY = new TaggableReadPreference.SecondaryReadPreference();
_SECONDARY_PREFERRED = new TaggableReadPreference.SecondaryPreferredReadPreference();
_PRIMARY_PREFERRED = new TaggableReadPreference.PrimaryPreferredReadPreference();
_NEAREST = new TaggableReadPreference.NearestReadPreference();
PRIMARY = _PRIMARY;
SECONDARY = _SECONDARY_PREFERRED; // this is not a bug. See SECONDARY Javadoc.
}
}
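A brief sketch of the factory methods above, including a tagged preference; the tag names and values are made up:

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.ReadPreference;

public class ReadPreferenceExample {
    public static void main(String[] args) {
        // Plain modes.
        ReadPreference primary = ReadPreference.primary();
        ReadPreference nearest = ReadPreference.nearest();

        // Secondary preferred, but only members tagged dc=ny; falls back to the primary if none match.
        ReadPreference tagged = ReadPreference.secondaryPreferred(new BasicDBObject("dc", "ny"));

        // valueOf() maps the URI option string back to the same singleton instances.
        ReadPreference fromUri = ReadPreference.valueOf("secondaryPreferred");

        System.out.println(primary.getName());  // primary
        System.out.println(nearest.getName());  // nearest
        System.out.println(tagged.toDBObject());
        System.out.println(fromUri == ReadPreference.secondaryPreferred()); // true
    }
}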

View File

@ -32,6 +32,7 @@ import com.massivecraft.mcore.xlib.bson.BSONObject;
/**
 * This class enables mapping of simple Class fields to the fields of a BSON object
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public abstract class ReflectionDBObject implements DBObject {
public Object get( String key ){
@ -58,7 +59,6 @@ public abstract class ReflectionDBObject implements DBObject {
return getWrapper().set( this , key , v );
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public void putAll( Map m ){
for ( Map.Entry entry : (Set<Map.Entry>)m.entrySet() ){
put( entry.getKey().toString() , entry.getValue() );
@ -91,7 +91,6 @@ public abstract class ReflectionDBObject implements DBObject {
return false;
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public Map toMap() {
Map m = new HashMap();
Iterator i = this.keySet().iterator();
@ -133,7 +132,6 @@ public abstract class ReflectionDBObject implements DBObject {
* Represents a wrapper around the DBObject to interface with the Class fields
*/
public static class JavaWrapper {
@SuppressWarnings("rawtypes")
JavaWrapper( Class c ){
_class = c;
_name = c.getName();
@ -205,7 +203,6 @@ public abstract class ReflectionDBObject implements DBObject {
}
}
@SuppressWarnings("rawtypes")
Class getInternalClass( String path ){
String cur = path;
String next = null;
@ -228,7 +225,6 @@ public abstract class ReflectionDBObject implements DBObject {
return w.getInternalClass( next );
}
@SuppressWarnings("rawtypes")
final Class _class;
final String _name;
final Map<String,FieldInfo> _fields;
@ -236,7 +232,6 @@ public abstract class ReflectionDBObject implements DBObject {
}
static class FieldInfo {
@SuppressWarnings("rawtypes")
FieldInfo( String name , Class c ){
_name = name;
_class = c;
@ -249,7 +244,6 @@ public abstract class ReflectionDBObject implements DBObject {
}
final String _name;
@SuppressWarnings("rawtypes")
final Class _class;
Method _getter;
Method _setter;
@ -260,7 +254,6 @@ public abstract class ReflectionDBObject implements DBObject {
* @param c
* @return
*/
@SuppressWarnings("rawtypes")
public static JavaWrapper getWrapperIfReflectionObject( Class c ){
if ( ReflectionDBObject.class.isAssignableFrom( c ) )
return getWrapper( c );
@ -272,7 +265,6 @@ public abstract class ReflectionDBObject implements DBObject {
* @param c
* @return
*/
@SuppressWarnings("rawtypes")
public static JavaWrapper getWrapper( Class c ){
JavaWrapper w = _wrappers.get( c );
if ( w == null ){
@ -282,7 +274,6 @@ public abstract class ReflectionDBObject implements DBObject {
return w;
}
@SuppressWarnings("rawtypes")
private static final Map<Class,JavaWrapper> _wrappers = Collections.synchronizedMap( new HashMap<Class,JavaWrapper>() );
private static final Set<String> IGNORE_FIELDS = new HashSet<String>();
static {

View File

@ -20,11 +20,17 @@ package com.massivecraft.mcore.xlib.mongodb;
import com.massivecraft.mcore.xlib.bson.util.annotations.Immutable;
import com.massivecraft.mcore.xlib.bson.util.annotations.ThreadSafe;
import com.massivecraft.mcore.xlib.mongodb.util.JSON;
import java.net.UnknownHostException;
import java.util.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Set;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Collections;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicReference;
import java.util.logging.Level;
import java.util.logging.Logger;
@ -38,30 +44,24 @@ import java.util.logging.Logger;
* Keeps replica set status. Maintains a background thread to ping all members of the set to keep the status current.
*/
@ThreadSafe
public class ReplicaSetStatus {
@SuppressWarnings({"rawtypes"})
public class ReplicaSetStatus extends ConnectionStatus {
static final Logger _rootLogger = Logger.getLogger( "com.mongodb.ReplicaSetStatus" );
ReplicaSetStatus( Mongo mongo, List<ServerAddress> initial ){
_mongoOptions = _mongoOptionsDefaults.copy();
_mongoOptions.socketFactory = mongo._options.socketFactory;
_mongo = mongo;
super(initial, mongo);
_updater = new Updater(initial);
}
void start() {
_updater.start();
}
public String getName() {
return _setName.get();
return _replicaSetHolder.get().getSetName();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{replSetName: ").append(_setName.get());
sb.append(", nextResolveTime: ").append(new Date(_updater.getNextResolveTime()).toString());
sb.append("{replSetName: ").append(_replicaSetHolder.get().getSetName());
sb.append(", members: ").append(_replicaSetHolder);
sb.append(", updaterIntervalMS: ").append(updaterIntervalMS);
sb.append(", updaterIntervalNoMasterMS: ").append(updaterIntervalNoMasterMS);
@ -73,30 +73,26 @@ public class ReplicaSetStatus {
return sb.toString();
}
void _checkClosed(){
if ( _closed )
throw new IllegalStateException( "ReplicaSetStatus closed" );
}
/**
 * @return the master's server address, or null if there is no master
* @throws MongoException
*/
public ServerAddress getMaster(){
Node n = getMasterNode();
ReplicaSetNode n = getMasterNode();
if ( n == null )
return null;
return n.getServerAddress();
}
Node getMasterNode(){
_checkClosed();
ReplicaSetNode getMasterNode(){
checkClosed();
return _replicaSetHolder.get().getMaster();
}
/**
* @param srv
* the server to compare
* @param srv the server to compare
* @return indication if the ServerAddress is the current Master/Primary
* @throws MongoException
*/
public boolean isMaster(ServerAddress srv) {
if (srv == null)
@ -105,36 +101,20 @@ public class ReplicaSetStatus {
return srv.equals(getMaster());
}
/**
* @param tags tags map
* @return a good secondary by tag value or null if can't find one
*/
ServerAddress getASecondary( DBObject tags ) {
// store the reference in local, so that it doesn't change out from under us while looping
List<Tag> tagList = new ArrayList<Tag>();
for ( String key : tags.keySet() ) {
tagList.add(new Tag(key, tags.get(key).toString()));
}
Node node = _replicaSetHolder.get().getASecondary(tagList);
if (node != null) {
return node.getServerAddress();
}
return null;
}
/**
* @return a good secondary or null if can't find one
*/
ServerAddress getASecondary() {
Node node = _replicaSetHolder.get().getASecondary();
ReplicaSetNode node = _replicaSetHolder.get().getASecondary();
if (node == null) {
return null;
}
return node._addr;
}
@Override
boolean hasServerUp() {
for (Node node : _replicaSetHolder.get().getAll()) {
for (ReplicaSetNode node : _replicaSetHolder.get().getAll()) {
if (node.isOk()) {
return true;
}
@ -145,17 +125,16 @@ public class ReplicaSetStatus {
// Simple abstraction over a volatile ReplicaSet reference that starts as null. The get method blocks until members
// is not null. The set method notifies all, thus waking up all getters.
@ThreadSafe
static class ReplicaSetHolder {
class ReplicaSetHolder {
private volatile ReplicaSet members;
// blocks until replica set is set.
// blocks until replica set is set, or a timeout occurs
synchronized ReplicaSet get() {
while (members == null) {
try {
wait();
}
catch (InterruptedException e) {
throw new MongoException("Interrupted while waiting for next update to replica set status", e);
wait(_mongo.getMongoOptions().getConnectTimeout());
} catch (InterruptedException e) {
throw new MongoInterruptedException("Interrupted while waiting for next update to replica set status", e);
}
}
return members;
@ -166,6 +145,7 @@ public class ReplicaSetStatus {
if (members == null) {
throw new IllegalArgumentException("members can not be null");
}
this.members = members;
notifyAll();
}
@ -173,10 +153,9 @@ public class ReplicaSetStatus {
// blocks until the replica set is set again
synchronized void waitForNextUpdate() {
try {
wait();
}
catch (InterruptedException e) {
throw new MongoException("Interrupted while waiting for next update to replica set status", e);
wait(_mongo.getMongoOptions().getConnectTimeout());
} catch (InterruptedException e) {
throw new MongoInterruptedException("Interrupted while waiting for next update to replica set status", e);
}
}
@ -198,36 +177,36 @@ public class ReplicaSetStatus {
// of good secondaries so that choosing a random good secondary is dead simple
@Immutable
static class ReplicaSet {
final List<Node> all;
final List<ReplicaSetNode> all;
final Random random;
final List<Node> goodSecondaries;
final Map<Tag, List<Node>> goodSecondariesByTagMap;
final Node master;
final List<ReplicaSetNode> acceptableSecondaries;
final List<ReplicaSetNode> acceptableMembers;
final ReplicaSetNode master;
final String setName;
final ReplicaSetErrorStatus errorStatus;
private int acceptableLatencyMS;
public ReplicaSet(List<ReplicaSetNode> nodeList, Random random, int acceptableLatencyMS) {
public ReplicaSet(List<Node> nodeList, Random random, int acceptableLatencyMS) {
this.random = random;
this.all = Collections.unmodifiableList(new ArrayList<Node>(nodeList));
this.goodSecondaries =
Collections.unmodifiableList(calculateGoodSecondaries(all, calculateBestPingTime(all), acceptableLatencyMS));
Set<Tag> uniqueTags = new HashSet<Tag>();
for (Node curNode : all) {
for (Tag curTag : curNode._tags) {
uniqueTags.add(curTag);
}
}
Map<Tag, List<Node>> goodSecondariesByTagMap = new HashMap<Tag, List<Node>>();
for (Tag curTag : uniqueTags) {
List<Node> taggedMembers = getMembersByTag(all, curTag);
goodSecondariesByTagMap.put(curTag,
Collections.unmodifiableList(calculateGoodSecondaries(taggedMembers,
calculateBestPingTime(taggedMembers), acceptableLatencyMS)));
}
this.goodSecondariesByTagMap = Collections.unmodifiableMap(goodSecondariesByTagMap);
this.all = Collections.unmodifiableList(new ArrayList<ReplicaSetNode>(nodeList));
this.acceptableLatencyMS = acceptableLatencyMS;
errorStatus = validate();
setName = determineSetName();
this.acceptableSecondaries =
Collections.unmodifiableList(calculateGoodMembers(
all, calculateBestPingTime(all, false), acceptableLatencyMS, false));
this.acceptableMembers =
Collections.unmodifiableList(calculateGoodMembers(all, calculateBestPingTime(all, true), acceptableLatencyMS, true));
master = findMaster();
}
public List<Node> getAll() {
public List<ReplicaSetNode> getAll() {
checkStatus();
return all;
}
@ -235,7 +214,9 @@ public class ReplicaSetStatus {
return getMaster() != null;
}
public Node getMaster() {
public ReplicaSetNode getMaster() {
checkStatus();
return master;
}
@ -247,98 +228,196 @@ public class ReplicaSetStatus {
}
}
public Node getASecondary() {
if (goodSecondaries.isEmpty()) {
public ReplicaSetNode getASecondary() {
checkStatus();
if (acceptableSecondaries.isEmpty()) {
return null;
}
return goodSecondaries.get(random.nextInt(goodSecondaries.size()));
return acceptableSecondaries.get(random.nextInt(acceptableSecondaries.size()));
}
public Node getASecondary(List<Tag> tags) {
for (Tag tag : tags) {
List<Node> goodSecondariesByTag = goodSecondariesByTagMap.get(tag);
if (goodSecondariesByTag != null) {
Node node = goodSecondariesByTag.get(random.nextInt(goodSecondariesByTag.size()));
if (node != null) {
return node;
}
}
public ReplicaSetNode getASecondary(List<Tag> tags) {
checkStatus();
// optimization
if (tags.isEmpty()) {
return getASecondary();
}
List<ReplicaSetNode> acceptableTaggedSecondaries = getGoodSecondariesByTags(tags);
if (acceptableTaggedSecondaries.isEmpty()) {
return null;
}
return acceptableTaggedSecondaries.get(random.nextInt(acceptableTaggedSecondaries.size()));
}
public ReplicaSetNode getAMember() {
checkStatus();
if (acceptableMembers.isEmpty()) {
return null;
}
return acceptableMembers.get(random.nextInt(acceptableMembers.size()));
}
public ReplicaSetNode getAMember(List<Tag> tags) {
checkStatus();
if (tags.isEmpty())
return getAMember();
List<ReplicaSetNode> acceptableTaggedMembers = getGoodMembersByTags(tags);
if (acceptableTaggedMembers.isEmpty())
return null;
return acceptableTaggedMembers.get(random.nextInt(acceptableTaggedMembers.size()));
}
List<ReplicaSetNode> getGoodSecondaries(List<ReplicaSetNode> all) {
List<ReplicaSetNode> goodSecondaries = new ArrayList<ReplicaSetNode>(all.size());
for (ReplicaSetNode cur : all) {
if (!cur.isOk()) {
continue;
}
goodSecondaries.add(cur);
}
return goodSecondaries;
}
public List<ReplicaSetNode> getGoodSecondariesByTags(final List<Tag> tags) {
checkStatus();
List<ReplicaSetNode> taggedSecondaries = getMembersByTags(all, tags);
return calculateGoodMembers(taggedSecondaries,
calculateBestPingTime(taggedSecondaries, false), acceptableLatencyMS, false);
}
public List<ReplicaSetNode> getGoodMembersByTags(final List<Tag> tags) {
checkStatus();
List<ReplicaSetNode> taggedMembers = getMembersByTags(all, tags);
return calculateGoodMembers(taggedMembers,
calculateBestPingTime(taggedMembers, true), acceptableLatencyMS, true);
}
public String getSetName() {
checkStatus();
return setName;
}
public ReplicaSetErrorStatus getErrorStatus(){
return errorStatus;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("[ ");
for (Node node : getAll())
for (ReplicaSetNode node : getAll())
sb.append(node.toJSON()).append(",");
sb.setLength(sb.length() - 1); //remove last comma
sb.append(" ]");
return sb.toString();
}
public Node findMaster() {
for (Node node : all) {
private void checkStatus(){
if (!errorStatus.isOk())
throw new MongoException(errorStatus.getError());
}
private ReplicaSetNode findMaster() {
for (ReplicaSetNode node : all) {
if (node.master())
return node;
}
return null;
}
private String determineSetName() {
for (ReplicaSetNode node : all) {
String nodeSetName = node.getSetName();
static float calculateBestPingTime(List<Node> members) {
float bestPingTime = Float.MAX_VALUE;
for (Node cur : members) {
if (!cur.secondary()) {
continue;
if (nodeSetName != null && !nodeSetName.equals("")) {
return nodeSetName;
}
}
return null;
}
private ReplicaSetErrorStatus validate() {
//make sure all nodes have the same set name
HashSet<String> nodeNames = new HashSet<String>();
for(ReplicaSetNode node : all) {
String nodeSetName = node.getSetName();
if(nodeSetName != null && !nodeSetName.equals("")) {
nodeNames.add(nodeSetName);
}
}
if(nodeNames.size() <= 1)
return new ReplicaSetErrorStatus(true, null);
else {
return new ReplicaSetErrorStatus(false, "nodes with different set names detected: " + nodeNames.toString());
}
}
static float calculateBestPingTime(List<ReplicaSetNode> members, boolean includeMaster) {
float bestPingTime = Float.MAX_VALUE;
for (ReplicaSetNode cur : members) {
if (cur.secondary() || (includeMaster && cur.master())) {
if (cur._pingTime < bestPingTime) {
bestPingTime = cur._pingTime;
}
}
}
return bestPingTime;
}
static List<Node> calculateGoodSecondaries(List<Node> members, float bestPingTime, int acceptableLatencyMS) {
List<Node> goodSecondaries = new ArrayList<Node>(members.size());
for (Node cur : members) {
if (!cur.secondary()) {
continue;
}
if (cur._pingTime - acceptableLatencyMS <= bestPingTime ) {
static List<ReplicaSetNode> calculateGoodMembers(List<ReplicaSetNode> members, float bestPingTime, int acceptableLatencyMS, boolean includeMaster) {
List<ReplicaSetNode> goodSecondaries = new ArrayList<ReplicaSetNode>(members.size());
for (ReplicaSetNode cur : members) {
if (cur.secondary() || (includeMaster && cur.master())) {
if (cur._pingTime - acceptableLatencyMS <= bestPingTime) {
goodSecondaries.add(cur);
}
}
}
return goodSecondaries;
}
static List<Node> getMembersByTag(List<Node> members, Tag tag) {
List<Node> membersByTag = new ArrayList<Node>();
static List<ReplicaSetNode> getMembersByTags(List<ReplicaSetNode> members, List<Tag> tags) {
for (Node cur : members) {
if (cur._tags.contains(tag)) {
List<ReplicaSetNode> membersByTag = new ArrayList<ReplicaSetNode>();
for (ReplicaSetNode cur : members) {
if (tags != null && cur.getTags() != null && cur.getTags().containsAll(tags)) {
membersByTag.add(cur);
}
}
return membersByTag;
}
}
// Represents the state of a node in the replica set. Instances of this class are immutable.
@Immutable
static class Node {
Node(ServerAddress addr, Set<String> names, float pingTime, boolean ok, boolean isMaster, boolean isSecondary,
static class ReplicaSetNode extends Node {
ReplicaSetNode(ServerAddress addr, Set<String> names, String setName, float pingTime, boolean ok, boolean isMaster, boolean isSecondary,
LinkedHashMap<String, String> tags, int maxBsonObjectSize) {
this._addr = addr;
super(pingTime, addr, maxBsonObjectSize, ok);
this._names = Collections.unmodifiableSet(new HashSet<String>(names));
this._pingTime = pingTime;
this._ok = ok;
this._setName = setName;
this._isMaster = isMaster;
this._isSecondary = isSecondary;
this._tags = Collections.unmodifiableSet(getTagsFromMap(tags));
this._maxBsonObjectSize = maxBsonObjectSize;
}
private static Set<Tag> getTagsFromMap(LinkedHashMap<String,String> tagMap) {
@ -349,34 +428,30 @@ public class ReplicaSetStatus {
return tagSet;
}
public boolean isOk() {
return _ok;
}
public boolean master(){
return _ok && _isMaster;
}
public int getMaxBsonObjectSize() {
return _maxBsonObjectSize;
}
public boolean secondary(){
return _ok && _isSecondary;
}
public ServerAddress getServerAddress() {
return _addr;
}
public Set<String> getNames() {
return _names;
}
public String getSetName() {
return _setName;
}
public Set<Tag> getTags() {
return _tags;
}
public float getPingTime() {
return _pingTime;
}
public String toJSON(){
StringBuilder buf = new StringBuilder();
buf.append( "{ address:'" ).append( _addr ).append( "', " );
@ -384,9 +459,16 @@ public class ReplicaSetStatus {
buf.append( "ping:" ).append( _pingTime ).append( ", " );
buf.append( "isMaster:" ).append( _isMaster ).append( ", " );
buf.append( "isSecondary:" ).append( _isSecondary ).append( ", " );
buf.append( "setName:" ).append( _setName ).append( ", " );
buf.append( "maxBsonObjectSize:" ).append( _maxBsonObjectSize ).append( ", " );
if(_tags != null && _tags.size() > 0)
buf.append( "tags:" ).append( JSON.serialize(_tags ) );
if(_tags != null && _tags.size() > 0){
List<DBObject> tagObjects = new ArrayList<DBObject>();
for( Tag tag : _tags)
tagObjects.add(tag.toDBObject());
buf.append(new BasicDBObject("tags", tagObjects) );
}
buf.append("}");
return buf.toString();
@ -397,7 +479,7 @@ public class ReplicaSetStatus {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Node node = (Node) o;
ReplicaSetNode node = (ReplicaSetNode) o;
if (_isMaster != node._isMaster) return false;
if (_maxBsonObjectSize != node._maxBsonObjectSize) return false;
@ -407,6 +489,7 @@ public class ReplicaSetStatus {
if (!_addr.equals(node._addr)) return false;
if (!_names.equals(node._names)) return false;
if (!_tags.equals(node._tags)) return false;
if (!_setName.equals(node._setName)) return false;
return true;
}
@ -420,18 +503,36 @@ public class ReplicaSetStatus {
result = 31 * result + (_ok ? 1 : 0);
result = 31 * result + (_isMaster ? 1 : 0);
result = 31 * result + (_isSecondary ? 1 : 0);
result = 31 * result + _setName.hashCode();
result = 31 * result + _maxBsonObjectSize;
return result;
}
private final ServerAddress _addr;
private final float _pingTime;
private final Set<String> _names;
private final Set<Tag> _tags;
private final boolean _ok;
private final boolean _isMaster;
private final boolean _isSecondary;
private final int _maxBsonObjectSize;
private final String _setName;
}
@Immutable
static final class ReplicaSetErrorStatus{
final boolean ok;
final String error;
ReplicaSetErrorStatus(boolean ok, String error){
this.ok = ok;
this.error = error;
}
public boolean isOk(){
return ok;
}
public String getError(){
return error;
}
}
// Simple class to hold a single tag, both key and value
@ -464,28 +565,25 @@ public class ReplicaSetStatus {
result = 31 * result + (value != null ? value.hashCode() : 0);
return result;
}
public DBObject toDBObject(){
return new BasicDBObject(key, value);
}
}
// Represents the state of a node in the replica set. Instances of this class are mutable.
static class UpdatableNode {
static class UpdatableReplicaSetNode extends UpdatableNode {
UpdatableNode(ServerAddress addr,
List<UpdatableNode> all,
UpdatableReplicaSetNode(ServerAddress addr,
List<UpdatableReplicaSetNode> all,
AtomicReference<Logger> logger,
Mongo mongo,
MongoOptions mongoOptions,
AtomicReference<String> setName,
AtomicReference<String> lastPrimarySignal)
{
_addr = addr;
AtomicReference<String> lastPrimarySignal) {
super(addr, mongo, mongoOptions);
_all = all;
_mongoOptions = mongoOptions;
_port = new DBPort( addr , null , _mongoOptions );
_names.add( addr.toString() );
_names.add(addr.toString());
_logger = logger;
_mongo = mongo;
_setName = setName;
_lastPrimarySignal = lastPrimarySignal;
}
@ -502,118 +600,83 @@ public class ReplicaSetStatus {
}
}
@SuppressWarnings("rawtypes")
synchronized void update(Set<UpdatableNode> seenNodes){
try {
long start = System.nanoTime();
CommandResult res = _port.runCommand( _mongo.getDB("admin") , _isMasterCmd );
long end = System.nanoTime();
float newPingMS = (end - start) / 1000000F;
if (!successfullyContacted)
_pingTimeMS = newPingMS;
else
_pingTimeMS = _pingTimeMS + ((newPingMS - _pingTimeMS) / latencySmoothFactor);
_rootLogger.log( Level.FINE , "Latency to " + _addr + " actual=" + newPingMS + " smoothed=" + _pingTimeMS);
successfullyContacted = true;
if ( res == null ){
throw new MongoInternalException("Invalid null value returned from isMaster");
void update(Set<UpdatableReplicaSetNode> seenNodes) {
CommandResult res = update();
if (res == null || !_ok) {
return;
}
if (!_ok) {
_logger.get().log( Level.INFO , "Server seen up: " + _addr );
}
_ok = true;
_isMaster = res.getBoolean( "ismaster" , false );
_isSecondary = res.getBoolean( "secondary" , false );
_lastPrimarySignal.set( res.getString( "primary" ) );
_isMaster = res.getBoolean("ismaster", false);
_isSecondary = res.getBoolean("secondary", false);
_lastPrimarySignal.set(res.getString("primary"));
if ( res.containsField( "hosts" ) ){
for ( Object x : (List)res.get("hosts") ){
if (res.containsField("hosts")) {
for (Object x : (List) res.get("hosts")) {
String host = x.toString();
UpdatableNode node = _addIfNotHere(host);
UpdatableReplicaSetNode node = _addIfNotHere(host);
if (node != null && seenNodes != null)
seenNodes.add(node);
}
}
if ( res.containsField( "passives" ) ){
for ( Object x : (List)res.get("passives") ){
if (res.containsField("passives")) {
for (Object x : (List) res.get("passives")) {
String host = x.toString();
UpdatableNode node = _addIfNotHere(host);
UpdatableReplicaSetNode node = _addIfNotHere(host);
if (node != null && seenNodes != null)
seenNodes.add(node);
}
}
// Tags were added in 2.0 but may not be present
if (res.containsField( "tags" )) {
DBObject tags = (DBObject) res.get( "tags" );
for ( String key : tags.keySet() ) {
_tags.put( key, tags.get( key ).toString() );
if (res.containsField("tags")) {
DBObject tags = (DBObject) res.get("tags");
for (String key : tags.keySet()) {
_tags.put(key, tags.get(key).toString());
}
}
// max size was added in 1.8
if (res.containsField("maxBsonObjectSize")) {
_maxBsonObjectSize = (Integer) res.get("maxBsonObjectSize");
} else {
_maxBsonObjectSize = Bytes.MAX_OBJECT_SIZE;
}
//old versions of mongod don't report setName
if (res.containsField("setName")) {
String setName = res.get( "setName" ).toString();
if ( _setName.get() == null ){
_setName.set(setName);
_logger.set( Logger.getLogger( _rootLogger.getName() + "." + setName));
}
else if ( !_setName.get().equals( setName ) ){
_logger.get().log( Level.SEVERE , "mismatch set name old: " + _setName.get() + " new: " + setName );
_setName = res.getString("setName", "");
if(_logger.get() == null)
_logger.set(Logger.getLogger(_rootLogger.getName() + "." + _setName));
}
}
}
catch ( Exception e ){
if (_ok) {
_logger.get().log( Level.WARNING , "Server seen down: " + _addr, e );
} else if (Math.random() < 0.1) {
_logger.get().log( Level.WARNING , "Server seen down: " + _addr, e );
}
_ok = false;
}
@Override
protected Logger getLogger() {
return _logger.get();
}
UpdatableNode _addIfNotHere( String host ){
UpdatableNode n = findNode( host, _all, _logger );
if ( n == null ){
UpdatableReplicaSetNode _addIfNotHere(String host) {
UpdatableReplicaSetNode n = findNode(host, _all, _logger);
if (n == null) {
try {
n = new UpdatableNode( new ServerAddress( host ), _all, _logger, _mongo, _mongoOptions, _setName, _lastPrimarySignal );
_all.add( n );
}
catch ( UnknownHostException un ){
_logger.get().log( Level.WARNING , "couldn't resolve host [" + host + "]" );
n = new UpdatableReplicaSetNode(new ServerAddress(host), _all, _logger, _mongo, _mongoOptions, _lastPrimarySignal);
_all.add(n);
} catch (UnknownHostException un) {
_logger.get().log(Level.WARNING, "couldn't resolve host [" + host + "]");
}
}
return n;
}
private UpdatableNode findNode( String host, List<UpdatableNode> members, AtomicReference<Logger> logger ){
for (UpdatableNode node : members)
private UpdatableReplicaSetNode findNode(String host, List<UpdatableReplicaSetNode> members, AtomicReference<Logger> logger) {
for (UpdatableReplicaSetNode node : members)
if (node._names.contains(host))
return node;
ServerAddress addr;
try {
addr = new ServerAddress( host );
}
catch ( UnknownHostException un ){
logger.get().log( Level.WARNING , "couldn't resolve host [" + host + "]" );
addr = new ServerAddress(host);
} catch (UnknownHostException un) {
logger.get().log(Level.WARNING, "couldn't resolve host [" + host + "]");
return null;
}
for (UpdatableNode node : members) {
for (UpdatableReplicaSetNode node : members) {
if (node._addr.equals(addr)) {
node._names.add(host);
return node;
@ -628,40 +691,27 @@ public class ReplicaSetStatus {
_port = null;
}
final ServerAddress _addr;
private final Set<String> _names = Collections.synchronizedSet( new HashSet<String>() );
private DBPort _port; // we have our own port so we can set different socket options and don't have to worry about the pool
final LinkedHashMap<String, String> _tags = new LinkedHashMap<String, String>( );
boolean successfullyContacted = false;
boolean _ok = false;
float _pingTimeMS = 0;
private final Set<String> _names = Collections.synchronizedSet(new HashSet<String>());
final LinkedHashMap<String, String> _tags = new LinkedHashMap<String, String>();
boolean _isMaster = false;
boolean _isSecondary = false;
int _maxBsonObjectSize;
double _priority = 0;
String _setName;
private final AtomicReference<Logger> _logger;
private final MongoOptions _mongoOptions;
private final Mongo _mongo;
private final AtomicReference<String> _setName;
private final AtomicReference<String> _lastPrimarySignal;
private final List<UpdatableNode> _all;
private final List<UpdatableReplicaSetNode> _all;
}
// Thread that monitors the state of the replica set. This thread is responsible for setting a new ReplicaSet
// instance on ReplicaSetStatus.members every pass through the members of the set.
class Updater extends Thread {
class Updater extends BackgroundUpdater {
Updater(List<ServerAddress> initial){
super( "ReplicaSetStatus:Updater" );
setDaemon( true );
_all = new ArrayList<UpdatableNode>(initial.size());
super("ReplicaSetStatus:Updater");
_all = new ArrayList<UpdatableReplicaSetNode>(initial.size());
for ( ServerAddress addr : initial ){
_all.add( new UpdatableNode( addr, _all, _logger, _mongo, _mongoOptions, _setName, _lastPrimarySignal ) );
_all.add( new UpdatableReplicaSetNode( addr, _all, _logger, _mongo, _mongoOptions, _lastPrimarySignal ) );
}
_nextResolveTime = System.currentTimeMillis() + inetAddrCacheMS;
}
@ -680,7 +730,7 @@ public class ReplicaSetStatus {
ReplicaSet replicaSet = new ReplicaSet(createNodeList(), _random, slaveAcceptableLatencyMS);
_replicaSetHolder.set(replicaSet);
if (replicaSet.hasMaster()) {
if (replicaSet.getErrorStatus().isOk() && replicaSet.hasMaster()) {
_mongo.getConnector().setMaster(replicaSet.getMaster());
curUpdateIntervalMS = updaterIntervalMS;
}
@ -699,12 +749,8 @@ public class ReplicaSetStatus {
closeAllNodes();
}
public long getNextResolveTime() {
return _nextResolveTime;
}
public synchronized void updateAll(){
HashSet<UpdatableNode> seenNodes = new HashSet<UpdatableNode>();
HashSet<UpdatableReplicaSetNode> seenNodes = new HashSet<UpdatableReplicaSetNode>();
for (int i = 0; i < _all.size(); i++) {
_all.get(i).update(seenNodes);
@ -713,7 +759,7 @@ public class ReplicaSetStatus {
if (seenNodes.size() > 0) {
// not empty, means that at least 1 server gave node list
// remove unused hosts
Iterator<UpdatableNode> it = _all.iterator();
Iterator<UpdatableReplicaSetNode> it = _all.iterator();
while (it.hasNext()) {
if (!seenNodes.contains(it.next()))
it.remove();
@ -721,10 +767,10 @@ public class ReplicaSetStatus {
}
}
private List<Node> createNodeList() {
List<Node> nodeList = new ArrayList<Node>(_all.size());
for (UpdatableNode cur : _all) {
nodeList.add(new Node(cur._addr, cur._names, cur._pingTimeMS, cur._ok, cur._isMaster, cur._isSecondary, cur._tags, cur._maxBsonObjectSize));
private List<ReplicaSetNode> createNodeList() {
List<ReplicaSetNode> nodeList = new ArrayList<ReplicaSetNode>(_all.size());
for (UpdatableReplicaSetNode cur : _all) {
nodeList.add(new ReplicaSetNode(cur._addr, cur._names, cur._setName, cur._pingTimeMS, cur._ok, cur._isMaster, cur._isSecondary, cur._tags, cur._maxBsonObjectSize));
}
return nodeList;
}
@ -733,37 +779,28 @@ public class ReplicaSetStatus {
long now = System.currentTimeMillis();
if (inetAddrCacheMS > 0 && _nextResolveTime < now) {
_nextResolveTime = now + inetAddrCacheMS;
for (UpdatableNode node : _all) {
for (UpdatableReplicaSetNode node : _all) {
node.updateAddr();
}
}
}
private void closeAllNodes() {
for (UpdatableNode node : _all) {
for (UpdatableReplicaSetNode node : _all) {
try {
node.close();
} catch (final Throwable t) { /* nada */ }
}
}
private final List<UpdatableNode> _all;
private final List<UpdatableReplicaSetNode> _all;
private volatile long _nextResolveTime;
private final Random _random = new Random();
}
/**
* Ensures that we have the current master, if there is one. If the current snapshot of the replica set
* has no master, this method waits one cycle to find a new master, and returns it if found, or null if not.
*
* @return address of the current master, or null if there is none
*/
@Override
Node ensureMaster() {
if (_closed) {
return null;
}
Node masterNode = getMasterNode();
ReplicaSetNode masterNode = getMasterNode();
if (masterNode != null) {
return masterNode;
}
@ -780,20 +817,16 @@ public class ReplicaSetStatus {
List<ServerAddress> getServerAddressList() {
List<ServerAddress> addrs = new ArrayList<ServerAddress>();
for (Node node : _replicaSetHolder.get().getAll())
for (ReplicaSetNode node : _replicaSetHolder.get().getAll())
addrs.add(node.getServerAddress());
return addrs;
}
void close() {
_closed = true;
_updater.interrupt();
}
/**
* Gets the maximum size for a BSON object supported by the current master server.
* Note that this value may change over time depending on which server is master.
* @return the maximum size, or 0 if not obtained from servers yet.
* @throws MongoException
*/
public int getMaxBsonObjectSize() {
return _replicaSetHolder.get().getMaxBsonObjectSize();
@ -801,34 +834,16 @@ public class ReplicaSetStatus {
final ReplicaSetHolder _replicaSetHolder = new ReplicaSetHolder();
final Updater _updater;
private final Mongo _mongo;
private final AtomicReference<String> _setName = new AtomicReference<String>(); // null until init
// will get changed to use set name once its found
private final AtomicReference<Logger> _logger = new AtomicReference<Logger>(_rootLogger);
private final AtomicReference<String> _lastPrimarySignal = new AtomicReference<String>();
private volatile boolean _closed;
final static int updaterIntervalMS;
final static int updaterIntervalNoMasterMS;
final static int slaveAcceptableLatencyMS;
final static int inetAddrCacheMS;
final static float latencySmoothFactor;
private final MongoOptions _mongoOptions;
private static final MongoOptions _mongoOptionsDefaults = new MongoOptions();
static {
updaterIntervalMS = Integer.parseInt(System.getProperty("com.mongodb.updaterIntervalMS", "5000"));
updaterIntervalNoMasterMS = Integer.parseInt(System.getProperty("com.mongodb.updaterIntervalNoMasterMS", "10"));
slaveAcceptableLatencyMS = Integer.parseInt(System.getProperty("com.mongodb.slaveAcceptableLatencyMS", "15"));
inetAddrCacheMS = Integer.parseInt(System.getProperty("com.mongodb.inetAddrCacheMS", "300000"));
latencySmoothFactor = Float.parseFloat(System.getProperty("com.mongodb.latencySmoothFactor", "4"));
_mongoOptionsDefaults.connectTimeout = Integer.parseInt(System.getProperty("com.mongodb.updaterConnectTimeoutMS", "20000"));
_mongoOptionsDefaults.socketTimeout = Integer.parseInt(System.getProperty("com.mongodb.updaterSocketTimeoutMS", "20000"));
}
static final DBObject _isMasterCmd = new BasicDBObject( "ismaster" , 1 );
}
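The member-selection logic above keeps only nodes whose smoothed ping time falls within a fixed latency window of the fastest node, then picks one of them at random. A minimal self-contained sketch of that idea follows; the class and field names are illustrative only and not part of the driver API, and the 15 ms window mirrors the slaveAcceptableLatencyMS default shown above.
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
// Illustrative sketch of the "acceptable latency window" selection; not driver code.
class LatencyWindowExample {
    static class Member {
        final String host;
        final float pingTimeMs;
        Member(String host, float pingTimeMs) { this.host = host; this.pingTimeMs = pingTimeMs; }
    }
    static final int ACCEPTABLE_LATENCY_MS = 15; // same default as slaveAcceptableLatencyMS above
    static final Random RANDOM = new Random();
    // Keep every member within ACCEPTABLE_LATENCY_MS of the best ping time, then pick one at random.
    static Member pickOne(List<Member> members) {
        float best = Float.MAX_VALUE;
        for (Member m : members) {
            best = Math.min(best, m.pingTimeMs);
        }
        List<Member> acceptable = new ArrayList<Member>(members.size());
        for (Member m : members) {
            if (m.pingTimeMs - ACCEPTABLE_LATENCY_MS <= best) {
                acceptable.add(m);
            }
        }
        return acceptable.isEmpty() ? null : acceptable.get(RANDOM.nextInt(acceptable.size()));
    }
}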

View File

@ -43,8 +43,9 @@ class Response {
_len = Bits.readInt(b, pos);
pos += 4;
if (_len > MAX_LENGTH)
if (_len > MAX_LENGTH) {
throw new IllegalArgumentException( "response too long: " + _len );
}
_id = Bits.readInt(b, pos);
pos += 4;

View File

@ -0,0 +1,215 @@
package com.massivecraft.mcore.xlib.mongodb;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Abstract base class for all preferences which can be combined with tags
*
* @author breinero
*/
public abstract class TaggableReadPreference extends ReadPreference {
private final static List<DBObject> EMPTY = new ArrayList<DBObject>();
TaggableReadPreference() {
_tags = EMPTY;
}
TaggableReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) {
if (firstTagSet == null) {
throw new IllegalArgumentException("Must have at least one tag set");
}
_tags = new ArrayList<DBObject>();
_tags.add(firstTagSet);
Collections.addAll(_tags, remainingTagSets);
}
@Override
public boolean isSlaveOk() {
return true;
}
@Override
public DBObject toDBObject() {
DBObject readPrefObject = new BasicDBObject("mode", getName());
if (!_tags.isEmpty())
readPrefObject.put("tags", _tags);
return readPrefObject;
}
public List<DBObject> getTagSets() {
List<DBObject> tags = new ArrayList<DBObject>();
for (DBObject tagSet : _tags) {
tags.add(tagSet);
}
return tags;
}
@Override
public String toString() {
return getName() + printTags();
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final TaggableReadPreference that = (TaggableReadPreference) o;
if (!_tags.equals(that._tags)) return false;
return true;
}
@Override
public int hashCode() {
int result = _tags.hashCode();
result = 31 * result + getName().hashCode();
return result;
}
String printTags() {
return (_tags.isEmpty() ? "" : " : " + new BasicDBObject("tags", _tags));
}
private static List<ReplicaSetStatus.Tag> getTagListFromDBObject(final DBObject curTagSet) {
List<ReplicaSetStatus.Tag> tagList = new ArrayList<ReplicaSetStatus.Tag>();
for (String key : curTagSet.keySet()) {
tagList.add(new ReplicaSetStatus.Tag(key, curTagSet.get(key).toString()));
}
return tagList;
}
final List<DBObject> _tags;
/**
* Read from secondary
*
* @author breinero
*/
static class SecondaryReadPreference extends TaggableReadPreference {
SecondaryReadPreference() {
}
SecondaryReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) {
super(firstTagSet, remainingTagSets);
}
@Override
public String getName() {
return "secondary";
}
@Override
ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
if (_tags.isEmpty())
return set.getASecondary();
for (DBObject curTagSet : _tags) {
List<ReplicaSetStatus.Tag> tagList = getTagListFromDBObject(curTagSet);
ReplicaSetStatus.ReplicaSetNode node = set.getASecondary(tagList);
if (node != null) {
return node;
}
}
return null;
}
}
/**
* Read from secondary if available, otherwise from primary, irrespective of tags.
*
* @author breinero
*/
static class SecondaryPreferredReadPreference extends SecondaryReadPreference {
SecondaryPreferredReadPreference() {
}
SecondaryPreferredReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) {
super(firstTagSet, remainingTagSets);
}
@Override
public String getName() {
return "secondaryPreferred";
}
@Override
ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
ReplicaSetStatus.ReplicaSetNode node = super.getNode(set);
return (node != null) ? node : set.getMaster();
}
}
/**
* Read from nearest node respective of tags.
*
* @author breinero
*/
static class NearestReadPreference extends TaggableReadPreference {
NearestReadPreference() {
}
NearestReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) {
super(firstTagSet, remainingTagSets);
}
@Override
public String getName() {
return "nearest";
}
@Override
ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
if (_tags.isEmpty())
return set.getAMember();
for (DBObject curTagSet : _tags) {
List<ReplicaSetStatus.Tag> tagList = getTagListFromDBObject(curTagSet);
ReplicaSetStatus.ReplicaSetNode node = set.getAMember(tagList);
if (node != null) {
return node;
}
}
return null;
}
}
/**
* Read from primary if available, otherwise a secondary.
*
* @author breinero
*/
static class PrimaryPreferredReadPreference extends SecondaryReadPreference {
PrimaryPreferredReadPreference() {}
PrimaryPreferredReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) {
super(firstTagSet, remainingTagSets);
}
@Override
public String getName() {
return "primaryPreferred";
}
@Override
ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) {
ReplicaSetStatus.ReplicaSetNode node = set.getMaster();
return (node != null) ? node : super.getNode(set);
}
}
}
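The tag-aware read preferences defined in this new file are normally obtained through the ReadPreference factory methods of the upstream 2.9+ driver rather than constructed directly. A hedged usage sketch, with made-up tag keys and values:
import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.ReadPreference;
// Sketch only: the tag sets ("dc", "rack") are invented for illustration.
public class ReadPreferenceExample {
    public static void main(String[] args) {
        DBObject dataCenterTag = new BasicDBObject("dc", "ny");
        DBObject rackTag = new BasicDBObject("rack", "r1");
        // Prefer a matching secondary; fall back to the primary if no tagged secondary is available.
        ReadPreference pref = ReadPreference.secondaryPreferred(dataCenterTag, rackTag);
        System.out.println(pref.toDBObject()); // e.g. { "mode" : "secondaryPreferred" , "tags" : [ ... ] }
    }
}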

View File

@ -25,56 +25,132 @@ import java.util.HashMap;
import java.util.Map;
/**
* <p>WriteConcern control the write behavior for with various options, as well as exception raising on error conditions.</p>
*
* <p>WriteConcern controls the acknowledgment of write operations with various options.
* <p>
* <b>w</b>
* <ul>
* <li>-1 = don't even report network errors </li>
* <li> 0 = default, don't call getLastError by default </li>
* <li> 1 = basic, call getLastError, but don't wait for slaves</li>
* <li> 2+= wait for slaves </li>
* <li>-1 = Don't even report network errors </li>
* <li> 0 = Don't wait for acknowledgement from the server </li>
* <li> 1 = Wait for acknowledgement, but don't wait for secondaries to replicate</li>
* <li> 2+= Wait for one or more secondaries to also acknowledge </li>
* </ul>
* <b>wtimeout</b> how long to wait for slaves before failing
* <ul>
* <li>0 = indefinite </li>
* <li>> 0 = ms to wait </li>
* <li>0: indefinite </li>
* <li>greater than 0: ms to wait </li>
* </ul>
* </p>
* <p><b>fsync</b> force fsync to disk </p>
*
* <p>
* Other options:
* <ul>
* <li><b>j</b>: wait for group commit to journal</li>
* <li><b>fsync</b>: force fsync to disk</li>
* </ul>
* @dochub databases
*/
public class WriteConcern implements Serializable {
private static final long serialVersionUID = 1884671104750417011L;
/** No exceptions are raised, even for network issues */
/**
* No exceptions are raised, even for network issues.
*/
public final static WriteConcern ERRORS_IGNORED = new WriteConcern(-1);
/**
* Write operations that use this write concern will wait for acknowledgement from the primary server before returning.
* Exceptions are raised for network issues, and server errors.
* @since 2.10.0
*/
public final static WriteConcern ACKNOWLEDGED = new WriteConcern(1);
/**
* Write operations that use this write concern will return as soon as the message is written to the socket.
* Exceptions are raised for network issues, but not server errors.
* @since 2.10.0
*/
public final static WriteConcern UNACKNOWLEDGED = new WriteConcern(0);
/**
* Exceptions are raised for network issues, and server errors; the write operation waits for the server to flush
* the data to disk.
*/
public final static WriteConcern FSYNCED = new WriteConcern(true);
/**
* Exceptions are raised for network issues, and server errors; the write operation waits for the server to
* group commit to the journal file on disk.
*/
public final static WriteConcern JOURNALED = new WriteConcern( 1, 0, false, true );
/**
* Exceptions are raised for network issues, and server errors; waits for at least 2 servers for the write operation.
*/
public final static WriteConcern REPLICA_ACKNOWLEDGED= new WriteConcern(2);
/**
* No exceptions are raised, even for network issues.
* <p>
* This field has been superseded by {@code WriteConcern.ERRORS_IGNORED}, and may be deprecated in a future release.
* @see WriteConcern#ERRORS_IGNORED
*/
public final static WriteConcern NONE = new WriteConcern(-1);
/** Exceptions are raised for network issues, but not server errors */
/**
* Write operations that use this write concern will return as soon as the message is written to the socket.
* Exceptions are raised for network issues, but not server errors.
* <p>
* This field has been superseded by {@code WriteConcern.UNACKNOWLEDGED}, and may be deprecated in a future release.
* @see WriteConcern#UNACKNOWLEDGED
*/
public final static WriteConcern NORMAL = new WriteConcern(0);
/** Exceptions are raised for network issues, and server errors; waits on a server for the write operation */
/**
* Write operations that use this write concern will wait for acknowledgement from the primary server before returning.
* Exceptions are raised for network issues, and server errors.
* <p>
* This field has been superseded by {@code WriteConcern.ACKNOWLEDGED}, and may be deprecated in a future release.
* @see WriteConcern#ACKNOWLEDGED
*/
public final static WriteConcern SAFE = new WriteConcern(1);
/** Exceptions are raised for network issues, and server errors; waits on a majority of servers for the write operation */
/**
* Exceptions are raised for network issues, and server errors; waits on a majority of servers for the write operation.
*/
public final static WriteConcern MAJORITY = new Majority();
/** Exceptions are raised for network issues, and server errors; the write operation waits for the server to flush the data to disk*/
/**
* Exceptions are raised for network issues, and server errors; the write operation waits for the server to flush
* the data to disk.
* <p>
* This field has been superseded by {@code WriteConcern.FSYNCED}, and may be deprecated in a future release.
* @see WriteConcern#FSYNCED
*/
public final static WriteConcern FSYNC_SAFE = new WriteConcern(true);
/** Exceptions are raised for network issues, and server errors; the write operation waits for the server to group commit to the journal file on disk*/
/**
* Exceptions are raised for network issues, and server errors; the write operation waits for the server to
* group commit to the journal file on disk.
* <p>
* This field has been superseded by {@code WriteConcern.JOURNALED}, and may be deprecated in a future release.
* @see WriteConcern#JOURNALED
*/
public final static WriteConcern JOURNAL_SAFE = new WriteConcern( 1, 0, false, true );
/** Exceptions are raised for network issues, and server errors; waits for at least 2 servers for the write operation*/
/**
* Exceptions are raised for network issues, and server errors; waits for at least 2 servers for the write operation.
* <p>
* This field has been superseded by {@code WriteConcern.REPLICA_ACKNOWLEDGED}, and may be deprecated in a future release.
* @see WriteConcern#REPLICA_ACKNOWLEDGED
*/
public final static WriteConcern REPLICAS_SAFE = new WriteConcern(2);
// map of the constants from above for use by fromString
private static Map<String, WriteConcern> _namedConcerns = null;
/**
* Default constructor keeping all options as default
* Default constructor keeping all options as default. Be careful using this constructor, as it's equivalent to
* {@code WriteConcern.UNACKNOWLEDGED}, so writes may be lost without any errors being reported.
* @see WriteConcern#UNACKNOWLEDGED
*/
public WriteConcern(){
this(0);
@ -227,12 +303,19 @@ public class WriteConcern implements Serializable {
_continueOnErrorForInsert = continueOnInsertError;
}
public BasicDBObject getCommand(){
/**
* Gets the getlasterror command for this write concern.
*
* @return getlasterror command, even if <code>w <= 0</code>
*/
public BasicDBObject getCommand() {
BasicDBObject _command = new BasicDBObject( "getlasterror" , 1 );
if ( _w instanceof Integer && ( (Integer) _w > 0) ||
( _w instanceof String && _w != null ) ){
if (_w instanceof Integer && ((Integer) _w > 1) || (_w instanceof String)){
_command.put( "w" , _w );
}
if (_wtimeout > 0) {
_command.put( "wtimeout" , _wtimeout );
}
@ -273,7 +356,7 @@ public class WriteConcern implements Serializable {
/**
* Gets the w parameter (the write strategy) in String format
* @return
* @return w as a string
*/
public String getWString(){
return _w.toString();
@ -335,7 +418,8 @@ public class WriteConcern implements Serializable {
for (Field f : WriteConcern.class.getFields())
if (Modifier.isStatic( f.getModifiers() ) && f.getType().equals( WriteConcern.class )) {
try {
newMap.put( f.getName().toLowerCase(), (WriteConcern) f.get( null ) );
String key = f.getName().toLowerCase();
newMap.put(key, (WriteConcern) f.get( null ) );
} catch (Exception e) {
throw new RuntimeException( e );
}
@ -350,7 +434,7 @@ public class WriteConcern implements Serializable {
}
@Override
public String toString(){
public String toString() {
return "WriteConcern " + getCommand() + " / (Continue Inserting on Errors? " + getContinueOnErrorForInsert() + ")";
}

View File

@ -0,0 +1,47 @@
/*
* Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.massivecraft.mcore.xlib.mongodb;
/**
* An exception representing an error reported due to a write failure.
*/
public class WriteConcernException extends MongoException {
private static final long serialVersionUID = 841056799207039974L;
private final CommandResult commandResult;
/**
* Construct a new instance with the CommandResult from getlasterror command
*
* @param commandResult the command result
*/
public WriteConcernException(final CommandResult commandResult) {
super(commandResult.getCode(), commandResult.toString());
this.commandResult = commandResult;
}
/**
* Gets the getlasterror command result document.
*
* @return the command result
*/
public CommandResult getCommandResult() {
return commandResult;
}
}

View File

@ -41,7 +41,7 @@ public class WriteResult {
WriteResult( DB db , DBPort p , WriteConcern concern ){
_db = db;
_port = p;
_lastCall = p._calls;
_lastCall = p._calls.get();
_lastConcern = concern;
_lazy = true;
}
@ -67,6 +67,7 @@ public class WriteResult {
/**
* calls {@link WriteResult#getLastError(com.mongodb.WriteConcern)} with concern=null
* @return
* @throws MongoException
*/
public synchronized CommandResult getLastError(){
return getLastError(null);
@ -78,6 +79,7 @@ public class WriteResult {
* - otherwise attempts to obtain a CommandResult by calling getLastError with the concern
* @param concern the concern
* @return
* @throws MongoException
*/
public synchronized CommandResult getLastError(WriteConcern concern){
if ( _lastErrorResult != null ) {
@ -110,6 +112,7 @@ public class WriteResult {
/**
* Gets the error String ("err" field)
* @return
* @throws MongoException
*/
public String getError(){
Object foo = getField( "err" );
@ -122,6 +125,7 @@ public class WriteResult {
* Gets the "n" field, which contains the number of documents
* affected in the write operation.
* @return
* @throws MongoException
*/
public int getN(){
return getLastError().getInt( "n" );
@ -131,6 +135,7 @@ public class WriteResult {
* Gets a field
* @param name field name
* @return
* @throws MongoException
*/
public Object getField( String name ){
return getLastError().get( name );
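getLastError on a WriteResult is lazy for unacknowledged writes: the getlasterror command is only issued when asked, using whichever concern is passed in. A hedged sketch, assuming a mongod on localhost and placeholder database and collection names:
import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.CommandResult;
import com.massivecraft.mcore.xlib.mongodb.DBCollection;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.WriteConcern;
import com.massivecraft.mcore.xlib.mongodb.WriteResult;
public class WriteResultExample {
    public static void main(String[] args) throws Exception {
        MongoClient mongo = new MongoClient("localhost");
        try {
            DBCollection coll = mongo.getDB("test").getCollection("docs");
            // Unacknowledged write: returns as soon as the message is written to the socket.
            WriteResult result = coll.insert(new BasicDBObject("_id", 1), WriteConcern.UNACKNOWLEDGED);
            // Asking for the last error now triggers a getlasterror call with the given concern.
            CommandResult lastError = result.getLastError(WriteConcern.ACKNOWLEDGED);
            System.out.println("n = " + lastError.getInt("n") + ", err = " + lastError.getString("err"));
        } finally {
            mongo.close();
        }
    }
}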

View File

@ -18,18 +18,20 @@
package com.massivecraft.mcore.xlib.mongodb.gridfs;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.Mongo;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.util.Util;
import java.io.File;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.Mongo;
import com.massivecraft.mcore.xlib.mongodb.util.Util;
/**
* a simple CLI for Gridfs
*/
@SuppressWarnings({"unused"})
public class CLI {
/**
@ -48,10 +50,11 @@ public class CLI {
private static String db = "test";
private static Mongo _mongo = null;
private static Mongo getMongo()
throws Exception {
if ( _mongo == null )
_mongo = new Mongo( host );
_mongo = new MongoClient( host );
return _mongo;
}
@ -63,7 +66,6 @@ public class CLI {
return _gridfs;
}
@SuppressWarnings("unused")
public static void main(String[] args) throws Exception {
if ( args.length < 1 ){

View File

@ -68,6 +68,7 @@ public class GridFS {
* in the given database. Set the preferred WriteConcern on the given DB with DB.setWriteConcern
* @see com.massivecraft.mcore.xlib.mongodb.WriteConcern
* @param db database to work with
* @throws MongoException
*/
public GridFS(DB db) {
this(db, DEFAULT_BUCKET);
@ -80,6 +81,7 @@ public class GridFS {
* @see com.massivecraft.mcore.xlib.mongodb.WriteConcern
* @param db database to work with
* @param bucket bucket to use in the given database
* @throws MongoException
*/
public GridFS(DB db, String bucket) {
_db = db;
@ -109,7 +111,7 @@ public class GridFS {
* @return cursor of file objects
*/
public DBCursor getFileList(){
return _filesCollection.find().sort(new BasicDBObject("filename",1));
return getFileList(new BasicDBObject());
}
/**
@ -119,7 +121,18 @@ public class GridFS {
* @return cursor of file objects
*/
public DBCursor getFileList( DBObject query ){
return _filesCollection.find( query ).sort(new BasicDBObject("filename",1));
return getFileList(query, new BasicDBObject("filename",1));
}
/**
* gets a filtered list of files stored in this gridfs, sorted by param sort
*
* @param query filter to apply
* @param sort sorting to apply
* @return cursor of file objects
*/
public DBCursor getFileList( DBObject query, DBObject sort){
return _filesCollection.find( query ).sort(sort);
}
@ -131,6 +144,7 @@ public class GridFS {
* finds one file matching the given id. Equivalent to findOne(id)
* @param id
* @return
* @throws MongoException
*/
public GridFSDBFile find( ObjectId id ){
return findOne( id );
@ -139,6 +153,7 @@ public class GridFS {
* finds one file matching the given id.
* @param id
* @return
* @throws MongoException
*/
public GridFSDBFile findOne( ObjectId id ){
return findOne( new BasicDBObject( "_id" , id ) );
@ -147,6 +162,7 @@ public class GridFS {
* finds one file matching the given filename
* @param filename
* @return
* @throws MongoException
*/
public GridFSDBFile findOne( String filename ){
return findOne( new BasicDBObject( "filename" , filename ) );
@ -155,6 +171,7 @@ public class GridFS {
* finds one file matching the given query
* @param query
* @return
* @throws MongoException
*/
public GridFSDBFile findOne( DBObject query ){
return _fix( _filesCollection.findOne( query ) );
@ -164,26 +181,61 @@ public class GridFS {
* finds a list of files matching the given filename
* @param filename
* @return
* @throws MongoException
*/
public List<GridFSDBFile> find( String filename ){
return find( new BasicDBObject( "filename" , filename ) );
return find( filename, null );
}
/**
* finds a list of files matching the given filename
* @param filename
* @param sort
* @return
* @throws MongoException
*/
public List<GridFSDBFile> find( String filename , DBObject sort){
return find( new BasicDBObject( "filename" , filename ), sort );
}
/**
* finds a list of files matching the given query
* @param query
* @return
* @throws MongoException
*/
public List<GridFSDBFile> find( DBObject query ){
return find(query, null);
}
/**
* finds a list of files matching the given query
* @param query
* @param sort
* @return
* @throws MongoException
*/
public List<GridFSDBFile> find( DBObject query , DBObject sort){
List<GridFSDBFile> files = new ArrayList<GridFSDBFile>();
DBCursor c = _filesCollection.find( query );
DBCursor c = null;
try {
c = _filesCollection.find( query );
if (sort != null) {
c.sort(sort);
}
while ( c.hasNext() ){
files.add( _fix( c.next() ) );
}
} finally {
if (c != null){
c.close();
}
}
return files;
}
private GridFSDBFile _fix( Object o ){
protected GridFSDBFile _fix( Object o ){
if ( o == null )
return null;
@ -203,6 +255,7 @@ public class GridFS {
/**
* removes the file matching the given id
* @param id
* @throws MongoException
*/
public void remove( ObjectId id ){
_filesCollection.remove( new BasicDBObject( "_id" , id ) );
@ -212,6 +265,7 @@ public class GridFS {
/**
* removes all files matching the given filename
* @param filename
* @throws MongoException
*/
public void remove( String filename ){
remove( new BasicDBObject( "filename" , filename ) );
@ -220,6 +274,7 @@ public class GridFS {
/**
* removes all files matching the given query
* @param query
* @throws MongoException
*/
public void remove( DBObject query ){
for ( GridFSDBFile f : find( query ) ){
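The new getFileList(query, sort) and find(filename, sort) overloads let callers control ordering instead of always sorting by filename. A hedged sketch of listing and fetching files, with placeholder host, database, and file names:
import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.DBCursor;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.gridfs.GridFS;
import com.massivecraft.mcore.xlib.mongodb.gridfs.GridFSDBFile;
public class GridFSListExample {
    public static void main(String[] args) throws Exception {
        MongoClient mongo = new MongoClient("localhost");
        try {
            DB db = mongo.getDB("test");
            GridFS gridFs = new GridFS(db);
            // Newest uploads first, via the new getFileList(query, sort) overload.
            DBCursor files = gridFs.getFileList(new BasicDBObject(), new BasicDBObject("uploadDate", -1));
            try {
                while (files.hasNext()) {
                    System.out.println(files.next().get("filename"));
                }
            } finally {
                files.close();
            }
            GridFSDBFile file = gridFs.findOne("example.png");
            if (file != null) {
                file.writeTo("example-copy.png"); // writeTo(String) as documented in GridFSDBFile
            }
        } finally {
            mongo.close();
        }
    }
}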

View File

@ -53,6 +53,7 @@ public class GridFSDBFile extends GridFSFile {
* @param filename the file name on disk
* @return
* @throws IOException
* @throws MongoException
*/
public long writeTo( String filename ) throws IOException {
return writeTo( new File( filename ) );
@ -62,6 +63,7 @@ public class GridFSDBFile extends GridFSFile {
* @param f the File object
* @return
* @throws IOException
* @throws MongoException
*/
public long writeTo( File f ) throws IOException {
@ -80,6 +82,7 @@ public class GridFSDBFile extends GridFSFile {
* @param out the OutputStream
* @return
* @throws IOException
* @throws MongoException
*/
public long writeTo( OutputStream out )
throws IOException {

View File

@ -37,6 +37,7 @@ import com.massivecraft.mcore.xlib.mongodb.util.JSON;
* The abstract class representing a GridFS file
* @author antoine
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public abstract class GridFSFile implements DBObject {
@ -46,6 +47,7 @@ public abstract class GridFSFile implements DBObject {
/**
* Saves the file entry to the files collection
* @throws MongoException
*/
public void save(){
if ( _fs == null )
@ -58,7 +60,7 @@ public abstract class GridFSFile implements DBObject {
* This should be called after transferring a file.
* @throws MongoException
*/
public void validate() throws MongoException {
public void validate(){
if ( _fs == null )
throw new MongoException( "no _fs" );
if ( _md5 == null )
@ -147,7 +149,6 @@ public abstract class GridFSFile implements DBObject {
* note: to set aliases, call put( "aliases" , List<String> )
* @return
*/
@SuppressWarnings("unchecked")
public List<String> getAliases(){
return (List<String>)_extradata.get( "aliases" );
}
@ -226,12 +227,10 @@ public abstract class GridFSFile implements DBObject {
throw new UnsupportedOperationException();
}
@SuppressWarnings("rawtypes")
public void putAll( Map m ){
throw new UnsupportedOperationException();
}
@SuppressWarnings("rawtypes")
public Map toMap(){
throw new UnsupportedOperationException();
}
@ -252,7 +251,6 @@ public abstract class GridFSFile implements DBObject {
return keySet().contains( s );
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public Set<String> keySet(){
Set<String> keys = new HashSet();
keys.addAll(VALID_FIELDS);
@ -297,7 +295,6 @@ public abstract class GridFSFile implements DBObject {
DBObject _extradata = new BasicDBObject();
String _md5;
@SuppressWarnings({ "unchecked", "rawtypes" })
final static Set<String> VALID_FIELDS = Collections.unmodifiableSet( new HashSet( Arrays.asList( new String[]{
"_id" , "filename" , "contentType" , "length" , "chunkSize" ,
"uploadDate" , "aliases" , "md5"

View File

@ -18,20 +18,20 @@
package com.massivecraft.mcore.xlib.mongodb.gridfs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.MessageDigest;
import java.util.Date;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.mongodb.BasicDBObjectBuilder;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.MongoException;
import com.massivecraft.mcore.xlib.mongodb.util.SimplePool;
import com.massivecraft.mcore.xlib.mongodb.util.Util;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Date;
/**
* This class represents a GridFS file to be written to the database
* Operations include:
@ -64,7 +64,11 @@ public class GridFSInputFile extends GridFSFile {
_id = new ObjectId();
_chunkSize = GridFS.DEFAULT_CHUNKSIZE;
_uploadDate = new Date();
_messageDigester = _md5Pool.get();
try {
_messageDigester = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException("No MD5!");
}
_messageDigester.reset();
_buffer = new byte[(int) _chunkSize];
}
@ -148,6 +152,7 @@ public class GridFSInputFile extends GridFSFile {
/**
* calls {@link GridFSInputFile#save(long)} with the existing chunk size
* @throws MongoException
*/
@Override
public void save() {
@ -160,6 +165,7 @@ public class GridFSInputFile extends GridFSFile {
*
* @param chunkSize
* Size of chunks for file in bytes.
* @throws MongoException
*/
public void save( long chunkSize ) {
if (_outputStream != null)
@ -185,6 +191,7 @@ public class GridFSInputFile extends GridFSFile {
* @throws IOException
* on problems reading the new entry's
* {@link java.io.InputStream}.
* @throws MongoException
*/
public int saveChunks() throws IOException {
return saveChunks( _chunkSize );
@ -201,6 +208,7 @@ public class GridFSInputFile extends GridFSFile {
* @throws IOException
* on problems reading the new entry's
* {@link java.io.InputStream}.
* @throws MongoException
*/
public int saveChunks( long chunkSize ) throws IOException {
if (_outputStream != null)
@ -249,10 +257,9 @@ public class GridFSInputFile extends GridFSFile {
* Dumps a new chunk into the chunks collection. Depending on the flag, also
* partial buffers (at the end) are going to be written immediately.
*
* @param data
* Data for chunk.
* @param writePartial
* Write also partial buffers full.
* @throws MongoException
*/
private void _dumpBuffer( boolean writePartial ) {
if ( ( _currentBufferPosition < _chunkSize ) && !writePartial ) {
@ -314,7 +321,6 @@ public class GridFSInputFile extends GridFSFile {
private void _finishData() {
if (!_savedChunks) {
_md5 = Util.toHex( _messageDigester.digest() );
_md5Pool.done( _messageDigester );
_messageDigester = null;
_length = _totalBytes;
_savedChunks = true;
@ -337,25 +343,6 @@ public class GridFSInputFile extends GridFSFile {
private MessageDigest _messageDigester = null;
private OutputStream _outputStream = null;
/**
* A pool of {@link java.security.MessageDigest} objects.
*/
static SimplePool<MessageDigest> _md5Pool
= new SimplePool<MessageDigest>( "md5" , 10 , -1 , false , false ) {
/**
* {@inheritDoc}
*
* @see com.massivecraft.mcore.xlib.mongodb.util.SimplePool#createNew()
*/
protected MessageDigest createNew() {
try {
return MessageDigest.getInstance( "MD5" );
} catch ( java.security.NoSuchAlgorithmException e ) {
throw new RuntimeException( "your system doesn't have md5!" );
}
}
};
/**
* An output stream implementation that can be used to successively write to
* a GridFS file.
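Writing into GridFS goes through a GridFSInputFile, which buffers data into chunks and, with the change above, computes its MD5 digest with a per-file MessageDigest instead of a pooled one. A hedged sketch of a typical upload; the path, bucket name, and content type are placeholders, and GridFS.createFile(File) is assumed to behave as in the upstream 2.x driver:
import java.io.File;
import com.massivecraft.mcore.xlib.mongodb.DB;
import com.massivecraft.mcore.xlib.mongodb.MongoClient;
import com.massivecraft.mcore.xlib.mongodb.gridfs.GridFS;
import com.massivecraft.mcore.xlib.mongodb.gridfs.GridFSInputFile;
public class GridFSUploadExample {
    public static void main(String[] args) throws Exception {
        MongoClient mongo = new MongoClient("localhost");
        try {
            DB db = mongo.getDB("test");
            GridFS gridFs = new GridFS(db, "images");
            GridFSInputFile in = gridFs.createFile(new File("/tmp/example.png"));
            in.setContentType("image/png");
            in.save(256 * 1024); // explicit chunk size, as documented in save(long) above
        } finally {
            mongo.close();
        }
    }
}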

View File

@ -0,0 +1,312 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.tools;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.JMException;
import javax.management.MBeanException;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import javax.management.openmbean.CompositeData;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import java.io.CharArrayWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.lang.management.ManagementFactory;
import java.util.Set;
/**
* A simple class that formats Mongo Java driver connection pool statistics in an easily-accessible JSON format.
* It can be used to get statistics on connection pool in the same VM by using the no-args constructor, or in any
* VM by using the constructor that takes an MBeanServerConnection.
* <p>
* This class also exposes a command line interface modeled after mongostat. For usage, run:
* <pre> java -cp mongo.jar com.mongodb.util.management.jmx.ConnectionPoolStat --help}</pre>
*
* @mongodb.driver.manual reference/mongostat mongostat
*
*/
public class ConnectionPoolStat {
/**
* Use the given MBean server connection to access statistics for connection pools.
*
* @param mBeanConnection the MBean server to connect to
*/
public ConnectionPoolStat(MBeanServerConnection mBeanConnection) {
this.mBeanConnection = mBeanConnection;
}
/**
* Use the platform MBean server. This is useful if you want to access statistics
* for connection pools in the same virtual machine.
*
* @see java.lang.management.ManagementFactory#getPlatformMBeanServer()
*/
public ConnectionPoolStat() {
this.mBeanConnection = ManagementFactory.getPlatformMBeanServer();
}
/**
* Gets the statistics for all Mongo connection pools registered with the MBean server used
* by this instance. The format will always be JSON, but the specific JSON fields may change in a
* future release. An example of the output, which should not be taken as a specification:
*
* <pre>
{ pools : [
{ objectName: 'com.mongodb:type=ConnectionPool,host=localhost/127.0.0.1,port=27018,instance=1',
host: 'localhost', port: 27018, maxSize: 10, total: 10, inUse: 3,
inUseConnections: [
{ namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-19', durationMS: 843, localPort: 64062 },
{ namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-1', durationMS: 4331, localPort: 64095 },
{ namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-16', durationMS: 4343, localPort: 64087 }
]
},
{ objectName: 'com.mongodb:type=ConnectionPool,host=localhost/127.0.0.1,port=27017,instance=1',
host: 'localhost', port: 27017, maxSize: 10, total: 10, inUse: 2,
inUseConnections: [
{ namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-5', durationMS: 920, localPort: 64093 },
{ namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-11', durationMS: 1468, localPort: 64068 },
]
}
]
}</pre>
*
* @return JSON-formatted stats for all connection pools registered in JMX
* @throws JMException for any JMX-related exceptions
* @throws IOException for any I/O exceptions
*/
public String getStats() throws JMException, IOException {
CharArrayWriter charArrayWriter = new CharArrayWriter();
PrintWriter printWriter = new PrintWriter(charArrayWriter);
print(printWriter);
return charArrayWriter.toString();
}
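// Usage sketch (assumption: monitoring pools in the same JVM via the platform MBean server):
//
//     String json = new ConnectionPoolStat().getStats();
//     System.out.println(json);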
/**
* Command line interface for displaying connection pool stats. In order to connect to a remote JMX server to
* get these stats, you must currently set the com.sun.management.jmxremote.port system property on the remote server
* and specify that port using the --port argument.
*
* @param args program arguments
* @throws Exception JMX-related exceptions
* @see ConnectionPoolStat#printUsage()
*/
public static void main(String[] args) throws Exception {
String host = "localhost";
int port = -1;
long rowCount = 0;
int sleepTime = 1000;
int pos = 0;
for (; pos < args.length; pos++) {
if (args[pos].equals("--help")) {
printUsage();
System.exit(0);
} else if (args[pos].equals("--host") || args[pos].equals("-h")) {
host = args[++pos];
} else if (args[pos].equals("--port")) {
port = getIntegerArg(args[++pos], "--port");
} else if (args[pos].equals("--rowcount") || args[pos].equals("-n")) {
rowCount = getIntegerArg(args[++pos], "--rowCount");
} else if (args[pos].startsWith("-")) {
printErrorAndUsageAndExit("unknown option " + args[pos]);
}
else {
sleepTime = getIntegerArg(args[pos++], "sleep time") * 1000;
break;
}
}
if (pos != args.length) {
printErrorAndUsageAndExit("too many positional options");
}
if (port == -1 && !host.contains(":")) {
printErrorAndUsageAndExit("port is required");
}
String hostAndPort = (port != -1) ? host + ":" + port : host;
if (rowCount == 0) {
rowCount = Long.MAX_VALUE;
}
JMXServiceURL u = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://" + hostAndPort + "/jmxrmi");
JMXConnector connector = JMXConnectorFactory.connect(u);
MBeanServerConnection mBeanConnection = connector.getMBeanServerConnection();
try {
ConnectionPoolStat printer = new ConnectionPoolStat(mBeanConnection);
for (int i = 0; i < rowCount; i++) {
System.out.println(printer.getStats());
if (i != rowCount - 1) {
Thread.sleep(sleepTime);
}
}
} finally {
connector.close();
}
}
private static int getIntegerArg(String arg, String argName) {
try {
return Integer.parseInt(arg);
} catch (NumberFormatException e) {
printErrorAndUsageAndExit(argName + " arg must be an integer");
}
throw new IllegalStateException();
}
private static void printErrorAndUsageAndExit(final String error) {
System.err.println("ERROR: " + error);
System.out.println();
printUsage();
System.exit(1);
}
private static void printUsage() {
System.out.println("View live MongoDB connection pool statistics from a remote JMX server.");
System.out.println();
System.out.println("usage: java com.mongodb.tools.ConnectionPoolStat [options] [sleep time");
System.out.println("sleep time: time to wait (in seconds) between calls. Defaults to 1");
System.out.println("options:");
System.out.println(" --help produce help message");
System.out.println(" --port arg JMX remote port. Required. Can also use --host hostname:port");
System.out.println(" -h [ --host ] arg JMX remote host. Defaults to localhost");
System.out.println(" -n [ --rowcount ] arg number of times to print stats (0 for indefinite)");
System.out.println();
System.out.println("Fields");
System.out.println(" objectName - name of the JMX bean for this connection pool");
System.out.println(" host - host of the mongod/mongos server");
System.out.println(" port - port of the mongod/mongos server");
System.out.println(" maxSize - max # of connections allowed");
System.out.println(" total - # of connections allocated");
System.out.println(" inUse - # of connections in use");
System.out.println(" inUseConnections - list of all in use connections");
System.out.println(" inUseConnections.namespace - namespace on which connection is operating");
System.out.println(" inUseConnections.opCode - operation connection is executing");
System.out.println(" inUseConnections.query - query the connection is executing (for query/update/remove)");
System.out.println(" inUseConnections.numDocuments - # of documents in the message (mostly relevant for batch inserts)");
System.out.println(" inUseConnections.threadName - name of thread on which connection is executing");
System.out.println(" inUseConnections.durationMS - duration that the operation has been executing so far");
System.out.println(" inUseConnections.localPort - local port of the connection");
}
private void print(PrintWriter pw) throws JMException, IOException {
Set<ObjectName> beanSet = mBeanConnection.queryNames(new ObjectName("com.mongodb:type=ConnectionPool,*"), null);
pw.println("{ pools : [");
int i = 0;
for (ObjectName objectName : beanSet) {
pw.print(" { ");
printAttribute("ObjectName", objectName.toString(), pw);
pw.println();
pw.print(" ");
printAttribute("Host", objectName, pw);
printAttribute("Port", objectName, pw);
printAttribute("MaxSize", objectName, pw);
printStatistics(pw, objectName);
pw.println(" }" + (i == beanSet.size() - 1 ? "" : ","));
i++;
}
pw.println(" ]");
pw.println("}");
}
private void printStatistics(final PrintWriter pw, final ObjectName objectName) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException {
String key = "Statistics";
CompositeData statistics = (CompositeData) mBeanConnection.getAttribute(objectName, key);
printSimpleStatistics(pw, statistics);
printInUseConnections(statistics, pw);
}
private void printSimpleStatistics(final PrintWriter pw, final CompositeData statistics) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException {
printCompositeDataAttribute("total", statistics, pw);
printCompositeDataAttribute("inUse", statistics, pw);
pw.println();
}
private void printInUseConnections(final CompositeData statistics, final PrintWriter pw) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException {
String key = "inUseConnections";
CompositeData[] compositeDataArray = (CompositeData[]) statistics.get(key);
pw.println(" " + getKeyString(key) + ": [");
for (int i = 0; i < compositeDataArray.length; i++) {
CompositeData compositeData = compositeDataArray[i];
pw.print(" { ");
printCompositeDataAttribute("namespace", compositeData, pw);
printCompositeDataAttribute("opCode", compositeData, pw);
printCompositeDataAttribute("query", compositeData, pw, StringType.JSON);
printCompositeDataAttribute("numDocuments", compositeData, pw);
printCompositeDataAttribute("threadName", compositeData, pw);
printCompositeDataAttribute("durationMS", compositeData, pw);
printCompositeDataAttribute("localPort", compositeData, pw, Position.LAST);
pw.println(" }" + (i == compositeDataArray.length -1 ? "" : ", "));
}
pw.println(" ]");
}
private void printCompositeDataAttribute(String key, final CompositeData compositeData, final PrintWriter pw) {
printCompositeDataAttribute(key, compositeData, pw, Position.REGULAR);
}
private void printCompositeDataAttribute(String key, final CompositeData compositeData, final PrintWriter pw, Position position) {
printCompositeDataAttribute(key, compositeData, pw, position, StringType.REGULAR);
}
private void printCompositeDataAttribute(final String key, final CompositeData compositeData, final PrintWriter pw, final StringType stringType) {
printCompositeDataAttribute(key, compositeData, pw, Position.REGULAR, stringType);
}
private void printCompositeDataAttribute(String key, final CompositeData compositeData, final PrintWriter pw, Position position, StringType stringType) {
printAttribute(key, compositeData.get(key), pw, position, stringType);
}
private void printAttribute(final String key, final ObjectName objectName, final PrintWriter pw) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException {
printAttribute(key, mBeanConnection.getAttribute(objectName, key), pw);
}
private void printAttribute(final String key, final Object value, final PrintWriter pw) {
printAttribute(key, value, pw, Position.REGULAR, StringType.REGULAR);
}
private void printAttribute(final String key, final Object value, final PrintWriter pw, Position position, StringType stringType) {
if (value != null ) {
pw.print(getKeyString(key) + ": " + getValueString(value, stringType) + (position == Position.LAST ? "" : ", "));
}
}
private String getKeyString(final String key) {
return Character.toLowerCase(key.charAt(0)) + key.substring(1);
}
private String getValueString(final Object value, final StringType stringType) {
if (value instanceof String && stringType == StringType.REGULAR) {
return "" + "'" + value + "'";
}
return value.toString();
}
enum StringType { REGULAR, JSON }
enum Position { REGULAR, LAST}
private final MBeanServerConnection mBeanConnection;
}
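For orientation, a minimal usage sketch of the class above (the package name is assumed to follow the relocated com.massivecraft.mcore.xlib layout used elsewhere in this commit):

import com.massivecraft.mcore.xlib.mongodb.util.management.jmx.ConnectionPoolStat;

public class PoolStatExample {
    public static void main(String[] args) throws Exception {
        // The no-args constructor uses the platform MBean server, so this only sees
        // connection pools created inside the same VM.
        ConnectionPoolStat stat = new ConnectionPoolStat();
        System.out.println(stat.getStats()); // JSON document shaped like the Javadoc example above
    }
}

For a remote VM, start it with -Dcom.sun.management.jmxremote.port=&lt;port&gt; and run the command line interface with --host and --port as described in printUsage().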
View File
@ -35,54 +35,97 @@ package com.massivecraft.mcore.xlib.mongodb.util;
/**
* Provides Base64 encoding and decoding.
*
* <p/>
* <p>
* This class implements Base64 encoding and decoding.
*
* <p/>
* Thanks to the Apache Commons project. This class was refactored from org.apache.commons.codec.binary.
*
* <p/>
* Original thanks to the "commons" project at ws.apache.org for this code.
* http://svn.apache.org/repos/asf/webservices/commons/trunk/modules/util/
* </p>
*
*/
public class Base64Codec {
private static int BYTES_PER_UNENCODED_BLOCK = 3;
private static int BYTES_PER_ENCODED_BLOCK = 4;
private static final int BYTES_PER_UNENCODED_BLOCK = 3;
private static final int BYTES_PER_ENCODED_BLOCK = 4;
/** Mask used to extract 6 bits, used when encoding */
/**
* Mask used to extract 6 bits, used when encoding
*/
private static final int SixBitMask = 0x3f;
/** padding char */
/**
* padding char
*/
private static final byte PAD = '=';
/**
* This array is a lookup table that translates 6-bit positive integer index values into their "Base64 Alphabet"
* equivalents as specified in Table 1 of RFC 2045.
*
*/
private static final byte[] EncodeTable = { 'A', 'B', 'C', 'D', 'E', 'F',
private static final byte[] EncodeTable = {'A', 'B', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5',
'6', '7', '8', '9', '+', '/' };
'6', '7', '8', '9', '+', '/'};
private static final int[] DecodeTable = new int[128];
static {
for (int i = 0; i < EncodeTable.length; i++) {
DecodeTable[EncodeTable[i]] = i;
}
}
/**
* Translates the specified Base64 string into a byte array.
*
* @param s the Base64 string (not null)
* @return the byte array (not null)
*/
public byte[] decode(String s) {
int delta = s.endsWith("==") ? 2 : s.endsWith("=") ? 1 : 0;
byte[] buffer = new byte[s.length() * BYTES_PER_UNENCODED_BLOCK / BYTES_PER_ENCODED_BLOCK - delta];
int mask = 0xFF;
int pos = 0;
for (int i = 0; i < s.length(); i += BYTES_PER_ENCODED_BLOCK) {
int c0 = DecodeTable[s.charAt(i)];
int c1 = DecodeTable[s.charAt(i + 1)];
buffer[pos++] = (byte) (((c0 << 2) | (c1 >> 4)) & mask);
if (pos >= buffer.length) {
return buffer;
}
int c2 = DecodeTable[s.charAt(i + 2)];
buffer[pos++] = (byte) (((c1 << 4) | (c2 >> 2)) & mask);
if (pos >= buffer.length) {
return buffer;
}
int c3 = DecodeTable[s.charAt(i + 3)];
buffer[pos++] = (byte) (((c2 << 6) | c3) & mask);
}
return buffer;
}
/**
* Translates the specified byte array into Base64 string.
*
* @param in the byte array (not null)
* @return the translated Base64 string (not null)
*/
public String encode(byte[] in) {
int modulus = 0;
int bitWorkArea = 0;
int numEncodedBytes = (in.length/BYTES_PER_UNENCODED_BLOCK)*BYTES_PER_ENCODED_BLOCK
+ ((in.length%BYTES_PER_UNENCODED_BLOCK == 0 )?0:4);
int numEncodedBytes = (in.length / BYTES_PER_UNENCODED_BLOCK) * BYTES_PER_ENCODED_BLOCK
+ ((in.length % BYTES_PER_UNENCODED_BLOCK == 0) ? 0 : 4);
byte[] buffer = new byte[numEncodedBytes];
int pos = 0;
for (int i = 0; i < in.length; i++) {
modulus = (modulus+1) % BYTES_PER_UNENCODED_BLOCK;
int b = in[i];
for (int b : in) {
modulus = (modulus + 1) % BYTES_PER_UNENCODED_BLOCK;
if (b < 0)
b += 256;
@ -97,18 +140,18 @@ public class Base64Codec {
}
switch (modulus) { // 0-2
case 1 : // 8 bits = 6 + 2
case 1: // 8 bits = 6 + 2
buffer[pos++] = EncodeTable[(bitWorkArea >> 2) & SixBitMask]; // top 6 bits
buffer[pos++] = EncodeTable[(bitWorkArea << 4) & SixBitMask]; // remaining 2
buffer[pos++] = PAD;
buffer[pos++] = PAD;
buffer[pos] = PAD; // Last entry no need to ++
break;
case 2 : // 16 bits = 6 + 6 + 4
case 2: // 16 bits = 6 + 6 + 4
buffer[pos++] = EncodeTable[(bitWorkArea >> 10) & SixBitMask];
buffer[pos++] = EncodeTable[(bitWorkArea >> 4) & SixBitMask];
buffer[pos++] = EncodeTable[(bitWorkArea << 2) & SixBitMask];
buffer[pos++] = PAD;
buffer[pos] = PAD; // Last entry no need to ++
break;
}
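A quick round trip through the codec (a sketch based on the encode/decode methods shown in this hunk):

import java.nio.charset.Charset;

import com.massivecraft.mcore.xlib.mongodb.util.Base64Codec;

public class Base64RoundTrip {
    public static void main(String[] args) {
        Base64Codec codec = new Base64Codec();
        byte[] original = "hello mongo".getBytes(Charset.forName("UTF-8"));
        String encoded = codec.encode(original);   // "aGVsbG8gbW9uZ28=" (padded to a 4-char block)
        byte[] decoded = codec.decode(encoded);    // the delta calculation strips the padding again
        System.out.println(new String(decoded, Charset.forName("UTF-8"))); // prints "hello mongo"
    }
}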
View File
@ -31,6 +31,7 @@ import java.util.List;
*
* @author breinero
*/
@SuppressWarnings({"rawtypes"})
class ClassMapBasedObjectSerializer extends AbstractObjectSerializer {
/**
@ -41,7 +42,6 @@ class ClassMapBasedObjectSerializer extends AbstractObjectSerializer {
* This means that it is only necessary to assign ObjectSerializers to base classes. @see org.bson.util.ClassMap
* @param serializer performs the serialization mapping specific to the @param key type
*/
@SuppressWarnings("rawtypes")
void addObjectSerializer(Class c, ObjectSerializer serializer) {
_serializers.put(c , serializer);
}
View File
@ -0,0 +1,61 @@
/**
* Copyright (c) 2008 - 2012 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util;
import com.massivecraft.mcore.xlib.mongodb.InUseConnectionBean;
/**
* A bean representing connection pool statistics.
*/
public class ConnectionPoolStatisticsBean {
private final int total;
private final int inUse;
private final InUseConnectionBean[] inUseConnections;
public ConnectionPoolStatisticsBean(final int total, final int inUse, final InUseConnectionBean[] inUseConnections) {
this.total = total;
this.inUse = inUse;
this.inUseConnections = inUseConnections;
}
/**
* Gets the total number of pool members, including idle and in-use members.
*
* @return total number of members
*/
public int getTotal() {
return total;
}
/**
* Gets the number of pool members that are currently in use.
*
* @return number of in-use members
*/
public int getInUse() {
return inUse;
}
/**
* Gets an array of beans describing all the connections that are currently in use.
*
* @return array of in-use connection beans
*/
public InUseConnectionBean[] getInUseConnections() {
return inUseConnections;
}
}
View File
@ -28,7 +28,7 @@ import com.massivecraft.mcore.xlib.mongodb.DBObject;
public class JSON {
/**
* Serializes an object into it's JSON form.
* Serializes an object into its JSON form.
* <p>
* This method delegates serialization to <code>JSONSerializers.getLegacy</code>
*
@ -43,7 +43,7 @@ public class JSON {
}
/**
* Serializes an object into it's JSON form
* Serializes an object into its JSON form.
* <p>
* This method delegates serialization to <code>JSONSerializers.getLegacy</code>
*
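Both serialize overloads delegate to JSONSerializers.getLegacy(); a minimal call looks like this (a sketch, using the relocated xlib package):

import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.util.JSON;

public class JsonSerializeExample {
    public static void main(String[] args) {
        // Serializes a DBObject into its JSON form, e.g. { "name" : "MCore" , "enabled" : true }
        BasicDBObject doc = new BasicDBObject("name", "MCore").append("enabled", true);
        System.out.println(JSON.serialize(doc));
    }
}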
View File
@ -18,6 +18,16 @@
package com.massivecraft.mcore.xlib.mongodb.util;
import com.massivecraft.mcore.xlib.bson.BSON;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.BasicBSONCallback;
import com.massivecraft.mcore.xlib.bson.types.*;
import com.massivecraft.mcore.xlib.mongodb.BasicDBList;
import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.DBRef;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.util.Date;
@ -26,25 +36,10 @@ import java.util.SimpleTimeZone;
import java.util.UUID;
import java.util.regex.Pattern;
import com.massivecraft.mcore.xlib.bson.BSON;
import com.massivecraft.mcore.xlib.bson.BSONObject;
import com.massivecraft.mcore.xlib.bson.BasicBSONCallback;
import com.massivecraft.mcore.xlib.bson.types.BSONTimestamp;
import com.massivecraft.mcore.xlib.bson.types.Code;
import com.massivecraft.mcore.xlib.bson.types.CodeWScope;
import com.massivecraft.mcore.xlib.bson.types.MaxKey;
import com.massivecraft.mcore.xlib.bson.types.MinKey;
import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.mongodb.BasicDBList;
import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.DBRef;
public class JSONCallback extends BasicBSONCallback {
@Override
public BSONObject create(){
public BSONObject create() {
return new BasicDBObject();
}
@ -53,30 +48,26 @@ public class JSONCallback extends BasicBSONCallback {
return new BasicDBList();
}
public void objectStart(boolean array, String name){
public void objectStart(boolean array, String name) {
_lastArray = array;
super.objectStart( array , name );
super.objectStart(array, name);
}
public Object objectDone(){
public Object objectDone() {
String name = curName();
Object o = super.objectDone();
BSONObject b = (BSONObject)o;
if (_lastArray) {
return o;
}
BSONObject b = (BSONObject) o;
// override the object if it's a special type
if (!_lastArray) {
if (b.containsField("$oid")) {
o = new ObjectId((String) b.get("$oid"));
if (!isStackEmpty()) {
gotObjectId(name, (ObjectId) o);
} else {
setRoot(o);
}
} else if (b.containsField("$date")) {
if(b.get("$date") instanceof Number){
o = new Date(((Number)b.get("$date")).longValue());
}else {
if (b.get("$date") instanceof Number) {
o = new Date(((Number) b.get("$date")).longValue());
} else {
SimpleDateFormat format = new SimpleDateFormat(_msDateFormat);
format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT")));
o = format.parse(b.get("$date").toString(), new ParsePosition(0));
@ -88,69 +79,44 @@ public class JSONCallback extends BasicBSONCallback {
o = format.parse(b.get("$date").toString(), new ParsePosition(0));
}
}
if (!isStackEmpty()) {
cur().put(name, o);
} else {
setRoot(o);
}
} else if ( b.containsField( "$regex" ) ) {
o = Pattern.compile( (String)b.get( "$regex" ),
BSON.regexFlags( (String)b.get( "$options" )) );
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
} else if ( b.containsField( "$ts" ) ) {
Long ts = ((Number)b.get("$ts")).longValue();
Long inc = ((Number)b.get("$inc")).longValue();
o = new BSONTimestamp(ts.intValue(), inc.intValue());
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
} else if ( b.containsField( "$code" ) ) {
} else if (b.containsField("$regex")) {
o = Pattern.compile((String) b.get("$regex"),
BSON.regexFlags((String) b.get("$options")));
} else if (b.containsField("$ts")) { //Legacy timestamp format
Integer ts = ((Number) b.get("$ts")).intValue();
Integer inc = ((Number) b.get("$inc")).intValue();
o = new BSONTimestamp(ts, inc);
} else if (b.containsField("$timestamp")) {
BSONObject tsObject = (BSONObject) b.get("$timestamp");
Integer ts = ((Number) tsObject.get("t")).intValue();
Integer inc = ((Number) tsObject.get("i")).intValue();
o = new BSONTimestamp(ts, inc);
} else if (b.containsField("$code")) {
if (b.containsField("$scope")) {
o = new CodeWScope((String)b.get("$code"), (DBObject)b.get("$scope"));
o = new CodeWScope((String) b.get("$code"), (DBObject) b.get("$scope"));
} else {
o = new Code((String)b.get("$code"));
o = new Code((String) b.get("$code"));
}
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
} else if ( b.containsField( "$ref" ) ) {
o = new DBRef(null, (String)b.get("$ref"), b.get("$id"));
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
} else if ( b.containsField( "$minKey" ) ) {
} else if (b.containsField("$ref")) {
o = new DBRef(null, (String) b.get("$ref"), b.get("$id"));
} else if (b.containsField("$minKey")) {
o = new MinKey();
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
} else if ( b.containsField( "$maxKey" ) ) {
} else if (b.containsField("$maxKey")) {
o = new MaxKey();
} else if (b.containsField("$uuid")) {
o = UUID.fromString((String) b.get("$uuid"));
} else if (b.containsField("$binary")) {
int type = (Integer) b.get("$type");
byte[] bytes = (new Base64Codec()).decode((String) b.get("$binary"));
o = new Binary((byte) type, bytes);
}
if (!isStackEmpty()) {
cur().put( name, o );
_put(name, o);
} else {
o = !BSON.hasDecodeHooks() ? o : BSON.applyDecodingHooks( o );
setRoot(o);
}
} else if ( b.containsField( "$uuid" ) ) {
o = UUID.fromString((String)b.get("$uuid"));
if (!isStackEmpty()) {
cur().put( name, o );
} else {
setRoot(o);
}
}
}
return o;
}
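The practical effect of the objectDone() rewrite above is that special $-keyed documents are replaced with typed values before being placed back on the stack. A sketch through JSON.parse(), which drives this callback:

import java.util.Date;

import com.massivecraft.mcore.xlib.bson.types.ObjectId;
import com.massivecraft.mcore.xlib.mongodb.DBObject;
import com.massivecraft.mcore.xlib.mongodb.util.JSON;

public class ExtendedJsonParseExample {
    public static void main(String[] args) {
        String text = "{ \"_id\" : { \"$oid\" : \"507f1f77bcf86cd799439011\" }, "
                    + "\"created\" : { \"$date\" : 1367402799000 } }";

        DBObject doc = (DBObject) JSON.parse(text);
        ObjectId id = (ObjectId) doc.get("_id");      // the $oid wrapper became an ObjectId
        Date created = (Date) doc.get("created");     // the numeric $date became a java.util.Date
        System.out.println(id + " / " + created);
    }
}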
View File
@ -406,8 +406,8 @@ public class JSONSerializers {
public void serialize(Object obj, StringBuilder buf) {
BSONTimestamp t = (BSONTimestamp) obj;
BasicDBObject temp = new BasicDBObject();
temp.put("$t", Integer.valueOf(t.getTime()));
temp.put("$i", Integer.valueOf(t.getInc()));
temp.put("t", Integer.valueOf(t.getTime()));
temp.put("i", Integer.valueOf(t.getInc()));
BasicDBObject timestampObj = new BasicDBObject();
timestampObj.put("$timestamp", temp);
serializer.serialize(timestampObj, buf);
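The change in this hunk switches BSONTimestamp output from the non-standard $t/$i pair to the extended-JSON $timestamp document. A sketch of the resulting shape, built by hand here since this hunk alone does not show which serializer mode is affected:

import com.massivecraft.mcore.xlib.bson.types.BSONTimestamp;
import com.massivecraft.mcore.xlib.mongodb.BasicDBObject;
import com.massivecraft.mcore.xlib.mongodb.util.JSON;

public class TimestampJsonShape {
    public static void main(String[] args) {
        BSONTimestamp ts = new BSONTimestamp(1367402799, 1);
        // Same structure the serializer above now emits: { "$timestamp" : { "t" : ..., "i" : ... } }
        BasicDBObject inner = new BasicDBObject("t", ts.getTime()).append("i", ts.getInc());
        System.out.println(JSON.serialize(new BasicDBObject("$timestamp", inner)));
    }
}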
View File
@ -88,6 +88,11 @@ public class MyAsserts {
_assertEquals( a , b == null ? null : b.toString() );
}
public static void assertSame(Object a, Object b) {
if ( a != b )
throw new MyAssert( a + " != " + b );
}
public static void assertEquals( Object a , Object b ){
_assertEquals( a , b );
}
@ -119,6 +124,13 @@ public class MyAsserts {
throw new MyAssert("These arrays are different, but they might be big so not printing them here");
}
public static void assertArrayEquals(char[] expected, char[] result) {
if (Arrays.equals(expected, result))
return;
throw new MyAssert("These arrays are different, but they might be big so not printing them here");
}
public static void assertNotEquals( Object a , Object b ){
if ( a == null ){
if ( b != null )
View File
@ -1,7 +1,5 @@
// SimplePool.java
/**
* Copyright (C) 2008 10gen Inc.
* Copyright (C) 2008-2012 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -19,79 +17,47 @@
package com.massivecraft.mcore.xlib.mongodb.util;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import javax.management.Attribute;
import javax.management.AttributeList;
import javax.management.DynamicMBean;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
public abstract class SimplePool<T> implements DynamicMBean {
static final boolean TRACK_LEAKS = Boolean.getBoolean( "MONGO-TRACKLEAKS" );
static final long _sleepTime = 2;
/**
* See full constructor docs
/**
* This class is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases.
*/
public SimplePool( String name , int maxToKeep , int maxTotal ){
this( name , maxToKeep , maxTotal , false , false );
}
public abstract class SimplePool<T> {
/** Initializes a new pool of objects.
* @param name name for the pool
* @param maxToKeep max to hold to at any given time. if < 0 then no limit
* @param maxTotal max to have allocated at any point. if there are no more, get() will block
* @param trackLeaks if leaks should be tracked
* @param size max to hold to at any given time. if < 0 then no limit
*/
public SimplePool( String name , int maxToKeep , int maxTotal , boolean trackLeaks , boolean debug ){
public SimplePool(String name, int size){
_name = name;
_maxToKeep = maxToKeep;
_maxTotal = maxTotal;
_trackLeaks = trackLeaks || TRACK_LEAKS;
_debug = debug;
_mbeanInfo = new MBeanInfo( this.getClass().getName() , _name ,
new MBeanAttributeInfo[]{
new MBeanAttributeInfo( "name" , "java.lang.String" , "name of pool" , true , false , false ) ,
new MBeanAttributeInfo( "size" , "java.lang.Integer" , "total size of pool" , true , false , false ) ,
new MBeanAttributeInfo( "available" , "java.lang.Integer" , "total connections available" , true , false , false ) ,
new MBeanAttributeInfo( "inUse" , "java.lang.Integer" , "number connections in use right now" , true , false , false ) ,
new MBeanAttributeInfo( "everCreated" , "java.lang.Integer" , "number connections ever created" , true , false , false )
} , null , null , null );
_size = size;
_sem = new Semaphore(size);
}
/** Creates a new object of this pool's type.
/** Creates a new object of this pool's type. Implementations should throw a runtime exception if unable to create.
* @return the new object.
*/
protected abstract T createNew();
/**
* callback to determine if an object is ok to be added back to the pool or used
* will be called when something is put back into the queue and when it comes out
* @return true if the object is ok to be added back to pool
* override this if you need to do any cleanup
*/
public boolean ok( T t ){
return true;
public void cleanup( T t ) {
}
/**
* override this if you need to do any cleanup
* Pick a member of {@code _avail}. This method is called with a lock held on {@code _avail}, so it may be used safely.
*
* @param recommended the recommended member to choose.
* @param couldCreate true if there is room in the pool to create a new object
* @return >= 0 the one to use, -1 create a new one
*/
public void cleanup( T t ){}
/**
* @return >= 0 the one to use, -1 don't use any
*/
protected int pick( int iThink , boolean couldCreate ){
return iThink;
protected int pick( int recommended , boolean couldCreate ){
return recommended;
}
/**
@ -100,49 +66,36 @@ public abstract class SimplePool<T> implements DynamicMBean {
* @param t Object to add
*/
public void done( T t ){
done( t , ok( t ) );
}
void done( T t , boolean ok ){
if ( _trackLeaks ){
synchronized ( _where ){
_where.remove( _hash( t ) );
}
}
if ( ! ok ){
synchronized ( _avail ){
_all.remove( t );
}
synchronized ( this ) {
if (_closed) {
cleanup(t);
return;
}
synchronized ( _avail ){
if ( _maxToKeep < 0 || _avail.size() < _maxToKeep ){
for ( int i=0; i<_avail.size(); i++ )
if ( _avail.get( i ) == t )
throw new RuntimeException( "trying to put something back in the pool that's already there" );
assertConditions();
// if all doesn't contain it, it probably means this was cleared, so we don't want it
if ( _all.contains( t ) ){
_avail.add( t );
_waiting.release();
}
}
else {
cleanup( t );
}
}
if (!_out.remove(t)) {
throw new RuntimeException("trying to put something back in the pool wasn't checked out");
}
public void remove( T t ){
done( t , false );
_avail.add(t);
}
_sem.release();
}
private void assertConditions() {
assert getTotal() <= getMaxSize();
}
public void remove( T t ) {
done(t);
}
/** Gets an object from the pool - will block if none are available
* @return An object from the pool
*/
public T get(){
public T get() throws InterruptedException {
return get(-1);
}
@ -151,210 +104,100 @@ public abstract class SimplePool<T> implements DynamicMBean {
* negative - forever
* 0 - return immediately no matter what
* positive ms to wait
* @return An object from the pool
* @return An object from the pool, or null if can't get one in the given waitTime
*/
public T get( long waitTime ){
final T t = _get( waitTime );
if ( t != null ){
if ( _trackLeaks ){
Throwable stack = new Throwable();
stack.fillInStackTrace();
synchronized ( _where ){
_where.put( _hash( t ) , stack );
}
}
}
return t;
}
private int _hash( T t ){
return System.identityHashCode( t );
}
private T _get( long waitTime ){
long totalSlept = 0;
while ( true ){
synchronized ( _avail ){
boolean couldCreate = _maxTotal <= 0 || _all.size() < _maxTotal;
while ( _avail.size() > 0 ){
int toTake = _avail.size() - 1;
toTake = pick( toTake, couldCreate );
if ( toTake >= 0 ){
T t = _avail.remove( toTake );
if ( ok( t ) ){
_debug( "got an old one" );
return t;
}
_debug( "old one was not ok" );
_all.remove( t );
continue;
}
else if ( ! couldCreate ) {
throw new IllegalStateException( "can't pick nothing if can't create" );
}
break;
}
if ( couldCreate ){
_everCreated++;
T t = createNew();
_all.add( t );
return t;
}
if ( _trackLeaks && _trackPrintCount++ % 200 == 0 ){
_wherePrint();
_trackPrintCount = 1;
}
}
if ( waitTime == 0 )
public T get(long waitTime) throws InterruptedException {
if (!permitAcquired(waitTime)) {
return null;
}
if ( waitTime > 0 && totalSlept >= waitTime )
return null;
synchronized (this) {
assertConditions();
long start = System.currentTimeMillis();
int toTake = pick(_avail.size() - 1, getTotal() < getMaxSize());
T t;
if (toTake >= 0) {
t = _avail.remove(toTake);
} else {
t = createNewAndReleasePermitIfFailure();
}
_out.add(t);
return t;
}
}
private T createNewAndReleasePermitIfFailure() {
try {
_waiting.tryAcquire( _sleepTime , TimeUnit.MILLISECONDS );
T newMember = createNew();
if (newMember == null) {
throw new IllegalStateException("null pool members are not allowed");
}
catch ( InterruptedException ie ){
}
totalSlept += ( System.currentTimeMillis() - start );
return newMember;
} catch (RuntimeException e) {
_sem.release();
throw e;
} catch (Error e) {
_sem.release();
throw e;
}
}
private void _wherePrint(){
StringBuilder buf = new StringBuilder( toString() ).append( " waiting \n" );
synchronized ( _where ){
for ( Throwable t : _where.values() ){
buf.append( "--\n" );
final StackTraceElement[] st = t.getStackTrace();
for ( int i=0; i<st.length; i++ )
buf.append( " " ).append( st[i] ).append( "\n" );
buf.append( "----\n" );
private boolean permitAcquired(final long waitTime) throws InterruptedException {
if (waitTime > 0) {
return _sem.tryAcquire(waitTime, TimeUnit.MILLISECONDS);
} else if (waitTime < 0) {
_sem.acquire();
return true;
} else {
return _sem.tryAcquire();
}
}
System.out.println( buf );
}
/** Clears the pool of all objects. */
protected void clear(){
synchronized( _avail ){
for ( T t : _avail )
cleanup( t );
protected synchronized void close(){
_closed = true;
for (T t : _avail)
cleanup(t);
_avail.clear();
_all.clear();
synchronized ( _where ){
_where.clear(); // is this correct
}
}
_out.clear();
}
public int total(){
return _all.size();
}
public int inUse(){
return _all.size() - _avail.size();
}
public Iterator<T> getAll(){
return _all.getAll().iterator();
}
public int available(){
if ( _maxTotal <= 0 )
throw new IllegalStateException( "this pool has an infinite number of things available" );
return _maxTotal - inUse();
}
public int everCreated(){
return _everCreated;
}
private void _debug( String msg ){
if( _debug )
System.out.println( "SimplePool [" + _name + "] : " + msg );
}
public int maxToKeep(){
return _maxToKeep;
}
public Object getAttribute(String attribute){
if ( attribute.equals( "name" ) )
public String getName() {
return _name;
if ( attribute.equals( "size" ) )
return _maxToKeep;
if ( attribute.equals( "available" ) )
return available();
if ( attribute.equals( "inUse" ) )
return inUse();
if ( attribute.equals( "everCreated" ) )
return _everCreated;
System.err.println( "com.mongo.util.SimplePool unknown attribute: " + attribute );
throw new RuntimeException( "unknown attribute: " + attribute );
}
public AttributeList getAttributes(String[] attributes){
AttributeList l = new AttributeList();
for ( int i=0; i<attributes.length; i++ ){
String name = attributes[i];
l.add( new Attribute( name , getAttribute( name ) ) );
}
return l;
public synchronized int getTotal(){
return _avail.size() + _out.size();
}
public MBeanInfo getMBeanInfo(){
return _mbeanInfo;
public synchronized int getInUse(){
return _out.size();
}
public Object invoke(String actionName, Object[] params, String[] signature){
throw new RuntimeException( "not allowed to invoke anything" );
public synchronized int getAvailable(){
return _avail.size();
}
public void setAttribute(Attribute attribute){
throw new RuntimeException( "not allowed to set anything" );
public int getMaxSize(){
return _size;
}
public AttributeList setAttributes(AttributeList attributes){
throw new RuntimeException( "not allowed to set anything" );
}
public String toString(){
public synchronized String toString(){
StringBuilder buf = new StringBuilder();
buf.append( "pool: " ).append( _name )
.append( " maxToKeep: " ).append( _maxToKeep )
.append( " maxTotal: " ).append( _maxToKeep )
.append( " where " ).append( _where.size() )
.append( " avail " ).append( _avail.size() )
.append( " all " ).append( _all.size() )
buf.append("pool: ").append(_name)
.append(" maxToKeep: ").append(_size)
.append(" avail ").append(_avail.size())
.append(" out ").append(_out.size())
;
return buf.toString();
}
protected final String _name;
protected final int _maxToKeep;
protected final int _maxTotal;
protected final boolean _trackLeaks;
protected final boolean _debug;
protected final MBeanInfo _mbeanInfo;
private final List<T> _avail = new ArrayList<T>();
protected final List<T> _availSafe = Collections.unmodifiableList( _avail );
private final WeakBag<T> _all = new WeakBag<T>();
private final Map<Integer,Throwable> _where = new HashMap<Integer,Throwable>();
private final Semaphore _waiting = new Semaphore(0);
private int _everCreated = 0;
private int _trackPrintCount = 0;
protected final int _size;
protected final List<T> _avail = new ArrayList<T>();
protected final Set<T> _out = new HashSet<T>();
private final Semaphore _sem;
private boolean _closed;
}
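The rewritten pool reduces to a fixed-size semaphore plus an _avail list and an _out set. A minimal subclass sketch of the new contract (createNew(), get(), done()):

import java.io.ByteArrayOutputStream;

import com.massivecraft.mcore.xlib.mongodb.util.SimplePool;

public class BufferPool extends SimplePool<ByteArrayOutputStream> {

    public BufferPool(int size) {
        super("BufferPool", size); // size caps both idle and in-use members via the semaphore
    }

    @Override
    protected ByteArrayOutputStream createNew() {
        return new ByteArrayOutputStream(1024);
    }

    public void example() throws InterruptedException {
        ByteArrayOutputStream buffer = get();   // blocks until a permit is available
        try {
            buffer.write(42);
        } finally {
            done(buffer);                       // returns the member and releases the permit
        }
    }
}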
View File
@ -22,11 +22,9 @@ public class StringBuilderPool extends SimplePool<StringBuilder> {
/** Initializes a pool of a given number of StringBuilders, each of a certain size.
* @param maxToKeep the number of string builders in the pool
* @param maxSize the size of each string builder
*/
public StringBuilderPool( String name , int maxToKeep , int maxSize ){
super( "StringBuilderPool-" + name , maxToKeep , -1 );
_maxSize = maxSize;
public StringBuilderPool( String name , int maxToKeep ){
super( "StringBuilderPool-" + name , maxToKeep );
}
/** Create a new string builder.
@ -41,7 +39,7 @@ public class StringBuilderPool extends SimplePool<StringBuilder> {
* @return if it is not too big
*/
public boolean ok( StringBuilder buf ){
if ( buf.length() > _maxSize )
if ( buf.length() > getMaxSize() )
return false;
buf.setLength( 0 );
return true;
@ -50,6 +48,4 @@ public class StringBuilderPool extends SimplePool<StringBuilder> {
protected long memSize( StringBuilder buf ){
return buf.length() * 2;
}
final int _maxSize;
View File

@ -24,6 +24,12 @@ import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A thread pool implementation.
*
* @deprecated This class is no longer in use and should not have been public. It may be removed in a future release.
*/
@Deprecated
public abstract class ThreadPool<T> {
/** Initializes a new thread pool with a given name and number of threads.
View File
@ -26,7 +26,7 @@ import java.util.List;
/**
* if its not obvious what a weak bag should do, then, well...
* very very not thead safe
* very very not thread safe
*/
public class WeakBag<T> implements Iterable<T> {
View File
@ -0,0 +1,29 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util.management;
/**
*
* This class is NOT part of the public API. It may change at any time without notification.
*/
public class JMException extends Exception {
static final long serialVersionUID = -2052972874393271421L;
public JMException(Throwable cause) {
super(cause);
}
}
View File
@ -0,0 +1,28 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util.management;
/**
* This class is NOT part of the public API. It may change at any time without notification.
*/
public interface MBeanServer {
boolean isRegistered(String mBeanName) throws JMException;
void unregisterMBean(String mBeanName) throws JMException;
void registerMBean(Object mBean, String mBeanName) throws JMException;
}
View File
@ -0,0 +1,43 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util.management;
import com.massivecraft.mcore.xlib.mongodb.util.management.jmx.JMXMBeanServer;
/**
* This class is NOT part of the public API. It may change at any time without notification.
*
* This class is used to insulate the rest of the driver from the possibility that JMX is not available,
* as currently is the case on Android VM
*/
public class MBeanServerFactory {
static {
MBeanServer tmp;
try {
tmp = new JMXMBeanServer();
} catch (Throwable e) {
tmp = new NullMBeanServer();
}
mBeanServer = tmp;
}
public static MBeanServer getMBeanServer() {
return mBeanServer;
}
private static final MBeanServer mBeanServer;
}
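Usage sketch for the factory above (the bean name is illustrative): callers never touch javax.management directly, so the driver still loads on VMs without JMX, such as Android.

import com.massivecraft.mcore.xlib.mongodb.util.management.JMException;
import com.massivecraft.mcore.xlib.mongodb.util.management.MBeanServer;
import com.massivecraft.mcore.xlib.mongodb.util.management.MBeanServerFactory;

public class MBeanRegistrationExample {
    public static void registerPoolBean(Object statisticsBean) {
        // Resolves to a JMXMBeanServer when JMX is available, otherwise to the
        // NullMBeanServer shown below, in which case registration is a silent no-op.
        MBeanServer server = MBeanServerFactory.getMBeanServer();
        String name = "com.mongodb:type=ConnectionPool,host=localhost,port=27017,instance=1"; // illustrative name
        try {
            if (!server.isRegistered(name)) {
                server.registerMBean(statisticsBean, name);
            }
        } catch (JMException e) {
            e.printStackTrace(); // the driver-level wrapper exception, not javax.management.JMException
        }
    }
}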
View File
@ -0,0 +1,35 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util.management;
/**
* This class is NOT part of the public API. It may change at any time without notification.
*/
public class NullMBeanServer implements MBeanServer {
@Override
public boolean isRegistered(String mBeanName) {
return false;
}
@Override
public void unregisterMBean(String mBeanName) {
}
@Override
public void registerMBean(Object mBean, String mBeanName) {
}
}
View File
@ -0,0 +1,67 @@
/**
* Copyright (c) 2008 - 2011 10gen, Inc. <http://10gen.com>
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivecraft.mcore.xlib.mongodb.util.management.jmx;
import com.massivecraft.mcore.xlib.mongodb.util.management.JMException;
import com.massivecraft.mcore.xlib.mongodb.util.management.MBeanServer;
import javax.management.*;
import java.lang.management.ManagementFactory;
/**
* This class is NOT part of the public API. It may change at any time without notification.
*/
public class JMXMBeanServer implements MBeanServer {
@Override
public boolean isRegistered(String mBeanName) throws JMException {
return server.isRegistered(createObjectName(mBeanName));
}
@Override
public void unregisterMBean(String mBeanName) throws JMException {
try {
server.unregisterMBean(createObjectName(mBeanName));
} catch (InstanceNotFoundException e) {
throw new JMException(e);
} catch (MBeanRegistrationException e) {
throw new JMException(e);
}
}
@Override
public void registerMBean(Object mBean, String mBeanName) throws JMException {
try {
server.registerMBean(mBean, createObjectName(mBeanName));
} catch (InstanceAlreadyExistsException e) {
throw new JMException(e);
} catch (MBeanRegistrationException e) {
throw new JMException(e);
} catch (NotCompliantMBeanException e) {
throw new JMException(e);
}
}
private ObjectName createObjectName(String mBeanName) throws JMException {
try {
return new ObjectName(mBeanName);
} catch (MalformedObjectNameException e) {
throw new JMException(e);
}
}
private final javax.management.MBeanServer server = ManagementFactory.getPlatformMBeanServer();
}